diff --git a/backend/init/init.go b/backend/init/init.go
index 3853fbea02f4..8f70a1f81e74 100644
--- a/backend/init/init.go
+++ b/backend/init/init.go
@@ -22,6 +22,7 @@ import (
 	backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http"
 	backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
 	backendManta "github.com/hashicorp/terraform/backend/remote-state/manta"
+	backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss"
 	backendPg "github.com/hashicorp/terraform/backend/remote-state/pg"
 	backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
 	backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
@@ -62,6 +63,7 @@ func Init(services *disco.Disco) {
 		"http":   func() backend.Backend { return backendHTTP.New() },
 		"inmem":  func() backend.Backend { return backendInmem.New() },
 		"manta":  func() backend.Backend { return backendManta.New() },
+		"oss":    func() backend.Backend { return backendOSS.New() },
 		"pg":     func() backend.Backend { return backendPg.New() },
 		"s3":     func() backend.Backend { return backendS3.New() },
 		"swift":  func() backend.Backend { return backendSwift.New() },
diff --git a/backend/remote-state/oss/backend.go b/backend/remote-state/oss/backend.go
new file mode 100644
index 000000000000..33c2cbf5c4d4
--- /dev/null
+++ b/backend/remote-state/oss/backend.go
@@ -0,0 +1,309 @@
+package oss
+
+import (
+	"context"
+	"fmt"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+	"os"
+	"strings"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/location"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
+	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/terraform/version"
+	"log"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// New creates a new backend for OSS remote state.
+func New() backend.Backend {
+	s := &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			"access_key": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Alibaba Cloud Access Key ID",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")),
+			},
+
+			"secret_key": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Alibaba Cloud Access Secret Key",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")),
+			},
+
+			"security_token": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Alibaba Cloud Security Token",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""),
+			},
+
+			"region": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The region of the OSS bucket.",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")),
+			},
+			"tablestore_endpoint": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "A custom endpoint for the TableStore API",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""),
+			},
+			"endpoint": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "A custom endpoint for the OSS API",
+				DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")),
+			},
+
+			"bucket": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The name of the OSS bucket",
+			},
+
+			"prefix": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The directory where state files will be saved inside the bucket",
+				Default:     "env:",
+				ValidateFunc: func(v interface{}, s string) ([]string, []error) {
+					prefix := v.(string)
+					if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") {
+						return nil, []error{fmt.Errorf("prefix must not start with '/' or './'")}
+					}
+					return nil, nil
+				},
+			},
+
+			"key": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The path of the state file inside the bucket",
+				ValidateFunc: func(v interface{}, s string) ([]string, []error) {
+					if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") {
+						return nil, []error{fmt.Errorf("key must not start or end with '/'")}
+					}
+					return nil, nil
+				},
+				Default: "terraform.tfstate",
+			},
+
+			"tablestore_table": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "TableStore table for state locking and consistency",
+				Default:     "",
+			},
+
+			"encrypt": &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "Whether to enable server side encryption of the state file",
+				Default:     false,
+			},
+
+			"acl": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Object ACL to be applied to the state file",
+				Default:     "",
+				ValidateFunc: func(v interface{}, k string) ([]string, []error) {
+					if value := v.(string); value != "" {
+						acls := oss.ACLType(value)
+						if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite {
+							return nil, []error{fmt.Errorf(
+								"%q must be a valid ACL value, expected %s, %s or %s, got %q",
+								k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)}
+						}
+					}
+					return nil, nil
+				},
+			},
+		},
+	}
+
+	result := &Backend{Backend: s}
+	result.Backend.ConfigureFunc = result.configure
+	return result
+}
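As a quick illustration of how the schema above is exercised (a sketch with made-up values, mirroring the tests later in this patch), a configuration map is validated and applied like so:

    b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
        "bucket": "my-bucket",         // required
        "prefix": "terraform/state",   // must not start with "/" or "./"
        "key":    "terraform.tfstate", // must not start or end with "/"
    })).(*Backend)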
+type Backend struct {
+	*schema.Backend
+
+	// The fields below are set from configure
+	ossClient *oss.Client
+	otsClient *tablestore.TableStoreClient
+
+	bucketName           string
+	statePrefix          string
+	stateKey             string
+	serverSideEncryption bool
+	acl                  string
+	endpoint             string
+	otsEndpoint          string
+	otsTable             string
+}
+
+func (b *Backend) configure(ctx context.Context) error {
+	if b.ossClient != nil {
+		return nil
+	}
+
+	// Grab the resource data
+	d := schema.FromContextBackendConfig(ctx)
+
+	b.bucketName = d.Get("bucket").(string)
+	b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./")
+	b.stateKey = d.Get("key").(string)
+	b.serverSideEncryption = d.Get("encrypt").(bool)
+	b.acl = d.Get("acl").(string)
+
+	accessKey := d.Get("access_key").(string)
+	secretKey := d.Get("secret_key").(string)
+	securityToken := d.Get("security_token").(string)
+	region := d.Get("region").(string)
+	endpoint := d.Get("endpoint").(string)
+	scheme := "https"
+
+	if endpoint == "" {
+		endpointItem, _ := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region)
+		if endpointItem != nil && len(endpointItem.Endpoint) > 0 {
+			if len(endpointItem.Protocols.Protocols) > 0 {
+				// Prefer HTTPS if the endpoint advertises it; otherwise fall
+				// back to the first advertised protocol.
+				scheme = strings.ToLower(endpointItem.Protocols.Protocols[0])
+				for _, p := range endpointItem.Protocols.Protocols {
+					if strings.ToLower(p) == "https" {
+						scheme = strings.ToLower(p)
+						break
+					}
+				}
+			}
+			endpoint = endpointItem.Endpoint
+		} else {
+			endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region)
+		}
+	}
+	if !strings.HasPrefix(endpoint, "http") {
+		endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+	}
+	log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint)
+	var options []oss.ClientOption
+	if securityToken != "" {
+		options = append(options, oss.SecurityToken(securityToken))
+	}
+	options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion)))
+
+	client, err := oss.New(endpoint, accessKey, secretKey, options...)
+	if err != nil {
+		return err
+	}
+	b.ossClient = client
+
+	otsEndpoint := d.Get("tablestore_endpoint").(string)
+	if otsEndpoint != "" {
+		if !strings.HasPrefix(otsEndpoint, "http") {
+			otsEndpoint = fmt.Sprintf("%s://%s", scheme, otsEndpoint)
+		}
+		b.otsEndpoint = otsEndpoint
+		parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".")
+		b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig())
+	}
+	b.otsTable = d.Get("tablestore_table").(string)
+
+	return nil
+}
+
+func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointResponse, error) {
+	args := location.CreateDescribeEndpointRequest()
+	args.ServiceCode = "oss"
+	args.Id = region
+	args.Domain = "location-readonly.aliyuncs.com"
+
+	locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token))
+	if err != nil {
+		return nil, fmt.Errorf("Unable to initialize the location client: %#v", err)
+	}
+	locationClient.AppendUserAgent(TerraformUA, TerraformVersion)
+	endpointsResponse, err := locationClient.DescribeEndpoint(args)
+	if err != nil {
+		return nil, fmt.Errorf("Describing the OSS endpoint for region %s got an error: %#v", region, err)
+	}
+	return endpointsResponse, nil
+}
+
+func getSdkConfig() *sdk.Config {
+	return sdk.NewConfig().
+		WithMaxRetryTime(5).
+		WithTimeout(time.Duration(30) * time.Second).
+		WithGoRoutinePoolSize(10).
+		WithDebug(false).
+		WithHttpTransport(getTransport()).
+		WithScheme("HTTPS")
+}
+func getTransport() *http.Transport {
+	handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout"))
+	if err != nil {
+		handshakeTimeout = 120
+	}
+	transport := cleanhttp.DefaultTransport()
+	transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second
+	transport.Proxy = http.ProxyFromEnvironment
+	return transport
+}
+
+type Invoker struct {
+	catchers []*Catcher
+}
+
+type Catcher struct {
+	Reason           string
+	RetryCount       int
+	RetryWaitSeconds int
+}
+
+const TerraformUA = "HashiCorp-Terraform"
+
+var TerraformVersion = strings.TrimSuffix(version.String(), "-dev")
+var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3}
+var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3}
+
+func NewInvoker() Invoker {
+	i := Invoker{}
+	i.AddCatcher(ClientErrorCatcher)
+	i.AddCatcher(ServiceBusyCatcher)
+	return i
+}
+
+func (a *Invoker) AddCatcher(catcher Catcher) {
+	a.catchers = append(a.catchers, &catcher)
+}
+
+func (a *Invoker) Run(f func() error) error {
+	err := f()
+
+	if err == nil {
+		return nil
+	}
+
+	for _, catcher := range a.catchers {
+		if strings.Contains(err.Error(), catcher.Reason) {
+			catcher.RetryCount--
+
+			if catcher.RetryCount <= 0 {
+				return fmt.Errorf("Retry timeout and got an error: %#v", err)
+			} else {
+				time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second)
+				return a.Run(f)
+			}
+		}
+	}
+	return err
+}
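For context, a minimal usage sketch of the Invoker helper above (illustrative only; flakyOSSCall is a hypothetical operation, not part of the patch): errors whose text matches a registered Catcher are retried with a fixed wait, anything else is returned immediately.

    invoker := NewInvoker()
    err := invoker.Run(func() error {
        // hypothetical call that can fail transiently with, e.g.,
        // an error containing "ServiceUnavailable"
        return flakyOSSCall()
    })
    if err != nil {
        log.Printf("[ERROR] giving up after retries: %s", err)
    }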
diff --git a/backend/remote-state/oss/backend_state.go b/backend/remote-state/oss/backend_state.go
new file mode 100644
index 000000000000..28c40ee4afb3
--- /dev/null
+++ b/backend/remote-state/oss/backend_state.go
@@ -0,0 +1,199 @@
+package oss
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/states"
+
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
+	"log"
+	"path"
+)
+
+const (
+	lockFileSuffix = ".tflock"
+)
+
+// get a remote client configured for this state
+func (b *Backend) remoteClient(name string) (*RemoteClient, error) {
+	if name == "" {
+		return nil, errors.New("missing state name")
+	}
+
+	client := &RemoteClient{
+		ossClient:            b.ossClient,
+		bucketName:           b.bucketName,
+		stateFile:            b.stateFile(name),
+		lockFile:             b.lockFile(name),
+		serverSideEncryption: b.serverSideEncryption,
+		acl:                  b.acl,
+		otsTable:             b.otsTable,
+		otsClient:            b.otsClient,
+	}
+	if b.otsEndpoint != "" && b.otsTable != "" {
+		table, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{
+			TableName: b.otsTable,
+		})
+		if err != nil {
+			return client, fmt.Errorf("Error describing table store %s: %#v", b.otsTable, err)
+		}
+		for _, t := range table.TableMeta.SchemaEntry {
+			pkMeta := TableStorePrimaryKeyMeta{
+				PKName: *t.Name,
+			}
+			if *t.Type == tablestore.PrimaryKeyType_INTEGER {
+				pkMeta.PKType = "Integer"
+			} else if *t.Type == tablestore.PrimaryKeyType_STRING {
+				pkMeta.PKType = "String"
+			} else if *t.Type == tablestore.PrimaryKeyType_BINARY {
+				pkMeta.PKType = "Binary"
+			} else {
+				return client, fmt.Errorf("Unsupported PrimaryKey type: %d", *t.Type)
+			}
+			client.otsTablePK = pkMeta
+			break
+		}
+	}
+
+	return client, nil
+}
+
+func (b *Backend) Workspaces() ([]string, error) {
+	bucket, err := b.ossClient.Bucket(b.bucketName)
+	if err != nil {
+		return []string{""}, fmt.Errorf("Error getting bucket: %#v", err)
+	}
+
+	var options []oss.Option
+	options = append(options, oss.Prefix(b.statePrefix+"/"))
+	resp, err := bucket.ListObjects(options...)
+	if err != nil {
+		return nil, err
+	}
+
+	result := []string{backend.DefaultStateName}
+	prefix := b.statePrefix
+	for _, obj := range resp.Objects {
+		// we have 3 parts: the state prefix, the workspace name, and the
+		// state file: <prefix>/<workspace-name>/<key>
+		if path.Join(b.statePrefix, b.stateKey) == obj.Key {
+			// filter the default workspace
+			continue
+		}
+
+		parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/")
+		if len(parts) > 0 && parts[0] != "" {
+			result = append(result, parts[0])
+		}
+	}
+
+	sort.Strings(result[1:])
+	return result, nil
+}
+
+func (b *Backend) DeleteWorkspace(name string) error {
+	if name == backend.DefaultStateName || name == "" {
+		return fmt.Errorf("can't delete default state")
+	}
+
+	client, err := b.remoteClient(name)
+	if err != nil {
+		return err
+	}
+	return client.Delete()
+}
+
+func (b *Backend) StateMgr(name string) (state.State, error) {
+	client, err := b.remoteClient(name)
+	if err != nil {
+		return nil, err
+	}
+	stateMgr := &remote.State{Client: client}
+
+	// Check to see if this state already exists.
+	existing, err := b.Workspaces()
+	if err != nil {
+		return nil, err
+	}
+
+	log.Printf("[DEBUG] Current workspace name: %s. All workspaces: %#v", name, existing)
+
+	exists := false
+	for _, s := range existing {
+		if s == name {
+			exists = true
+			break
+		}
+	}
+	// We need to create the object so it's listed by States.
+	if !exists {
+		// take a lock on this state while we write it
+		lockInfo := state.NewLockInfo()
+		lockInfo.Operation = "init"
+		lockId, err := client.Lock(lockInfo)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to lock OSS state: %s", err)
+		}
+
+		// Local helper function so we can call it multiple places
+		lockUnlock := func(e error) error {
+			if err := stateMgr.Unlock(lockId); err != nil {
+				return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err)
+			}
+			return e
+		}
+
+		// Grab the value
+		if err := stateMgr.RefreshState(); err != nil {
+			err = lockUnlock(err)
+			return nil, err
+		}
+
+		// If we have no state, we have to create an empty state
+		if v := stateMgr.State(); v == nil {
+			if err := stateMgr.WriteState(states.NewState()); err != nil {
+				err = lockUnlock(err)
+				return nil, err
+			}
+			if err := stateMgr.PersistState(); err != nil {
+				err = lockUnlock(err)
+				return nil, err
+			}
+		}
+
+		// Unlock, the state should now be initialized
+		if err := lockUnlock(nil); err != nil {
+			return nil, err
+		}
+	}
+	return stateMgr, nil
+}
+
+func (b *Backend) stateFile(name string) string {
+	if name == backend.DefaultStateName {
+		return path.Join(b.statePrefix, b.stateKey)
+	}
+	return path.Join(b.statePrefix, name, b.stateKey)
+}
+
+func (b *Backend) lockFile(name string) string {
+	return b.stateFile(name) + lockFileSuffix
+}
+
+const stateUnlockError = `
+Error unlocking Alibaba Cloud OSS state file:
+
+Lock ID: %s
+Error message: %#v
+
+You may have to force-unlock this state in order to use it again.
+The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created.
+`
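To make the object layout concrete, a runnable sketch (illustrative only, using the backend's defaults of prefix "env:" and key "terraform.tfstate") of the keys stateFile produces:

    package main

    import (
        "fmt"
        "path"
    )

    func main() {
        prefix, key := "env:", "terraform.tfstate" // backend defaults
        fmt.Println(path.Join(prefix, key))        // default workspace: env:/terraform.tfstate
        fmt.Println(path.Join(prefix, "dev", key)) // workspace "dev":   env:/dev/terraform.tfstate
    }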
diff --git a/backend/remote-state/oss/backend_test.go b/backend/remote-state/oss/backend_test.go
new file mode 100644
index 000000000000..c23d935ab2b1
--- /dev/null
+++ b/backend/remote-state/oss/backend_test.go
@@ -0,0 +1,171 @@
+package oss
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"strings"
+)
+
+// verify that we are running acceptance tests or the OSS tests specifically
+func testACC(t *testing.T) {
+	skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_OSS_TEST") == ""
+	if skip {
+		t.Log("oss backend tests require setting TF_ACC or TF_OSS_TEST")
+		t.Skip()
+	}
+	if os.Getenv("ALICLOUD_REGION") == "" {
+		os.Setenv("ALICLOUD_REGION", "cn-beijing")
+	}
+}
+
+func TestBackend_impl(t *testing.T) {
+	var _ backend.Backend = new(Backend)
+}
+
+func TestBackendConfig(t *testing.T) {
+	testACC(t)
+	config := map[string]interface{}{
+		"region":              "cn-beijing",
+		"bucket":              "terraform-backend-oss-test",
+		"prefix":              "mystate",
+		"key":                 "first.tfstate",
+		"tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
+		"tablestore_table":    "TableStore",
+	}
+
+	b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config)).(*Backend)
+
+	if !strings.HasPrefix(b.ossClient.Config.Endpoint, "https://oss-cn-beijing") {
+		t.Fatalf("Incorrect region was provided")
+	}
+	if b.bucketName != "terraform-backend-oss-test" {
+		t.Fatalf("Incorrect bucketName was provided")
+	}
+	if b.statePrefix != "mystate" {
+		t.Fatalf("Incorrect state file path was provided")
+	}
+	if b.stateKey != "first.tfstate" {
+		t.Fatalf("Incorrect keyName was provided")
+	}
+
+	if b.ossClient.Config.AccessKeyID == "" {
+		t.Fatalf("No Access Key Id was provided")
+	}
+	if b.ossClient.Config.AccessKeySecret == "" {
+		t.Fatalf("No Secret Access Key was provided")
+	}
+}
+
+func TestBackendConfig_invalidKey(t *testing.T) {
+	testACC(t)
+	cfg := hcl2shim.HCL2ValueFromConfigValue(map[string]interface{}{
+		"region":              "cn-beijing",
+		"bucket":              "terraform-backend-oss-test",
+		"prefix":              "/leading-slash",
+		"name":                "/test.tfstate",
+		"tablestore_endpoint": "https://terraformstate.cn-beijing.ots.aliyuncs.com",
+		"tablestore_table":    "TableStore",
+	})
+
+	_, results := New().PrepareConfig(cfg)
+	if !results.HasErrors() {
+		t.Fatal("expected config validation error")
+	}
+}
+
+func TestBackend(t *testing.T) {
+	testACC(t)
+
+	bucketName := fmt.Sprintf("terraform-remote-oss-test-%x", time.Now().Unix())
+	statePrefix := "multi/level/path/"
+
+	b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket": bucketName,
+		"prefix": statePrefix,
+	})).(*Backend)
+
+	b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket": bucketName,
+		"prefix": statePrefix,
+	})).(*Backend)
+
+	createOSSBucket(t, b1.ossClient, bucketName)
+	defer deleteOSSBucket(t, b1.ossClient, bucketName)
+
+	backend.TestBackendStates(t, b1)
+	backend.TestBackendStateLocks(t, b1, b2)
+	backend.TestBackendStateForceUnlock(t, b1, b2)
+}
+func createOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
+	// Be clear about what we're doing in case the user needs to clean this up later.
+	if err := ossClient.CreateBucket(bucketName); err != nil {
+		t.Fatal("failed to create test OSS bucket:", err)
+	}
+}
+
+func deleteOSSBucket(t *testing.T, ossClient *oss.Client, bucketName string) {
+	warning := "WARNING: Failed to delete the test OSS bucket. It may have been left in your Alibaba Cloud account and may incur storage charges. (error was %s)"
+
+	// first we have to get rid of the env objects, or we can't delete the bucket
+	bucket, err := ossClient.Bucket(bucketName)
+	if err != nil {
+		t.Fatal("Error getting bucket:", err)
+		return
+	}
+	objects, err := bucket.ListObjects()
+	if err != nil {
+		t.Logf(warning, err)
+		return
+	}
+	for _, obj := range objects.Objects {
+		if err := bucket.DeleteObject(obj.Key); err != nil {
+			// this will need cleanup no matter what, so just warn and exit
+			t.Logf(warning, err)
+			return
+		}
+	}
+
+	if err := ossClient.DeleteBucket(bucketName); err != nil {
+		t.Logf(warning, err)
+	}
+}
+
+// create the TableStore table, and wait until we can query it.
+func createTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
+	tableMeta := new(tablestore.TableMeta)
+	tableMeta.TableName = tableName
+	tableMeta.AddPrimaryKeyColumn("testbackend", tablestore.PrimaryKeyType_STRING)
+
+	tableOption := new(tablestore.TableOption)
+	tableOption.TimeToAlive = -1
+	tableOption.MaxVersion = 1
+
+	reservedThroughput := new(tablestore.ReservedThroughput)
+
+	_, err := otsClient.CreateTable(&tablestore.CreateTableRequest{
+		TableMeta:          tableMeta,
+		TableOption:        tableOption,
+		ReservedThroughput: reservedThroughput,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func deleteTablestoreTable(t *testing.T, otsClient *tablestore.TableStoreClient, tableName string) {
+	params := &tablestore.DeleteTableRequest{
+		TableName: tableName,
+	}
+	_, err := otsClient.DeleteTable(params)
+	if err != nil {
+		t.Logf("WARNING: Failed to delete the test TableStore table %q. It has been left in your Alibaba Cloud account and may incur charges. (error was %s)", tableName, err)
+	}
+}
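Before the client implementation: the consistency scheme below stores the MD5 of the last-written state in TableStore, and Get retries until the payload read from OSS matches that digest. A runnable sketch of the core comparison (placeholder state bodies, not real backend output):

    package main

    import (
        "bytes"
        "crypto/md5"
        "fmt"
    )

    func main() {
        written := []byte(`{"serial": 2}`)  // what Put uploaded
        readBack := []byte(`{"serial": 1}`) // a stale read from OSS
        expected := md5.Sum(written)        // digest putMD5 recorded in TableStore
        got := md5.Sum(readBack)            // digest of the payload Get fetched
        if !bytes.Equal(expected[:], got[:]) {
            fmt.Println("state md5 mismatch: poll again until the consistency deadline")
        }
    }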
diff --git a/backend/remote-state/oss/client.go b/backend/remote-state/oss/client.go
new file mode 100644
index 000000000000..e50f801e8406
--- /dev/null
+++ b/backend/remote-state/oss/client.go
@@ -0,0 +1,484 @@
+package oss
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"encoding/hex"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
+	"github.com/hashicorp/go-multierror"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/pkg/errors"
+	"log"
+	"sync"
+	"time"
+)
+
+// Store the last saved serial in tablestore with this suffix for consistency checks.
+const (
+	stateIDSuffix = "-md5"
+	statePKValue  = "terraform-remote-state-lock"
+)
+
+var (
+	// The amount of time we will retry a state waiting for it to match the
+	// expected checksum.
+	consistencyRetryTimeout = 10 * time.Second
+
+	// delay when polling the state
+	consistencyRetryPollInterval = 2 * time.Second
+)
+
+// test hook called when checksums don't match
+var testChecksumHook func()
+
+type TableStorePrimaryKeyMeta struct {
+	PKName string
+	PKType string
+}
+
+type RemoteClient struct {
+	ossClient            *oss.Client
+	otsClient            *tablestore.TableStoreClient
+	bucketName           string
+	stateFile            string
+	lockFile             string
+	serverSideEncryption bool
+	acl                  string
+	info                 *state.LockInfo
+	mu                   sync.Mutex
+	otsTable             string
+	otsTablePK           TableStorePrimaryKeyMeta
+}
+
+func (c *RemoteClient) Get() (payload *remote.Payload, err error) {
+	deadline := time.Now().Add(consistencyRetryTimeout)
+
+	// If we have a checksum, and the returned payload doesn't match, we retry
+	// until the deadline.
+	for {
+		payload, err = c.getObj()
+		if err != nil {
+			return nil, err
+		}
+
+		// If the remote state was manually removed the payload will be nil,
+		// but if there's still a digest entry for that state we will still try
+		// to compare the MD5 below.
+		var digest []byte
+		if payload != nil {
+			digest = payload.MD5
+		}
+
+		// verify that this state is what we expect
+		if expected, err := c.getMD5(); err != nil {
+			log.Printf("[WARN] failed to fetch state md5: %s", err)
+		} else if len(expected) > 0 && !bytes.Equal(expected, digest) {
+			log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest)
+
+			if testChecksumHook != nil {
+				testChecksumHook()
+			}
+
+			if time.Now().Before(deadline) {
+				time.Sleep(consistencyRetryPollInterval)
+				log.Println("[INFO] retrying OSS RemoteClient.Get...")
+				continue
+			}
+
+			return nil, fmt.Errorf(errBadChecksumFmt, digest)
+		}
+
+		break
+	}
+	return payload, nil
+}
+func (c *RemoteClient) Put(data []byte) error {
+	bucket, err := c.ossClient.Bucket(c.bucketName)
+	if err != nil {
+		return fmt.Errorf("Error getting bucket: %#v", err)
+	}
+
+	body := bytes.NewReader(data)
+
+	var options []oss.Option
+	if c.acl != "" {
+		options = append(options, oss.ACL(oss.ACLType(c.acl)))
+	}
+	options = append(options, oss.ContentType("application/json"))
+	if c.serverSideEncryption {
+		options = append(options, oss.ServerSideEncryption("AES256"))
+	}
+	options = append(options, oss.ContentLength(int64(len(data))))
+
+	if body != nil {
+		if err := bucket.PutObject(c.stateFile, body, options...); err != nil {
+			return fmt.Errorf("Failed to upload state %s: %#v", c.stateFile, err)
+		}
+	}
+
+	sum := md5.Sum(data)
+	if err := c.putMD5(sum[:]); err != nil {
+		// if this errors out, we unfortunately have to error out altogether,
+		// since the next Get will inevitably fail.
+		return fmt.Errorf("Failed to store state MD5: %s", err)
+	}
+	return nil
+}
+
+func (c *RemoteClient) Delete() error {
+	bucket, err := c.ossClient.Bucket(c.bucketName)
+	if err != nil {
+		return fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
+	}
+
+	log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile)
+
+	if err := bucket.DeleteObject(c.stateFile); err != nil {
+		return fmt.Errorf("Error deleting state %s: %#v", c.stateFile, err)
+	}
+
+	if err := c.deleteMD5(); err != nil {
+		log.Printf("[WARN] Error deleting state MD5: %s", err)
+	}
+	return nil
+}
+
+func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
+	if c.otsTable == "" {
+		return "", nil
+	}
+
+	if info.ID == "" {
+		lockID, err := uuid.GenerateUUID()
+		if err != nil {
+			return "", err
+		}
+		info.ID = lockID
+	}
+
+	putParams := &tablestore.PutRowChange{
+		TableName: c.otsTable,
+		PrimaryKey: &tablestore.PrimaryKey{
+			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+				{
+					ColumnName: c.otsTablePK.PKName,
+					Value:      c.getPKValue(),
+				},
+			},
+		},
+		Columns: []tablestore.AttributeColumn{
+			{
+				ColumnName: "LockID",
+				Value:      c.lockFile,
+			},
+			{
+				ColumnName: "Info",
+				Value:      string(info.Marshal()),
+			},
+		},
+		Condition: &tablestore.RowCondition{
+			RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
+		},
+	}
+
+	log.Printf("[DEBUG] Recording state lock in tablestore: %#v", putParams)
+
+	_, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
+		PutRowChange: putParams,
+	})
+	if err != nil {
+		log.Printf("[WARN] Error storing state lock in tablestore: %#v", err)
+		lockInfo, infoErr := c.getLockInfo()
+		if infoErr != nil {
+			log.Printf("[WARN] Error getting lock info: %#v", err)
+			err = multierror.Append(err, infoErr)
+		}
+		lockErr := &state.LockError{
+			Err:  err,
+			Info: lockInfo,
+		}
+		log.Printf("[WARN] state lock error: %#v", lockErr)
+		return "", lockErr
+	}
+
+	return info.ID, nil
+}
+
+func (c *RemoteClient) getMD5() ([]byte, error) {
+	if c.otsTable == "" {
+		return nil, nil
+	}
+
+	getParams := &tablestore.SingleRowQueryCriteria{
+		TableName: c.otsTable,
+		PrimaryKey: &tablestore.PrimaryKey{
+			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+				{
+					ColumnName: c.otsTablePK.PKName,
+					Value:      c.getPKValue(),
+				},
+			},
+		},
+		ColumnsToGet: []string{"LockID", "Digest"},
+		MaxVersion:   1,
+	}
+
+	log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams)
+
+	object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
+		SingleRowQueryCriteria: getParams,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var val string
+	if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 {
+		val = v[0].Value.(string)
+	}
+
+	sum, err := hex.DecodeString(val)
+	if err != nil || len(sum) != md5.Size {
+		return nil, errors.New("invalid md5")
+	}
+
+	return sum, nil
+}
+
+// store the hash of the state so that clients can check for stale state files.
+func (c *RemoteClient) putMD5(sum []byte) error {
+	if c.otsTable == "" {
+		return nil
+	}
+
+	if len(sum) != md5.Size {
+		return errors.New("invalid payload md5")
+	}
+
+	putParams := &tablestore.PutRowChange{
+		TableName: c.otsTable,
+		PrimaryKey: &tablestore.PrimaryKey{
+			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+				{
+					ColumnName: c.otsTablePK.PKName,
+					Value:      c.getPKValue(),
+				},
+			},
+		},
+		Columns: []tablestore.AttributeColumn{
+			{
+				ColumnName: "LockID",
+				Value:      c.lockPath() + stateIDSuffix,
+			},
+			{
+				ColumnName: "Digest",
+				Value:      hex.EncodeToString(sum),
+			},
+		},
+		Condition: &tablestore.RowCondition{
+			RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST,
+		},
+	}
+
+	log.Printf("[DEBUG] Recording state serial in tablestore: %#v", putParams)
+
+	_, err := c.otsClient.PutRow(&tablestore.PutRowRequest{
+		PutRowChange: putParams,
+	})
+	if err != nil {
+		log.Printf("[WARN] failed to record state serial in tablestore: %s", err)
+	}
+
+	return nil
+}
+
+// remove the hash value for a deleted state
+func (c *RemoteClient) deleteMD5() error {
+	if c.otsTable == "" {
+		return nil
+	}
+
+	params := &tablestore.DeleteRowRequest{
+		DeleteRowChange: &tablestore.DeleteRowChange{
+			TableName: c.otsTable,
+			PrimaryKey: &tablestore.PrimaryKey{
+				PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+					{
+						ColumnName: c.otsTablePK.PKName,
+						Value:      c.getPKValue(),
+					},
+				},
+			},
+			Condition: &tablestore.RowCondition{
+				RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
+			},
+		},
+	}
+
+	log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params)
+
+	if _, err := c.otsClient.DeleteRow(params); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
+	getParams := &tablestore.SingleRowQueryCriteria{
+		TableName: c.otsTable,
+		PrimaryKey: &tablestore.PrimaryKey{
+			PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+				{
+					ColumnName: c.otsTablePK.PKName,
+					Value:      c.getPKValue(),
+				},
+			},
+		},
+		ColumnsToGet: []string{"LockID", "Info"},
+		MaxVersion:   1,
+	}
+
+	log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams)
+
+	object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{
+		SingleRowQueryCriteria: getParams,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var infoData string
+	if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 {
+		infoData = v[0].Value.(string)
+	}
+	lockInfo := &state.LockInfo{}
+	err = json.Unmarshal([]byte(infoData), lockInfo)
+	if err != nil {
+		return nil, err
+	}
+	return lockInfo, nil
+}
+
+func (c *RemoteClient) Unlock(id string) error {
+	if c.otsTable == "" {
+		return nil
+	}
+
+	lockErr := &state.LockError{}
+
+	lockInfo, err := c.getLockInfo()
+	if err != nil {
+		lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err)
+		return lockErr
+	}
+	lockErr.Info = lockInfo
+
+	if lockInfo.ID != id {
+		lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id)
+		return lockErr
+	}
+	params := &tablestore.DeleteRowRequest{
+		DeleteRowChange: &tablestore.DeleteRowChange{
+			TableName: c.otsTable,
+			PrimaryKey: &tablestore.PrimaryKey{
+				PrimaryKeys: []*tablestore.PrimaryKeyColumn{
+					{
+						ColumnName: c.otsTablePK.PKName,
+						Value:      c.getPKValue(),
+					},
+				},
+			},
+			Condition: &tablestore.RowCondition{
+				RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST,
+			},
+		},
+	}
+
+	log.Printf("[DEBUG] Deleting state lock from tablestore: %#v", params)
+
+	_, err = c.otsClient.DeleteRow(params)
+	if err != nil {
+		lockErr.Err = err
+		return lockErr
+	}
+
+	return nil
+}
+func (c *RemoteClient) lockPath() string {
+	return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile)
+}
+
+func (c *RemoteClient) getObj() (*remote.Payload, error) {
+	bucket, err := c.ossClient.Bucket(c.bucketName)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err)
+	}
+
+	if exist, err := bucket.IsObjectExist(c.stateFile); err != nil {
+		return nil, fmt.Errorf("Error checking if object %s exists: %#v", c.stateFile, err)
+	} else if !exist {
+		return nil, nil
+	}
+
+	var options []oss.Option
+	output, err := bucket.GetObject(c.stateFile, options...)
+	if err != nil {
+		return nil, fmt.Errorf("Error getting object: %#v", err)
+	}
+
+	buf := bytes.NewBuffer(nil)
+	if _, err := io.Copy(buf, output); err != nil {
+		return nil, fmt.Errorf("Failed to read remote state: %s", err)
+	}
+	sum := md5.Sum(buf.Bytes())
+	payload := &remote.Payload{
+		Data: buf.Bytes(),
+		MD5:  sum[:],
+	}
+
+	// If there was no data, then return nil
+	if len(payload.Data) == 0 {
+		return nil, nil
+	}
+
+	return payload, nil
+}
+
+func (c *RemoteClient) getPKValue() (value interface{}) {
+	value = statePKValue
+	if c.otsTablePK.PKType == "Integer" {
+		value = hashcode.String(statePKValue)
+	} else if c.otsTablePK.PKType == "Binary" {
+		value = stringToBin(statePKValue)
+	}
+	return
+}
+
+func stringToBin(s string) (binString string) {
+	for _, c := range s {
+		binString = fmt.Sprintf("%s%b", binString, c)
+	}
+	return
+}
+
+const errBadChecksumFmt = `state data in OSS does not have the expected content.
+
+This may be caused by unusually long delays in OSS processing a previous state
+update. Please wait for a minute or two and try again. If this problem
+persists, and neither OSS nor TableStore are experiencing an outage, you may need
+to manually verify the remote state and update the Digest value stored in the
+TableStore table to the following value: %x
+`
diff --git a/backend/remote-state/oss/client_test.go b/backend/remote-state/oss/client_test.go
new file mode 100644
index 000000000000..1749765264e3
--- /dev/null
+++ b/backend/remote-state/oss/client_test.go
@@ -0,0 +1,330 @@
+package oss
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"bytes"
+	"crypto/md5"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/states/statefile"
+)
+
+// NOTE: Before running this testcase, please create an OTS instance called 'tf-oss-remote'
+var RemoteTestUsedOTSEndpoint = "https://tf-oss-remote.cn-hangzhou.ots.aliyuncs.com"
+
+func TestRemoteClient_impl(t *testing.T) {
+	var _ remote.Client = new(RemoteClient)
+	var _ remote.ClientLocker = new(RemoteClient)
+}
+
+func TestRemoteClient(t *testing.T) {
+	testACC(t)
+	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
+	path := "testState"
+
+	b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":  bucketName,
+		"prefix":  path,
+		"encrypt": true,
+	})).(*Backend)
+
+	createOSSBucket(t, b.ossClient, bucketName)
+	defer deleteOSSBucket(t, b.ossClient, bucketName)
+
+	state, err := b.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	remote.TestClient(t, state.(*remote.State).Client)
+}
+
+func TestRemoteClientLocks(t *testing.T) {
+	testACC(t)
+	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
+	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
+	path := "testState"
+
+	b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"encrypt":             true,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"encrypt":             true,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	createOSSBucket(t, b1.ossClient, bucketName)
+	defer deleteOSSBucket(t, b1.ossClient, bucketName)
+	createTablestoreTable(t, b1.otsClient, tableName)
+	defer deleteTablestoreTable(t, b1.otsClient, tableName)
+
+	s1, err := b1.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	s2, err := b2.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	remote.TestRemoteLocks(t, s1.(*remote.State).Client, s2.(*remote.State).Client)
+}
+
+// verify that we can unlock a state with an existing lock
+func TestRemoteForceUnlock(t *testing.T) {
+	testACC(t)
+	bucketName := fmt.Sprintf("tf-remote-oss-test-force-%x", time.Now().Unix())
+	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
+	path := "testState"
+
+	b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"encrypt":             true,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"encrypt":             true,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	createOSSBucket(t, b1.ossClient, bucketName)
+	defer deleteOSSBucket(t, b1.ossClient, bucketName)
+	createTablestoreTable(t, b1.otsClient, tableName)
+	defer deleteTablestoreTable(t, b1.otsClient, tableName)
+
+	// first test with the default state
+	s1, err := b1.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	info := state.NewLockInfo()
+	info.Operation = "test"
+	info.Who = "clientA"
+
+	lockID, err := s1.Lock(info)
+	if err != nil {
+		t.Fatal("unable to get initial lock:", err)
+	}
+
+	// s1 is now locked, get the same state through s2 and unlock it
+	s2, err := b2.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal("failed to get default state to force unlock:", err)
+	}
+
+	if err := s2.Unlock(lockID); err != nil {
+		t.Fatal("failed to force-unlock default state")
+	}
+
+	// now try the same thing with a named state
+	s1, err = b1.StateMgr("test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	info = state.NewLockInfo()
+	info.Operation = "test"
+	info.Who = "clientA"
+
+	lockID, err = s1.Lock(info)
+	if err != nil {
+		t.Fatal("unable to get initial lock:", err)
+	}
+
+	// s1 is now locked, get the same state through s2 and unlock it
+	s2, err = b2.StateMgr("test")
+	if err != nil {
+		t.Fatal("failed to get named state to force unlock:", err)
+	}
+
+	if err = s2.Unlock(lockID); err != nil {
+		t.Fatal("failed to force-unlock named state")
+	}
+}
+
+func TestRemoteClient_clientMD5(t *testing.T) {
+	testACC(t)
+
+	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
+	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
+	path := "testState"
+
+	b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	createOSSBucket(t, b.ossClient, bucketName)
+	defer deleteOSSBucket(t, b.ossClient, bucketName)
+	createTablestoreTable(t, b.otsClient, tableName)
+	defer deleteTablestoreTable(t, b.otsClient, tableName)
+
+	s, err := b.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client := s.(*remote.State).Client.(*RemoteClient)
+
+	sum := md5.Sum([]byte("test"))
+
+	if err := client.putMD5(sum[:]); err != nil {
+		t.Fatal(err)
+	}
+
+	getSum, err := client.getMD5()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(getSum, sum[:]) {
+		t.Fatalf("getMD5 returned the wrong checksum: expected %x, got %x", sum[:], getSum)
+	}
+
+	if err := client.deleteMD5(); err != nil {
+		t.Fatal(err)
+	}
+
+	if getSum, err := client.getMD5(); err == nil {
+		t.Fatalf("expected getMD5 error, got none. checksum: %x", getSum)
+	}
+}
+
+// verify that a client won't return a state with an incorrect checksum.
+func TestRemoteClient_stateChecksum(t *testing.T) {
+	testACC(t)
+
+	bucketName := fmt.Sprintf("tf-remote-oss-test-%x", time.Now().Unix())
+	tableName := fmt.Sprintf("tfRemoteTestForce%x", time.Now().Unix())
+	path := "testState"
+
+	b1 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket":              bucketName,
+		"prefix":              path,
+		"tablestore_table":    tableName,
+		"tablestore_endpoint": RemoteTestUsedOTSEndpoint,
+	})).(*Backend)
+
+	createOSSBucket(t, b1.ossClient, bucketName)
+	defer deleteOSSBucket(t, b1.ossClient, bucketName)
+	createTablestoreTable(t, b1.otsClient, tableName)
+	defer deleteTablestoreTable(t, b1.otsClient, tableName)
+
+	s1, err := b1.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client1 := s1.(*remote.State).Client
+
+	// create an old and new state version to persist
+	s := state.TestStateInitial()
+	sf := &statefile.File{State: s}
+	var oldState bytes.Buffer
+	if err := statefile.Write(sf, &oldState); err != nil {
+		t.Fatal(err)
+	}
+	sf.Serial++
+	var newState bytes.Buffer
+	if err := statefile.Write(sf, &newState); err != nil {
+		t.Fatal(err)
+	}
+
+	// Use b2 without a tablestore_table so we can write the state directly,
+	// bypassing the lock table; client2 will write the "incorrect" state,
+	// simulating OSS eventual consistency delays.
+	b2 := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
+		"bucket": bucketName,
+		"prefix": path,
+	})).(*Backend)
+	s2, err := b2.StateMgr(backend.DefaultStateName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client2 := s2.(*remote.State).Client
+
+	// write the new state through client2 so that there is no checksum yet
+	if err := client2.Put(newState.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+
+	// verify that we can pull a state without a checksum
+	if _, err := client1.Get(); err != nil {
+		t.Fatal(err)
+	}
+
+	// write the new state back with its checksum
+	if err := client1.Put(newState.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+
+	// put an empty state in place to check for panics during get
+	if err := client2.Put([]byte{}); err != nil {
+		t.Fatal(err)
+	}
+
+	// remove the timeouts so we can fail immediately
+	origTimeout := consistencyRetryTimeout
+	origInterval := consistencyRetryPollInterval
+	defer func() {
+		consistencyRetryTimeout = origTimeout
+		consistencyRetryPollInterval = origInterval
+	}()
+	consistencyRetryTimeout = 0
+	consistencyRetryPollInterval = 0
+
+	// fetching an empty state through client1 should now error out due to a
+	// mismatched checksum.
+	if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
+		t.Fatalf("expected state checksum error: got %s", err)
+	}
+
+	// put the old state in place of the new, without updating the checksum
+	if err := client2.Put(oldState.Bytes()); err != nil {
+		t.Fatal(err)
+	}
+
+	// fetching the wrong state through client1 should now error out due to a
+	// mismatched checksum.
+	if _, err := client1.Get(); !strings.HasPrefix(err.Error(), errBadChecksumFmt[:80]) {
+		t.Fatalf("expected state checksum error: got %s", err)
+	}
+
+	// update the state with the correct one after we Get again
+	testChecksumHook = func() {
+		if err := client2.Put(newState.Bytes()); err != nil {
+			t.Fatal(err)
+		}
+		testChecksumHook = nil
+	}
+
+	consistencyRetryTimeout = origTimeout
+
+	// this final Get will fail the checksum verification; the above callback
+	// will update the state with the correct version, and Get should retry
+	// automatically.
+ if _, err := client1.Get(); err != nil { + t.Fatal(err) + } +} diff --git a/go.mod b/go.mod index b726bff34d9d..6a13b8a14e33 100644 --- a/go.mod +++ b/go.mod @@ -8,12 +8,16 @@ require ( github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect github.com/agext/levenshtein v1.2.2 github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 // indirect + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a + github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 + github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible github.com/apparentlymart/go-cidr v1.0.0 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go v1.19.18 + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect github.com/blang/semver v3.5.1+incompatible github.com/boltdb/bolt v1.3.1 // indirect github.com/chzyer/logex v1.1.10 // indirect @@ -69,6 +73,7 @@ require ( github.com/hashicorp/vault v0.10.4 github.com/jonboulle/clockwork v0.1.0 // indirect github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 + github.com/json-iterator/go v1.1.5 // indirect github.com/jtolds/gls v4.2.1+incompatible // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba // indirect @@ -90,9 +95,11 @@ require ( github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 github.com/mitchellh/reflectwalk v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect - github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 // indirect + github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 github.com/posener/complete v1.2.1 github.com/satori/go.uuid v1.2.0 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect diff --git a/go.sum b/go.sum index 3327b71bf398..402095caa1e6 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,12 @@ github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXva github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 h1:LoeFxdq5zUCBQPhbQKE6zvoGwHMxCBlqwbH9+9kHoHA= github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a h1:APorzFpCcv6wtD5vmRWYqNm4N55kbepL7c7kTq9XI6A= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 h1:FrF4uxA24DF3ARNXVbUin3wa5fDLaB1Cy8mKks/LRz4= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM= 
+github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM= github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= @@ -51,6 +57,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k= github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/aws/aws-sdk-go v1.19.18 h1:Hb3+b9HCqrOrbAtFstUWg7H5TQ+/EcklJtE8VShVs8o= github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= @@ -234,6 +242,8 @@ github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg= github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro= @@ -307,6 +317,10 @@ github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3 github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= @@ -525,6 +539,8 @@ 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go new file mode 100644 index 000000000000..7f20b7a40c0d --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go @@ -0,0 +1,18 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +type Credential interface { +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go new file mode 100644 index 000000000000..68f8226330f6 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go @@ -0,0 +1,34 @@ +package credentials + +// Deprecated: Use AccessKeyCredential in this package instead. +type BaseCredential struct { + AccessKeyId string + AccessKeySecret string +} + +type AccessKeyCredential struct { + AccessKeyId string + AccessKeySecret string +} + +// Deprecated: Use NewAccessKeyCredential in this package instead. 
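+// A migration sketch (illustrative only): the replacement constructor takes
+// the same two arguments, so callers can switch with a one-line change:
+//
+//     cred := credentials.NewAccessKeyCredential(accessKeyId, accessKeySecret)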
+func NewBaseCredential(accessKeyId, accessKeySecret string) *BaseCredential {
+    return &BaseCredential{
+        AccessKeyId:     accessKeyId,
+        AccessKeySecret: accessKeySecret,
+    }
+}
+
+// ToAccessKeyCredential converts the deprecated credential type to its replacement.
+func (baseCred *BaseCredential) ToAccessKeyCredential() *AccessKeyCredential {
+    return &AccessKeyCredential{
+        AccessKeyId:     baseCred.AccessKeyId,
+        AccessKeySecret: baseCred.AccessKeySecret,
+    }
+}
+
+func NewAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
+    return &AccessKeyCredential{
+        AccessKeyId:     accessKeyId,
+        AccessKeySecret: accessKeySecret,
+    }
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go new file mode 100644 index 000000000000..6d4763e663fe --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go @@ -0,0 +1,12 @@
+package credentials
+
+type BearerTokenCredential struct {
+    BearerToken string
+}
+
+// NewBearerTokenCredential returns a BearerTokenCredential wrapping the given token.
+func NewBearerTokenCredential(token string) *BearerTokenCredential {
+    return &BearerTokenCredential{
+        BearerToken: token,
+    }
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go new file mode 100644 index 000000000000..55a5c2da0367 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go @@ -0,0 +1,29 @@
+package credentials
+
+// ToEcsRamRoleCredential converts the deprecated credential type to its replacement.
+func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential {
+    return &EcsRamRoleCredential{
+        RoleName: oldCred.RoleName,
+    }
+}
+
+type EcsRamRoleCredential struct {
+    RoleName string
+}
+
+func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential {
+    return &EcsRamRoleCredential{
+        RoleName: roleName,
+    }
+}
+
+// Deprecated: Use EcsRamRoleCredential in this package instead.
+type StsRoleNameOnEcsCredential struct {
+    RoleName string
+}
+
+// Deprecated: Use NewEcsRamRoleCredential in this package instead.
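+// Illustrative replacement for new code:
+//
+//     cred := credentials.NewEcsRamRoleCredential(roleName)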
+func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
+    return &StsRoleNameOnEcsCredential{
+        RoleName: roleName,
+    }
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go new file mode 100644 index 000000000000..3cd0d020a75d --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go @@ -0,0 +1,30 @@
+package provider
+
+import (
+    "errors"
+    "os"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+type EnvProvider struct{}
+
+var ProviderEnv = new(EnvProvider)
+
+func NewEnvProvider() Provider {
+    return &EnvProvider{}
+}
+
+// Resolve returns (nil, nil) when the variables are unset, so a chain can
+// fall through to the next provider; set-but-empty values are an error.
+func (p *EnvProvider) Resolve() (auth.Credential, error) {
+    accessKeyID, ok1 := os.LookupEnv(ENVAccessKeyID)
+    accessKeySecret, ok2 := os.LookupEnv(ENVAccessKeySecret)
+    if !ok1 || !ok2 {
+        return nil, nil
+    }
+    if accessKeyID == "" || accessKeySecret == "" {
+        return nil, errors.New("environment variable " + ENVAccessKeyID + " or " + ENVAccessKeySecret + " is empty")
+    }
+    return credentials.NewAccessKeyCredential(accessKeyID, accessKeySecret), nil
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go new file mode 100644 index 000000000000..1906d21f67de --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go @@ -0,0 +1,92 @@
+package provider
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "os"
+    "time"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
+
+type InstanceCredentialsProvider struct{}
+
+var ProviderInstance = new(InstanceCredentialsProvider)
+
+var HookGet = func(fn func(string) (int, []byte, error)) func(string) (int, []byte, error) {
+    return fn
+}
+
+func NewInstanceCredentialsProvider() Provider {
+    return &InstanceCredentialsProvider{}
+}
+
+func (p *InstanceCredentialsProvider) Resolve() (auth.Credential, error) {
+    roleName, ok := os.LookupEnv(ENVEcsMetadata)
+    if !ok {
+        return nil, nil
+    }
+    if roleName == "" {
+        return nil, errors.New("environment variable '" + ENVEcsMetadata + "' is empty")
+    }
+    status, content, err := HookGet(get)(securityCredURL + roleName)
+    if err != nil {
+        return nil, err
+    }
+    if status != 200 {
+        if status == 404 {
+            return nil, fmt.Errorf("the role was not found in the instance metadata")
+        }
+        return nil, fmt.Errorf("received %d when getting security credentials for %s", status, roleName)
+    }
+    body := make(map[string]interface{})
+
+    if err := json.Unmarshal(content, &body); err != nil {
+        return nil, err
+    }
+
+    accessKeyID, err := extractString(body, "AccessKeyId")
+    if err != nil {
+        return nil, err
+    }
+    accessKeySecret, err := extractString(body, "AccessKeySecret")
+    if err != nil {
+        return nil, err
+    }
+    securityToken, err := extractString(body, "SecurityToken")
+    if err != nil {
+        return nil, err
+    }
+
+    return credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, securityToken), nil
+}
+
+func get(url string) (status int, content []byte, err error) {
+    // Use a dedicated client: setting a timeout on http.DefaultClient would
+    // leak the one-second limit into every other consumer of the shared client.
+    httpClient := &http.Client{Timeout: time.Second}
+    resp, err := httpClient.Get(url)
+    if err != nil {
+        return
+    }
+    defer resp.Body.Close()
+    content, err = ioutil.ReadAll(resp.Body)
+    return resp.StatusCode, content, err
+}
+
+func extractString(m map[string]interface{}, key string) (string, error) {
+    raw, ok := m[key]
+    if !ok {
+        return "", fmt.Errorf("%s not in map", key)
+    }
+    str, ok := raw.(string)
+    if !ok {
+        return "", fmt.Errorf("%s is not a string in map", key)
+    }
+    return str, nil
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go new file mode 100644 index 000000000000..8d525c37adb5 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go @@ -0,0 +1,158 @@
+package provider
+
+import (
+    "bufio"
+    "errors"
+    "os"
+    "runtime"
+    "strings"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+
+    ini "gopkg.in/ini.v1"
+)
+
+type ProfileProvider struct {
+    Profile string
+}
+
+var ProviderProfile = NewProfileProvider()
+
+// NewProfileProvider receives zero or more profile names. With no arguments
+// the Profile field defaults to "default"; with several, only the first is
+// used and the rest are discarded.
+func NewProfileProvider(name ...string) Provider {
+    p := new(ProfileProvider)
+    if len(name) == 0 {
+        p.Profile = "default"
+    } else {
+        p.Profile = name[0]
+    }
+    return p
+}
+
+// Resolve implements the Provider interface.
+// When the credential type is rsa_key_pair, the private_key file must parse
+// directly into the string that NewRsaKeyPairCredential expects.
+func (p *ProfileProvider) Resolve() (auth.Credential, error) {
+    path, ok := os.LookupEnv(ENVCredentialFile)
+    if !ok {
+        // Assign to the outer path; using := here would shadow it and leave
+        // an empty path for the ini.Load call below.
+        var err error
+        path, err = checkDefaultPath()
+        if err != nil {
+            return nil, err
+        }
+        if path == "" {
+            return nil, nil
+        }
+    } else if path == "" {
+        return nil, errors.New("environment variable '" + ENVCredentialFile + "' cannot be empty")
+    }
+
+    cfg, err := ini.Load(path)
+    if err != nil {
+        return nil, errors.New("ERROR: Can not open file: " + err.Error())
+    }
+
+    section, err := cfg.GetSection(p.Profile)
+    if err != nil {
+        return nil, errors.New("ERROR: Can not load section: " + err.Error())
+    }
+
+    value, err := section.GetKey("type")
+    if err != nil {
+        return nil, errors.New("ERROR: Can not find credential type: " + err.Error())
+    }
+
+    switch value.String() {
+    case "access_key":
+        value1, err1 := section.GetKey("access_key_id")
+        value2, err2 := section.GetKey("access_key_secret")
+        if err1 != nil || err2 != nil {
+            return nil, errors.New("ERROR: Failed to get value")
+        }
+        if value1.String() == "" || value2.String() == "" {
+            return nil, errors.New("ERROR: Value can't be empty")
+        }
+        return credentials.NewAccessKeyCredential(value1.String(), value2.String()), nil
+    case "ecs_ram_role":
+        value1, err1 := section.GetKey("role_name")
+        if err1 != nil {
+            return nil, errors.New("ERROR: Failed to get value")
+        }
+        if value1.String() == "" {
+            return nil, errors.New("ERROR: Value can't be empty")
+        }
+        return credentials.NewEcsRamRoleCredential(value1.String()), nil
+    case "ram_role_arn":
+        value1, err1 := section.GetKey("access_key_id")
+        value2, err2 := section.GetKey("access_key_secret")
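+        // ram_role_arn requires all four keys; the session duration passed
+        // below is fixed at 3600 seconds.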
+        value3, err3 := section.GetKey("role_arn")
+        value4, err4 := section.GetKey("role_session_name")
+        if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
+            return nil, errors.New("ERROR: Failed to get value")
+        }
+        if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" {
+            return nil, errors.New("ERROR: Value can't be empty")
+        }
+        return credentials.NewRamRoleArnCredential(value1.String(), value2.String(), value3.String(), value4.String(), 3600), nil
+    case "rsa_key_pair":
+        value1, err1 := section.GetKey("public_key_id")
+        value2, err2 := section.GetKey("private_key_file")
+        if err1 != nil || err2 != nil {
+            return nil, errors.New("ERROR: Failed to get value")
+        }
+        if value1.String() == "" || value2.String() == "" {
+            return nil, errors.New("ERROR: Value can't be empty")
+        }
+        file, err := os.Open(value2.String())
+        if err != nil {
+            return nil, errors.New("ERROR: Can not get private_key")
+        }
+        defer file.Close()
+        // Strip the PEM armour lines and keep the base64 body; the
+        // accumulated text is the private key the credential needs.
+        var privateKey string
+        scan := bufio.NewScanner(file)
+        for scan.Scan() {
+            if strings.HasPrefix(scan.Text(), "----") {
+                continue
+            }
+            privateKey += scan.Text() + "\n"
+        }
+        return credentials.NewRsaKeyPairCredential(privateKey, value1.String(), 3600), nil
+    default:
+        return nil, errors.New("ERROR: Failed to get credential")
+    }
+}
+
+// GetHomePath returns the home directory for the current platform; it
+// returns an empty string when the relevant environment variable is unset.
+func GetHomePath() string {
+    if runtime.GOOS == "windows" {
+        path, ok := os.LookupEnv("USERPROFILE")
+        if !ok {
+            return ""
+        }
+        return path
+    }
+    path, ok := os.LookupEnv("HOME")
+    if !ok {
+        return ""
+    }
+    return path
+}
+
+// checkDefaultPath probes ~/.alibabacloud/credentials; a missing file is
+// reported as an empty path, not as an error.
+func checkDefaultPath() (path string, err error) {
+    path = GetHomePath()
+    if path == "" {
+        return "", errors.New("The default credential file path is invalid")
+    }
+    path = strings.Replace(PATHCredentialFile, "~", path, 1)
+    _, err = os.Stat(path)
+    if err != nil {
+        return "", nil
+    }
+    return path, nil
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go new file mode 100644 index 000000000000..ae4e168eb1ae --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go @@ -0,0 +1,19 @@
+package provider
+
+import (
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+// Environment variables that may be used by the providers.
+const (
+    ENVAccessKeyID     = "ALIBABA_CLOUD_ACCESS_KEY_ID"
+    ENVAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
+    ENVCredentialFile  = "ALIBABA_CLOUD_CREDENTIALS_FILE"
+    ENVEcsMetadata     = "ALIBABA_CLOUD_ECS_METADATA"
+    PATHCredentialFile = "~/.alibabacloud/credentials"
+)
+
+// To supply a custom credential provider, implement the Resolve method of the interface below.
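+//
+// A minimal sketch (illustrative only, not part of the SDK) of a provider
+// that always yields a fixed credential:
+//
+//     type staticProvider struct{ cred auth.Credential }
+//
+//     func (p staticProvider) Resolve() (auth.Credential, error) {
+//         // return (nil, nil) instead to let a chain skip this provider
+//         return p.cred, nil
+//     }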
+type Provider interface {
+    Resolve() (auth.Credential, error)
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go new file mode 100644 index 000000000000..3f9315d138eb --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go @@ -0,0 +1,34 @@
+package provider
+
+import (
+    "errors"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+type ProviderChain struct {
+    Providers []Provider
+}
+
+var defaultproviders = []Provider{ProviderEnv, ProviderProfile, ProviderInstance}
+var DefaultChain = NewProviderChain(defaultproviders)
+
+func NewProviderChain(providers []Provider) Provider {
+    return &ProviderChain{
+        Providers: providers,
+    }
+}
+
+// Resolve walks the chain in order: a provider that returns (nil, nil) is
+// skipped, and the first credential or error wins.
+func (p *ProviderChain) Resolve() (auth.Credential, error) {
+    for _, provider := range p.Providers {
+        creds, err := provider.Resolve()
+        if err != nil {
+            return nil, err
+        }
+        if creds == nil {
+            continue
+        }
+        return creds, nil
+    }
+    return nil, errors.New("No credential found")
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go new file mode 100644 index 000000000000..00d688eb8d14 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go @@ -0,0 +1,15 @@
+package credentials
+
+type RsaKeyPairCredential struct {
+    PrivateKey        string
+    PublicKeyId       string
+    SessionExpiration int
+}
+
+func NewRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int) *RsaKeyPairCredential {
+    return &RsaKeyPairCredential{
+        PrivateKey:        privateKey,
+        PublicKeyId:       publicKeyId,
+        SessionExpiration: sessionExpiration,
+    }
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go new file mode 100644 index 000000000000..554431ff0bce --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go @@ -0,0 +1,15 @@
+package credentials
+
+type StsTokenCredential struct {
+    AccessKeyId       string
+    AccessKeySecret   string
+    AccessKeyStsToken string
+}
+
+func NewStsTokenCredential(accessKeyId, accessKeySecret, accessKeyStsToken string) *StsTokenCredential {
+    return &StsTokenCredential{
+        AccessKeyId:       accessKeyId,
+        AccessKeySecret:   accessKeySecret,
+        AccessKeyStsToken: accessKeyStsToken,
+    }
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go new file mode 100644 index 000000000000..27602fd74946 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go @@ -0,0 +1,61 @@
+package credentials
+
+// Deprecated: Use RamRoleArnCredential in this package instead.
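+// A hedged migration sketch: the replacement constructor adds an explicit
+// session duration (900-3600 seconds), e.g.
+//
+//     cred := credentials.NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, sessionName, 3600)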
+type StsRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int +} + +type RamRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int + Policy string +} + +// Deprecated: Use RamRoleArnCredential in this package instead. +func NewStsRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *StsRoleArnCredential { + return &StsRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} + +func (oldCred *StsRoleArnCredential) ToRamRoleArnCredential() *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: oldCred.AccessKeyId, + AccessKeySecret: oldCred.AccessKeySecret, + RoleArn: oldCred.RoleArn, + RoleSessionName: oldCred.RoleSessionName, + RoleSessionExpiration: oldCred.RoleSessionExpiration, + } +} + +func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} + +func NewRamRoleArnWithPolicyCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int) *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + Policy: policy, + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go new file mode 100644 index 000000000000..77fcec231e3c --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go @@ -0,0 +1,136 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package auth
+
+import (
+    "bytes"
+    "sort"
+    "strings"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+var hookGetDate = func(fn func() string) string {
+    return fn()
+}
+
+func init() {
+    debug = utils.Init("sdk")
+}
+
+func signRoaRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+    completeROASignParams(request, signer, regionId)
+    stringToSign := buildRoaStringToSign(request)
+    request.SetStringToSign(stringToSign)
+    signature := signer.Sign(stringToSign, "")
+    accessKeyId, err := signer.GetAccessKeyId()
+    if err != nil {
+        // Propagate the lookup failure; returning nil here would silently
+        // produce an unsigned request.
+        return err
+    }
+
+    request.GetHeaders()["Authorization"] = "acs " + accessKeyId + ":" + signature
+
+    return
+}
+
+func completeROASignParams(request requests.AcsRequest, signer Signer, regionId string) {
+    headerParams := request.GetHeaders()
+
+    // complete query params
+    queryParams := request.GetQueryParams()
+    //if _, ok := queryParams["RegionId"]; !ok {
+    //	queryParams["RegionId"] = regionId
+    //}
+    if extraParam := signer.GetExtraParam(); extraParam != nil {
+        for key, value := range extraParam {
+            if key == "SecurityToken" {
+                headerParams["x-acs-security-token"] = value
+                continue
+            }
+            if key == "BearerToken" {
+                headerParams["x-acs-bearer-token"] = value
+                continue
+            }
+            queryParams[key] = value
+        }
+    }
+
+    // complete header params
+    headerParams["Date"] = hookGetDate(utils.GetTimeInFormatRFC2616)
+    headerParams["x-acs-signature-method"] = signer.GetName()
+    headerParams["x-acs-signature-version"] = signer.GetVersion()
+    if request.GetFormParams() != nil && len(request.GetFormParams()) > 0 {
+        formString := utils.GetUrlFormedMap(request.GetFormParams())
+        request.SetContent([]byte(formString))
+        headerParams["Content-Type"] = requests.Form
+    }
+    contentMD5 := utils.GetMD5Base64(request.GetContent())
+    headerParams["Content-MD5"] = contentMD5
+    if _, contains := headerParams["Content-Type"]; !contains {
+        headerParams["Content-Type"] = requests.Raw
+    }
+    switch format := request.GetAcceptFormat(); format {
+    case "JSON":
+        headerParams["Accept"] = requests.Json
+    case "XML":
+        headerParams["Accept"] = requests.Xml
+    default:
+        headerParams["Accept"] = requests.Raw
+    }
+}
+
+func buildRoaStringToSign(request requests.AcsRequest) (stringToSign string) {
+
+    headers := request.GetHeaders()
+
+    stringToSignBuilder := bytes.Buffer{}
+    stringToSignBuilder.WriteString(request.GetMethod())
+    stringToSignBuilder.WriteString(requests.HeaderSeparator)
+
+    // append header keys for sign
+    appendIfContain(headers, &stringToSignBuilder, "Accept", requests.HeaderSeparator)
+    appendIfContain(headers, &stringToSignBuilder, "Content-MD5", requests.HeaderSeparator)
+    appendIfContain(headers, &stringToSignBuilder, "Content-Type", requests.HeaderSeparator)
+    appendIfContain(headers, &stringToSignBuilder, "Date", requests.HeaderSeparator)
+
+    // sort and append headers which start with 'x-acs-'
+    var acsHeaders []string
+    for key := range headers {
+        if strings.HasPrefix(key, "x-acs-") {
+            acsHeaders = append(acsHeaders, key)
+        }
+    }
+    sort.Strings(acsHeaders)
+    for _, key := range acsHeaders {
+        stringToSignBuilder.WriteString(key + ":" + headers[key])
+        stringToSignBuilder.WriteString(requests.HeaderSeparator)
+    }
+
+    // append query params
+    stringToSignBuilder.WriteString(request.BuildQueries())
+    stringToSign = stringToSignBuilder.String()
+    debug("stringToSign: %s", stringToSign)
+    return
+}
+
+func appendIfContain(sourceMap
map[string]string, target *bytes.Buffer, key, separator string) { + if value, contain := sourceMap[key]; contain && len(value) > 0 { + target.WriteString(sourceMap[key]) + target.WriteString(separator) + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go new file mode 100644 index 000000000000..14ea15ca4367 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go @@ -0,0 +1,94 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +import ( + "net/url" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +var hookGetUUIDV4 = func(fn func() string) string { + return fn() +} + +func signRpcRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) { + err = completeRpcSignParams(request, signer, regionId) + if err != nil { + return + } + // remove while retry + if _, containsSign := request.GetQueryParams()["Signature"]; containsSign { + delete(request.GetQueryParams(), "Signature") + } + stringToSign := buildRpcStringToSign(request) + request.SetStringToSign(stringToSign) + signature := signer.Sign(stringToSign, "&") + request.GetQueryParams()["Signature"] = signature + + return +} + +func completeRpcSignParams(request requests.AcsRequest, signer Signer, regionId string) (err error) { + queryParams := request.GetQueryParams() + queryParams["Version"] = request.GetVersion() + queryParams["Action"] = request.GetActionName() + queryParams["Format"] = request.GetAcceptFormat() + queryParams["Timestamp"] = hookGetDate(utils.GetTimeInFormatISO8601) + queryParams["SignatureMethod"] = signer.GetName() + queryParams["SignatureType"] = signer.GetType() + queryParams["SignatureVersion"] = signer.GetVersion() + queryParams["SignatureNonce"] = hookGetUUIDV4(utils.GetUUIDV4) + queryParams["AccessKeyId"], err = signer.GetAccessKeyId() + + if err != nil { + return + } + + if _, contains := queryParams["RegionId"]; !contains { + queryParams["RegionId"] = regionId + } + if extraParam := signer.GetExtraParam(); extraParam != nil { + for key, value := range extraParam { + queryParams[key] = value + } + } + + request.GetHeaders()["Content-Type"] = requests.Form + formString := utils.GetUrlFormedMap(request.GetFormParams()) + request.SetContent([]byte(formString)) + + return +} + +func buildRpcStringToSign(request requests.AcsRequest) (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.GetQueryParams() { + signParams[key] = value + } + for key, value := range request.GetFormParams() { + signParams[key] = value + } + + stringToSign = utils.GetUrlFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + 
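+    // The replacements above align Go's form encoding with RFC 3986 (space
+    // as %20, '*' escaped, '~' left literal); the canonical query string is
+    // then percent-encoded once more and joined with the HTTP method and an
+    // encoded "/" to form the final string-to-sign.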
stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.GetMethod() + "&%2F&" + stringToSign + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go new file mode 100644 index 000000000000..cbbc3cef7de9 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go @@ -0,0 +1,98 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +import ( + "fmt" + "reflect" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +type Signer interface { + GetName() string + GetType() string + GetVersion() string + GetAccessKeyId() (string, error) + GetExtraParam() map[string]string + Sign(stringToSign, secretSuffix string) string +} + +func NewSignerWithCredential(credential Credential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer Signer, err error) { + switch instance := credential.(type) { + case *credentials.AccessKeyCredential: + { + signer = signers.NewAccessKeySigner(instance) + } + case *credentials.StsTokenCredential: + { + signer = signers.NewStsTokenSigner(instance) + } + case *credentials.BearerTokenCredential: + { + signer = signers.NewBearerTokenSigner(instance) + } + case *credentials.RamRoleArnCredential: + { + signer, err = signers.NewRamRoleArnSigner(instance, commonApi) + } + case *credentials.RsaKeyPairCredential: + { + signer, err = signers.NewSignerKeyPair(instance, commonApi) + } + case *credentials.EcsRamRoleCredential: + { + signer = signers.NewEcsRamRoleSigner(instance, commonApi) + } + case *credentials.BaseCredential: // deprecated user interface + { + signer = signers.NewAccessKeySigner(instance.ToAccessKeyCredential()) + } + case *credentials.StsRoleArnCredential: // deprecated user interface + { + signer, err = signers.NewRamRoleArnSigner(instance.ToRamRoleArnCredential(), commonApi) + } + case *credentials.StsRoleNameOnEcsCredential: // deprecated user interface + { + signer = signers.NewEcsRamRoleSigner(instance.ToEcsRamRoleCredential(), commonApi) + } + default: + message := fmt.Sprintf(errors.UnsupportedCredentialErrorMessage, reflect.TypeOf(credential)) + err = errors.NewClientError(errors.UnsupportedCredentialErrorCode, message, nil) + } + return +} + +func Sign(request requests.AcsRequest, signer Signer, regionId string) (err error) { + switch request.GetStyle() { + case requests.ROA: + { + err = signRoaRequest(request, signer, regionId) + } + case requests.RPC: + { + err = signRpcRequest(request, signer, regionId) + } + default: + message := fmt.Sprintf(errors.UnknownRequestTypeErrorMessage, reflect.TypeOf(request)) + err = 
errors.NewClientError(errors.UnknownRequestTypeErrorCode, message, nil) + } + + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go new file mode 100644 index 000000000000..887f502094e9 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go @@ -0,0 +1,57 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signers + +import ( + "crypto" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" +) + +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +func Sha256WithRsa(source, secret string) string { + // block, _ := pem.Decode([]byte(secret)) + decodeString, err := base64.StdEncoding.DecodeString(secret) + if err != nil { + panic(err) + } + private, err := x509.ParsePKCS8PrivateKey(decodeString) + if err != nil { + panic(err) + } + + h := crypto.Hash.New(crypto.SHA256) + h.Write([]byte(source)) + hashed := h.Sum(nil) + signature, err := rsa.SignPKCS1v15(rand.Reader, private.(*rsa.PrivateKey), + crypto.SHA256, hashed) + if err != nil { + panic(err) + } + + return base64.StdEncoding.EncodeToString(signature) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go new file mode 100644 index 000000000000..ba291a41e888 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +const defaultInAdvanceScale = 0.95 + +type credentialUpdater struct { + credentialExpiration int + lastUpdateTimestamp int64 + inAdvanceScale float64 + buildRequestMethod func() (*requests.CommonRequest, error) + responseCallBack func(response *responses.CommonResponse) error + refreshApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) +} + +func (updater *credentialUpdater) needUpdateCredential() (result bool) { + if updater.inAdvanceScale == 0 { + updater.inAdvanceScale = defaultInAdvanceScale + } + return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale) +} + +func (updater *credentialUpdater) updateCredential() (err error) { + request, err := updater.buildRequestMethod() + if err != nil { + return + } + response, err := updater.refreshApi(request) + if err != nil { + return + } + updater.lastUpdateTimestamp = time.Now().Unix() + err = updater.responseCallBack(response) + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go new file mode 100644 index 000000000000..99c624c880fc --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go @@ -0,0 +1,7 @@ +package signers + +type SessionCredential struct { + AccessKeyId string + AccessKeySecret string + StsToken string +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go new file mode 100644 index 000000000000..bc4f35b85605 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type AccessKeySigner struct { + credential *credentials.AccessKeyCredential +} + +func (signer *AccessKeySigner) GetExtraParam() map[string]string { + return nil +} + +func NewAccessKeySigner(credential *credentials.AccessKeyCredential) *AccessKeySigner { + return &AccessKeySigner{ + credential: credential, + } +} + +func (*AccessKeySigner) GetName() string { + return "HMAC-SHA1" +} + +func (*AccessKeySigner) GetType() string { + return "" +} + +func (*AccessKeySigner) GetVersion() string { + return "1.0" +} + +func (signer *AccessKeySigner) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.AccessKeyId, nil +} + +func (signer *AccessKeySigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go new file mode 100644 index 000000000000..75b78433adbb --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go @@ -0,0 +1,35 @@ +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type BearerTokenSigner struct { + credential *credentials.BearerTokenCredential +} + +func NewBearerTokenSigner(credential *credentials.BearerTokenCredential) *BearerTokenSigner { + return &BearerTokenSigner{ + credential: credential, + } +} + +func (signer *BearerTokenSigner) GetExtraParam() map[string]string { + return map[string]string{"BearerToken": signer.credential.BearerToken} +} + +func (*BearerTokenSigner) GetName() string { + return "" +} +func (*BearerTokenSigner) GetType() string { + return "BEARERTOKEN" +} +func (*BearerTokenSigner) GetVersion() string { + return "1.0" +} +func (signer *BearerTokenSigner) GetAccessKeyId() (accessKeyId string, err error) { + return "", nil +} +func (signer *BearerTokenSigner) Sign(stringToSign, secretSuffix string) string { + return "" +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go new file mode 100644 index 000000000000..73788429e9b2 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go @@ -0,0 +1,167 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +type EcsRamRoleSigner struct { + *credentialUpdater + sessionCredential *SessionCredential + credential *credentials.EcsRamRoleCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewEcsRamRoleSigner(credential *credentials.EcsRamRoleCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *EcsRamRoleSigner) { + signer = &EcsRamRoleSigner{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: defaultDurationSeconds / 60, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + return signer +} + +func (*EcsRamRoleSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*EcsRamRoleSigner) GetType() string { + return "" +} + +func (*EcsRamRoleSigner) GetVersion() string { + return "1.0" +} + +func (signer *EcsRamRoleSigner) GetAccessKeyId() (accessKeyId string, err error) { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + err = signer.updateCredential() + if err != nil { + return + } + } + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + return "", nil + } + return signer.sessionCredential.AccessKeyId, nil +} + +func (signer *EcsRamRoleSigner) GetExtraParam() map[string]string { + if signer.sessionCredential == nil { + return make(map[string]string) + } + if len(signer.sessionCredential.StsToken) <= 0 { + return make(map[string]string) + } + return map[string]string{"SecurityToken": signer.sessionCredential.StsToken} +} + +func (signer *EcsRamRoleSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *EcsRamRoleSigner) buildCommonRequest() (request *requests.CommonRequest, err error) { + return +} + +func (signer *EcsRamRoleSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + requestUrl := securityCredURL + signer.credential.RoleName + httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader("")) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + httpClient := &http.Client{} + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + + response = responses.NewCommonResponse() + err = responses.Unmarshal(response, httpResponse, "") + return +} + +func (signer *EcsRamRoleSigner) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + return fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString()) + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return fmt.Errorf("refresh Ecs sts 
token err, json.Unmarshal fail: %s", err.Error()) + } + code, err := jmespath.Search("Code", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get Code: %s", err.Error()) + } + if code.(string) != "Success" { + return fmt.Errorf("refresh Ecs sts token err, Code is not Success") + } + accessKeyId, err := jmespath.Search("AccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("AccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %s", err.Error()) + } + securityToken, err := jmespath.Search("SecurityToken", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %s", err.Error()) + } + expiration, err := jmespath.Search("Expiration", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get Expiration: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil || expiration == nil { + return + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", expiration.(string)) + signer.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + StsToken: securityToken.(string), + } + + return +} + +func (signer *EcsRamRoleSigner) GetSessionCredential() *SessionCredential { + return signer.sessionCredential +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go new file mode 100644 index 000000000000..19273d5a69df --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go @@ -0,0 +1,148 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +type SignerKeyPair struct { + *credentialUpdater + sessionCredential *SessionCredential + credential *credentials.RsaKeyPairCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewSignerKeyPair(credential *credentials.RsaKeyPairCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *SignerKeyPair, err error) { + signer = &SignerKeyPair{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: credential.SessionExpiration, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + if credential.SessionExpiration > 0 { + if credential.SessionExpiration >= 900 && credential.SessionExpiration <= 3600 { + signer.credentialExpiration = credential.SessionExpiration + } else { + err = errors.NewClientError(errors.InvalidParamErrorCode, "Key Pair session duration should be in the range of 15min - 1Hr", nil) + } + } else { + signer.credentialExpiration = defaultDurationSeconds + } + return +} + +func (*SignerKeyPair) GetName() string { + return "HMAC-SHA1" +} + +func (*SignerKeyPair) GetType() string { + return "" +} + +func (*SignerKeyPair) GetVersion() string { + return "1.0" +} + +func (signer *SignerKeyPair) ensureCredential() error { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + return signer.updateCredential() + } + return nil +} + +func (signer *SignerKeyPair) GetAccessKeyId() (accessKeyId string, err error) { + err = signer.ensureCredential() + if err != nil { + return + } + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + accessKeyId = "" + return + } + + accessKeyId = signer.sessionCredential.AccessKeyId + return +} + +func (signer *SignerKeyPair) GetExtraParam() map[string]string { + return make(map[string]string) +} + +func (signer *SignerKeyPair) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *SignerKeyPair) buildCommonRequest() (request *requests.CommonRequest, err error) { + request = requests.NewCommonRequest() + request.Product = "Sts" + request.Version = "2015-04-01" + request.ApiName = "GenerateSessionAccessKey" + request.Scheme = requests.HTTPS + request.SetDomain("sts.ap-northeast-1.aliyuncs.com") + request.QueryParams["PublicKeyId"] = signer.credential.PublicKeyId + request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration) + return +} + +func (signer *SignerKeyPair) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + signerV2 := NewSignerV2(signer.credential) + return signer.commonApi(request, signerV2) +} + +func (signer *SignerKeyPair) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + message := "refresh session AccessKey failed" + err = 
errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, json.Unmarshal fail: %s", err.Error()) + } + accessKeyId, err := jmespath.Search("SessionAccessKey.SessionAccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("SessionAccessKey.SessionAccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeySecret: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil { + return + } + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go new file mode 100644 index 000000000000..c945c8aeb34b --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go @@ -0,0 +1,175 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +const ( + defaultDurationSeconds = 3600 +) + +type RamRoleArnSigner struct { + *credentialUpdater + roleSessionName string + sessionCredential *SessionCredential + credential *credentials.RamRoleArnCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewRamRoleArnSigner(credential *credentials.RamRoleArnCredential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer *RamRoleArnSigner, err error) { + signer = &RamRoleArnSigner{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: credential.RoleSessionExpiration, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + if len(credential.RoleSessionName) > 0 { + signer.roleSessionName = credential.RoleSessionName + } else { + signer.roleSessionName = "aliyun-go-sdk-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + if credential.RoleSessionExpiration > 0 { + if credential.RoleSessionExpiration >= 900 && credential.RoleSessionExpiration <= 3600 { + signer.credentialExpiration = credential.RoleSessionExpiration + } else { + err = errors.NewClientError(errors.InvalidParamErrorCode, "Assume Role session duration should be in the range of 15min - 1Hr", nil) + } + } else { + signer.credentialExpiration = defaultDurationSeconds + } + return +} + +func (*RamRoleArnSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*RamRoleArnSigner) GetType() string { + return "" +} + +func (*RamRoleArnSigner) GetVersion() string { + return "1.0" +} + +func (signer *RamRoleArnSigner) GetAccessKeyId() (accessKeyId string, err error) { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + err = signer.updateCredential() + if err != nil { + return + } + } + + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + return "", err + } + + return signer.sessionCredential.AccessKeyId, nil +} + +func (signer *RamRoleArnSigner) GetExtraParam() map[string]string { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + signer.updateCredential() + } + if signer.sessionCredential == nil || len(signer.sessionCredential.StsToken) <= 0 { + return make(map[string]string) + } + return map[string]string{"SecurityToken": signer.sessionCredential.StsToken} +} + +func (signer *RamRoleArnSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *RamRoleArnSigner) buildCommonRequest() (request *requests.CommonRequest, err error) { + request = requests.NewCommonRequest() + request.Product = "Sts" + request.Version = "2015-04-01" + request.ApiName = "AssumeRole" + request.Scheme = requests.HTTPS + request.QueryParams["RoleArn"] = signer.credential.RoleArn + if signer.credential.Policy != "" { + request.QueryParams["Policy"] = signer.credential.Policy + } + 
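+    // RoleSessionName labels the STS session in audit logs; DurationSeconds
+    // was validated to the 900-3600 second range (or defaulted to 3600) in
+    // the constructor above.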
request.QueryParams["RoleSessionName"] = signer.credential.RoleSessionName + request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration) + return +} + +func (signer *RamRoleArnSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + credential := &credentials.AccessKeyCredential{ + AccessKeyId: signer.credential.AccessKeyId, + AccessKeySecret: signer.credential.AccessKeySecret, + } + signerV1 := NewAccessKeySigner(credential) + return signer.commonApi(request, signerV1) +} + +func (signer *RamRoleArnSigner) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + message := "refresh session token failed" + err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error()) + } + accessKeyId, err := jmespath.Search("Credentials.AccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("Credentials.AccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeySecret: %s", err.Error()) + } + securityToken, err := jmespath.Search("Credentials.SecurityToken", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get SecurityToken: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil { + return + } + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + StsToken: securityToken.(string), + } + return +} + +func (signer *RamRoleArnSigner) GetSessionCredential() *SessionCredential { + return signer.sessionCredential +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go new file mode 100644 index 000000000000..d0ce36c38d33 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type StsTokenSigner struct { + credential *credentials.StsTokenCredential +} + +func NewStsTokenSigner(credential *credentials.StsTokenCredential) *StsTokenSigner { + return &StsTokenSigner{ + credential: credential, + } +} + +func (*StsTokenSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*StsTokenSigner) GetType() string { + return "" +} + +func (*StsTokenSigner) GetVersion() string { + return "1.0" +} + +func (signer *StsTokenSigner) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.AccessKeyId, nil +} + +func (signer *StsTokenSigner) GetExtraParam() map[string]string { + return map[string]string{"SecurityToken": signer.credential.AccessKeyStsToken} +} + +func (signer *StsTokenSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go new file mode 100644 index 000000000000..973485298237 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type SignerV2 struct { + credential *credentials.RsaKeyPairCredential +} + +func (signer *SignerV2) GetExtraParam() map[string]string { + return nil +} + +func NewSignerV2(credential *credentials.RsaKeyPairCredential) *SignerV2 { + return &SignerV2{ + credential: credential, + } +} + +func (*SignerV2) GetName() string { + return "SHA256withRSA" +} + +func (*SignerV2) GetType() string { + return "PRIVATEKEY" +} + +func (*SignerV2) GetVersion() string { + return "1.0" +} + +func (signer *SignerV2) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.PublicKeyId, err +} + +func (signer *SignerV2) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.PrivateKey + return Sha256WithRsa(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go new file mode 100644 index 000000000000..b58629fa32b2 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go @@ -0,0 +1,695 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sdk
+
+import (
+    "context"
+    "crypto/tls"
+    "fmt"
+    "net"
+    "net/http"
+    "net/url"
+    "os"
+    "runtime"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+func init() {
+    debug = utils.Init("sdk")
+}
+
+// Version is replaced at build time via: -ldflags="-X sdk.version=x.x.x"
+var Version = "0.0.1"
+var defaultConnectTimeout = 5 * time.Second
+var defaultReadTimeout = 10 * time.Second
+
+var DefaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), Version)
+
+var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
+    return fn
+}
+
+// Client is the base client shared by all service clients.
+type Client struct {
+    isInsecure     bool
+    regionId       string
+    config         *Config
+    httpProxy      string
+    httpsProxy     string
+    noProxy        string
+    userAgent      map[string]string
+    signer         auth.Signer
+    httpClient     *http.Client
+    asyncTaskQueue chan func()
+    readTimeout    time.Duration
+    connectTimeout time.Duration
+
+    debug     bool
+    isRunning bool
+    // guards against "panic: send on closed channel" when AddAsyncTask() races with Shutdown()
+    asyncChanLock *sync.RWMutex
+}
+
+func (client *Client) Init() (err error) {
+    panic("not support yet")
+}
+
+func (client *Client) SetHTTPSInsecure(isInsecure bool) {
+    client.isInsecure = isInsecure
+}
+
+func (client *Client) GetHTTPSInsecure() bool {
+    return client.isInsecure
+}
+
+func (client *Client) SetHttpsProxy(httpsProxy string) {
+    client.httpsProxy = httpsProxy
+}
+
+func (client *Client) GetHttpsProxy() string {
+    return client.httpsProxy
+}
+
+func (client *Client) SetHttpProxy(httpProxy string) {
+    client.httpProxy = httpProxy
+}
+
+func (client *Client) GetHttpProxy() string {
+    return client.httpProxy
+}
+
+func (client *Client) SetNoProxy(noProxy string) {
+    client.noProxy = noProxy
+}
+
+func (client *Client) GetNoProxy() string {
+    return client.noProxy
+}
+
+// InitWithProviderChain resolves a credential from the given provider chain.
+// Note that RsaKeyPairCredential is only applicable to region `ap-northeast-1`;
+// if your provider chain may return an RsaKeyPairCredential, make sure the
+// regionId you pass in is `ap-northeast-1`.
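+//
+// A minimal usage sketch (illustrative only; the region id below is an
+// assumption, and a credential must be resolvable by the default provider
+// chain, e.g. from environment variables):
+//
+//    client, err := sdk.NewClientWithProvider("cn-hangzhou") // no providers given: falls back to provider.DefaultChain
+//    if err != nil {
+//        // no provider in the chain could supply a credential
+//    }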
+func (client *Client) InitWithProviderChain(regionId string, provider provider.Provider) (err error) { + config := client.InitClientConfig() + credential, err := provider.Resolve() + if err != nil { + return + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) { + client.isRunning = true + client.asyncChanLock = new(sync.RWMutex) + client.regionId = regionId + client.config = config + client.httpClient = &http.Client{} + + if config.HttpTransport != nil { + client.httpClient.Transport = config.HttpTransport + } + + if config.Timeout > 0 { + client.httpClient.Timeout = config.Timeout + } + + if config.EnableAsync { + client.EnableAsync(config.GoRoutinePoolSize, config.MaxTaskQueueSize) + } + + client.signer, err = auth.NewSignerWithCredential(credential, client.ProcessCommonRequestWithSigner) + + return +} + +func (client *Client) SetReadTimeout(readTimeout time.Duration) { + client.readTimeout = readTimeout +} + +func (client *Client) SetConnectTimeout(connectTimeout time.Duration) { + client.connectTimeout = connectTimeout +} + +func (client *Client) GetReadTimeout() time.Duration { + return client.readTimeout +} + +func (client *Client) GetConnectTimeout() time.Duration { + return client.connectTimeout +} + +func (client *Client) getHttpProxy(scheme string) (proxy *url.URL, err error) { + if scheme == "https" { + if client.GetHttpsProxy() != "" { + proxy, err = url.Parse(client.httpsProxy) + } else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("https_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } else { + if client.GetHttpProxy() != "" { + proxy, err = url.Parse(client.httpProxy) + } else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("http_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } + + return proxy, err +} + +func (client *Client) getNoProxy(scheme string) []string { + var urls []string + if client.GetNoProxy() != "" { + urls = strings.Split(client.noProxy, ",") + } else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } else if rawurl := os.Getenv("no_proxy"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } + + return urls +} + +// EnableAsync enable the async task queue +func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) { + client.asyncTaskQueue = make(chan func(), maxTaskQueueSize) + for i := 0; i < routinePoolSize; i++ { + go func() { + for client.isRunning { + select { + case task, notClosed := <-client.asyncTaskQueue: + if notClosed { + task() + } + } + } + }() + } +} + +func (client *Client) InitWithAccessKey(regionId, accessKeyId, accessKeySecret string) (err error) { + config := client.InitClientConfig() + credential := &credentials.BaseCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithStsToken(regionId, accessKeyId, accessKeySecret, securityToken string) (err error) { + config := client.InitClientConfig() + credential := &credentials.StsTokenCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + AccessKeyStsToken: securityToken, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRamRoleArn(regionId, 
accessKeyId, accessKeySecret, roleArn, roleSessionName string) (err error) { + config := client.InitClientConfig() + credential := &credentials.RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (err error) { + config := client.InitClientConfig() + credential := &credentials.RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + Policy: policy, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) { + config := client.InitClientConfig() + credential := &credentials.RsaKeyPairCredential{ + PrivateKey: privateKey, + PublicKeyId: publicKeyId, + SessionExpiration: sessionExpiration, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error) { + config := client.InitClientConfig() + credential := &credentials.EcsRamRoleCredential{ + RoleName: roleName, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithBearerToken(regionId, bearerToken string) (err error) { + config := client.InitClientConfig() + credential := &credentials.BearerTokenCredential{ + BearerToken: bearerToken, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitClientConfig() (config *Config) { + if client.config != nil { + return client.config + } else { + return NewConfig() + } +} + +func (client *Client) DoAction(request requests.AcsRequest, response responses.AcsResponse) (err error) { + return client.DoActionWithSigner(request, response, nil) +} + +func (client *Client) buildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (httpRequest *http.Request, err error) { + // add clientVersion + request.GetHeaders()["x-sdk-core-version"] = Version + + regionId := client.regionId + if len(request.GetRegionId()) > 0 { + regionId = request.GetRegionId() + } + + // resolve endpoint + resolveParam := &endpoints.ResolveParam{ + Domain: request.GetDomain(), + Product: request.GetProduct(), + RegionId: regionId, + LocationProduct: request.GetLocationServiceCode(), + LocationEndpointType: request.GetLocationEndpointType(), + CommonApi: client.ProcessCommonRequest, + } + endpoint, err := endpoints.Resolve(resolveParam) + if err != nil { + return + } + request.SetDomain(endpoint) + if request.GetScheme() == "" { + request.SetScheme(client.config.Scheme) + } + // init request params + err = requests.InitParams(request) + if err != nil { + return + } + + // signature + var finalSigner auth.Signer + if signer != nil { + finalSigner = signer + } else { + finalSigner = client.signer + } + httpRequest, err = buildHttpRequest(request, finalSigner, regionId) + if err == nil { + userAgent := DefaultUserAgent + getSendUserAgent(client.config.UserAgent, client.userAgent, request.GetUserAgent()) + httpRequest.Header.Set("User-Agent", userAgent) + } + + return +} + +func getSendUserAgent(configUserAgent string, clientUserAgent, requestUserAgent map[string]string) string { + realUserAgent := "" + for key1, value1 := range clientUserAgent { + for 
key2, _ := range requestUserAgent { + if key1 == key2 { + key1 = "" + } + } + if key1 != "" { + realUserAgent += fmt.Sprintf(" %s/%s", key1, value1) + + } + } + for key, value := range requestUserAgent { + realUserAgent += fmt.Sprintf(" %s/%s", key, value) + } + if configUserAgent != "" { + return realUserAgent + fmt.Sprintf(" Extra/%s", configUserAgent) + } + return realUserAgent +} + +func (client *Client) AppendUserAgent(key, value string) { + newkey := true + + if client.userAgent == nil { + client.userAgent = make(map[string]string) + } + if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" { + for tag, _ := range client.userAgent { + if tag == key { + client.userAgent[tag] = value + newkey = false + } + } + if newkey { + client.userAgent[key] = value + } + } +} + +func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (err error) { + _, err = client.buildRequestWithSigner(request, signer) + return +} + +func (client *Client) getTimeout(request requests.AcsRequest) (time.Duration, time.Duration) { + readTimeout := defaultReadTimeout + connectTimeout := defaultConnectTimeout + + reqReadTimeout := request.GetReadTimeout() + reqConnectTimeout := request.GetConnectTimeout() + if reqReadTimeout != 0*time.Millisecond { + readTimeout = reqReadTimeout + } else if client.readTimeout != 0*time.Millisecond { + readTimeout = client.readTimeout + } + + if reqConnectTimeout != 0*time.Millisecond { + connectTimeout = reqConnectTimeout + } else if client.connectTimeout != 0*time.Millisecond { + connectTimeout = client.connectTimeout + } + return readTimeout, connectTimeout +} + +func Timeout(connectTimeout, readTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := (&net.Dialer{ + Timeout: connectTimeout, + KeepAlive: 0 * time.Second, + DualStack: true, + }).DialContext(ctx, network, address) + + if err == nil { + conn.SetDeadline(time.Now().Add(readTimeout)) + } + + return conn, err + } +} + +func (client *Client) setTimeout(request requests.AcsRequest) { + readTimeout, connectTimeout := client.getTimeout(request) + if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil { + trans.DialContext = Timeout(connectTimeout, readTimeout) + client.httpClient.Transport = trans + } else { + client.httpClient.Transport = &http.Transport{ + DialContext: Timeout(connectTimeout, readTimeout), + } + } +} + +func (client *Client) getHTTPSInsecure(request requests.AcsRequest) (insecure bool) { + if request.GetHTTPSInsecure() != nil { + insecure = *request.GetHTTPSInsecure() + } else { + insecure = client.GetHTTPSInsecure() + } + return insecure +} + +func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) { + httpRequest, err := client.buildRequestWithSigner(request, signer) + if err != nil { + return + } + client.setTimeout(request) + proxy, err := client.getHttpProxy(httpRequest.URL.Scheme) + if err != nil { + return err + } + + noProxy := client.getNoProxy(httpRequest.URL.Scheme) + + var flag bool + for _, value := range noProxy { + if value == httpRequest.Host { + flag = true + break + } + } + + // Set whether to ignore certificate validation. + // Default InsecureSkipVerify is false. 
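+    // A per-request HTTPSInsecure value, when set, overrides the client-level
+    // setting (see getHTTPSInsecure above); the proxy resolved earlier is only
+    // applied when the request host did not match the no-proxy list (flag).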
+ if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil { + trans.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: client.getHTTPSInsecure(request), + } + if proxy != nil && !flag { + trans.Proxy = http.ProxyURL(proxy) + } + client.httpClient.Transport = trans + } + + var httpResponse *http.Response + for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ { + if proxy != nil && proxy.User != nil{ + if password, passwordSet := proxy.User.Password(); passwordSet { + httpRequest.SetBasicAuth(proxy.User.Username(), password) + } + } + debug("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto) + debug("> Host: %s", httpRequest.Host) + for key, value := range httpRequest.Header { + debug("> %s: %v", key, strings.Join(value, "")) + } + debug(">") + httpResponse, err = hookDo(client.httpClient.Do)(httpRequest) + if err == nil { + debug("< %s %s", httpResponse.Proto, httpResponse.Status) + for key, value := range httpResponse.Header { + debug("< %s: %v", key, strings.Join(value, "")) + } + } + debug("<") + // receive error + if err != nil { + if !client.config.AutoRetry { + return + } else if retryTimes >= client.config.MaxRetryTime { + // timeout but reached the max retry times, return + var timeoutErrorMsg string + if strings.Contains(err.Error(), "read tcp") { + timeoutErrorMsg = fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1)) + " Read timeout. Please set a valid ReadTimeout." + } else { + timeoutErrorMsg = fmt.Sprintf(errors.TimeoutErrorMessage, strconv.Itoa(retryTimes+1), strconv.Itoa(retryTimes+1)) + " Connect timeout. Please set a valid ConnectTimeout." + } + err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err) + return + } + } + // if status code >= 500 or timeout, will trigger retry + if client.config.AutoRetry && (err != nil || isServerError(httpResponse)) { + client.setTimeout(request) + // rewrite signatureNonce and signature + httpRequest, err = client.buildRequestWithSigner(request, signer) + // buildHttpRequest(request, finalSigner, regionId) + if err != nil { + return + } + continue + } + break + } + + err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat()) + // wrap server errors + if serverErr, ok := err.(*errors.ServerError); ok { + var wrapInfo = map[string]string{} + wrapInfo["StringToSign"] = request.GetStringToSign() + err = errors.WrapServerError(serverErr, wrapInfo) + } + return +} + +func buildHttpRequest(request requests.AcsRequest, singer auth.Signer, regionId string) (httpRequest *http.Request, err error) { + err = auth.Sign(request, singer, regionId) + if err != nil { + return + } + requestMethod := request.GetMethod() + requestUrl := request.BuildUrl() + body := request.GetBodyReader() + httpRequest, err = http.NewRequest(requestMethod, requestUrl, body) + if err != nil { + return + } + for key, value := range request.GetHeaders() { + httpRequest.Header[key] = []string{value} + } + // host is a special case + if host, containsHost := request.GetHeaders()["Host"]; containsHost { + httpRequest.Host = host + } + return +} + +func isServerError(httpResponse *http.Response) bool { + return httpResponse.StatusCode >= http.StatusInternalServerError +} + +/** +only block when any one of the following occurs: +1. the asyncTaskQueue is full, increase the queue size to avoid this +2. 
Shutdown() is in progress and the client is being closed
+**/
+func (client *Client) AddAsyncTask(task func()) (err error) {
+    if client.asyncTaskQueue != nil {
+        client.asyncChanLock.RLock()
+        defer client.asyncChanLock.RUnlock()
+        if client.isRunning {
+            client.asyncTaskQueue <- task
+        }
+    } else {
+        err = errors.NewClientError(errors.AsyncFunctionNotEnabledCode, errors.AsyncFunctionNotEnabledMessage, nil)
+    }
+    return
+}
+
+func (client *Client) GetConfig() *Config {
+    return client.config
+}
+
+func NewClient() (client *Client, err error) {
+    client = &Client{}
+    err = client.Init()
+    return
+}
+
+func NewClientWithProvider(regionId string, providers ...provider.Provider) (client *Client, err error) {
+    client = &Client{}
+    var pc provider.Provider
+    if len(providers) == 0 {
+        pc = provider.DefaultChain
+    } else {
+        pc = provider.NewProviderChain(providers)
+    }
+    err = client.InitWithProviderChain(regionId, pc)
+    return
+}
+
+func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithOptions(regionId, config, credential)
+    return
+}
+
+func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret)
+    return
+}
+
+func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken)
+    return
+}
+
+func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+    return
+}
+
+func NewClientWithRamRoleArnAndPolicy(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy)
+    return
+}
+
+func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithEcsRamRole(regionId, roleName)
+    return
+}
+
+func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration)
+    return
+}
+
+func NewClientWithBearerToken(regionId, bearerToken string) (client *Client, err error) {
+    client = &Client{}
+    err = client.InitWithBearerToken(regionId, bearerToken)
+    return
+}
+
+func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+    request.TransToAcsRequest()
+    response = responses.NewCommonResponse()
+    err = client.DoAction(request, response)
+    return
+}
+
+func (client *Client) ProcessCommonRequestWithSigner(request *requests.CommonRequest, signerInterface interface{}) (response *responses.CommonResponse, err error) {
+    if signer, isSigner := signerInterface.(auth.Signer); isSigner {
+        request.TransToAcsRequest()
+        response = responses.NewCommonResponse()
+        err = client.DoActionWithSigner(request, response, signer)
+        return
+    }
+    panic("should not be here")
+}
+
+func (client *Client) Shutdown() {
+    // lock out AddAsyncTask()
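+    // Holding the write lock means no AddAsyncTask goroutine can be mid-send
+    // on asyncTaskQueue (sends happen under the read lock), so closing the
+    // channel below cannot race with a send and panic.
+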
client.asyncChanLock.Lock() + defer client.asyncChanLock.Unlock() + if client.asyncTaskQueue != nil { + close(client.asyncTaskQueue) + } + client.isRunning = false +} + +// Deprecated: Use NewClientWithRamRoleArn in this package instead. +func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) +} + +// Deprecated: Use NewClientWithEcsRamRole in this package instead. +func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) { + return NewClientWithEcsRamRole(regionId, roleName) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go new file mode 100644 index 000000000000..e8862e0c2ea0 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go @@ -0,0 +1,91 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sdk + +import ( + "net/http" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type Config struct { + AutoRetry bool `default:"true"` + MaxRetryTime int `default:"3"` + UserAgent string `default:""` + Debug bool `default:"false"` + Timeout time.Duration `default:"10000000000"` + HttpTransport *http.Transport `default:""` + EnableAsync bool `default:"false"` + MaxTaskQueueSize int `default:"1000"` + GoRoutinePoolSize int `default:"5"` + Scheme string `default:"HTTP"` +} + +func NewConfig() (config *Config) { + config = &Config{} + utils.InitStructWithDefaultTag(config) + return +} + +func (c *Config) WithAutoRetry(isAutoRetry bool) *Config { + c.AutoRetry = isAutoRetry + return c +} + +func (c *Config) WithMaxRetryTime(maxRetryTime int) *Config { + c.MaxRetryTime = maxRetryTime + return c +} + +func (c *Config) WithUserAgent(userAgent string) *Config { + c.UserAgent = userAgent + return c +} + +func (c *Config) WithDebug(isDebug bool) *Config { + c.Debug = isDebug + return c +} + +func (c *Config) WithTimeout(timeout time.Duration) *Config { + c.Timeout = timeout + return c +} + +func (c *Config) WithHttpTransport(httpTransport *http.Transport) *Config { + c.HttpTransport = httpTransport + return c +} + +func (c *Config) WithEnableAsync(isEnableAsync bool) *Config { + c.EnableAsync = isEnableAsync + return c +} + +func (c *Config) WithMaxTaskQueueSize(maxTaskQueueSize int) *Config { + c.MaxTaskQueueSize = maxTaskQueueSize + return c +} + +func (c *Config) WithGoRoutinePoolSize(goRoutinePoolSize int) *Config { + c.GoRoutinePoolSize = goRoutinePoolSize + return c +} + +func (c *Config) WithScheme(scheme string) *Config { + c.Scheme = scheme + return c +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go new file mode 100644 index 000000000000..60adf7d459ce --- /dev/null +++ 
b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go @@ -0,0 +1,1670 @@ + +package endpoints + +import ( + "encoding/json" + "fmt" + "sync" +) + +const endpointsJson =`{ + "products": [ + { + "code": "ecs", + "document_id": "25484", + "location_service_code": "ecs", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ecs.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ecs.cn-huhehaote.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "ecs.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ecs.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ecs.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ecs.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ecs.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "ecs.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ecs.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ecs.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "ecs-cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "chatbot", + "document_id": "60760", + "location_service_code": "beebot", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "chatbot.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "chatbot.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "chatbot.[RegionId].aliyuncs.com" + }, + { + "code": "alidns", + "document_id": "29739", + "location_service_code": "alidns", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "alidns.aliyuncs.com" + } + ], + "global_endpoint": "alidns.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "itaas", + "document_id": "55759", + "location_service_code": "itaas", + "regional_endpoints": null, + "global_endpoint": "itaas.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "csb", + "document_id": "64837", + "location_service_code": "csb", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "csb.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "csb.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "csb.[RegionId].aliyuncs.com" + }, + { + "code": "slb", + "document_id": "27565", + "location_service_code": "slb", + "regional_endpoints": [ + { + "region": "cn-hongkong", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "slb.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "slb.ap-southeast-5.aliyuncs.com" + }, + { + "region": 
"ap-southeast-2", + "endpoint": "slb.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "slb.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "slb.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "slb.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "slb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "slb.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "slb.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "slb.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "slb.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudwf", + "document_id": "58111", + "location_service_code": "cloudwf", + "regional_endpoints": null, + "global_endpoint": "cloudwf.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudphoto", + "document_id": "59902", + "location_service_code": "cloudphoto", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "cloudphoto.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "cloudphoto.[RegionId].aliyuncs.com" + }, + { + "code": "dds", + "document_id": "61715", + "location_service_code": "dds", + "regional_endpoints": [ + { + "region": "ap-southeast-5", + "endpoint": "mongodb.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "mongodb.eu-west-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "mongodb.me-east-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "mongodb.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "mongodb.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "mongodb.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "mongodb.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "mongodb.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "mongodb.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "mongodb.cn-huhehaote.aliyuncs.com" + } + ], + "global_endpoint": "mongodb.aliyuncs.com", + "regional_endpoint_pattern": "mongodb.[RegionId].aliyuncs.com" + }, 
+ { + "code": "dm", + "document_id": "29434", + "location_service_code": "dm", + "regional_endpoints": [ + { + "region": "ap-southeast-2", + "endpoint": "dm.ap-southeast-2.aliyuncs.com" + } + ], + "global_endpoint": "dm.aliyuncs.com", + "regional_endpoint_pattern": "dm.[RegionId].aliyuncs.com" + }, + { + "code": "ons", + "document_id": "44416", + "location_service_code": "ons", + "regional_endpoints": [ + { + "region": "cn-zhangjiakou", + "endpoint": "ons.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ons.us-west-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "ons.me-east-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ons.us-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ons.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ons.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ons.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ons.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ons.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ons.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ons.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ons.eu-central-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ons.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ons.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ons.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ons.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ons.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ons.cn-qingdao.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "polardb", + "document_id": "58764", + "location_service_code": "polardb", + "regional_endpoints": [ + { + "region": "cn-qingdao", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "polardb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "polardb.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "polardb.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "polardb.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "polardb.aliyuncs.com" + }, + { + "code": "batchcompute", + "document_id": "44717", + "location_service_code": "batchcompute", + "regional_endpoints": [ + { + "region": "us-west-1", + "endpoint": "batchcompute.us-west-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "batchcompute.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "batchcompute.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "batchcompute.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "batchcompute.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "batchcompute.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-qingdao", + 
"endpoint": "batchcompute.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "batchcompute.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "batchcompute.cn-shenzhen.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "batchcompute.[RegionId].aliyuncs.com" + }, + { + "code": "cloudauth", + "document_id": "60687", + "location_service_code": "cloudauth", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cloudauth.aliyuncs.com" + } + ], + "global_endpoint": "cloudauth.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "vod", + "document_id": "60574", + "location_service_code": "vod", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "vod.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "vod.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ram", + "document_id": "28672", + "location_service_code": "ram", + "regional_endpoints": null, + "global_endpoint": "ram.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "ess", + "document_id": "25925", + "location_service_code": "ess", + "regional_endpoints": [ + { + "region": "me-east-1", + "endpoint": "ess.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ess.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ess.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ess.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ess.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ess.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "ess.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ess.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ess.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ess.eu-west-1.aliyuncs.com" + } + ], + "global_endpoint": "ess.aliyuncs.com", + "regional_endpoint_pattern": "ess.[RegionId].aliyuncs.com" + }, + { + "code": "live", + "document_id": "48207", + "location_service_code": "live", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "live.aliyuncs.com" + }, + { 
+ "region": "ap-southeast-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "live.aliyuncs.com" + } + ], + "global_endpoint": "live.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "hpc", + "document_id": "35201", + "location_service_code": "hpc", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "hpc.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "hpc.aliyuncs.com" + } + ], + "global_endpoint": "hpc.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "rds", + "document_id": "26223", + "location_service_code": "rds", + "regional_endpoints": [ + { + "region": "me-east-1", + "endpoint": "rds.me-east-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "rds.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "rds.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "rds.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "rds.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "rds.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "rds.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "rds.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "rds.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "rds.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "rds.aliyuncs.com" + } + ], + "global_endpoint": "rds.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudapi", + "document_id": "43590", + "location_service_code": "apigateway", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "apigateway.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "apigateway.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "apigateway.ap-south-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "apigateway.us-east-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "apigateway.cn-shanghai.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "apigateway.us-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "apigateway.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "apigateway.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "apigateway.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "apigateway.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "apigateway.cn-huhehaote.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "apigateway.eu-west-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "apigateway.me-east-1.aliyuncs.com" + }, + { + "region": 
"cn-hangzhou", + "endpoint": "apigateway.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "apigateway.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "apigateway.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "apigateway.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "apigateway.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "apigateway.ap-southeast-3.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "apigateway.[RegionId].aliyuncs.com" + }, + { + "code": "sas-api", + "document_id": "28498", + "location_service_code": "sas", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "sas.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cs", + "document_id": "26043", + "location_service_code": "cs", + "regional_endpoints": null, + "global_endpoint": "cs.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "jaq", + "document_id": "35037", + "location_service_code": "jaq", + "regional_endpoints": null, + "global_endpoint": "jaq.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "r-kvstore", + "document_id": "60831", + "location_service_code": "redisa", + "regional_endpoints": [ + { + "region": "cn-huhehaote", + "endpoint": "r-kvstore.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "r-kvstore.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "r-kvstore.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "r-kvstore.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "r-kvstore.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "r-kvstore.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "r-kvstore.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "r-kvstore.ap-southeast-2.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "r-kvstore.eu-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "r-kvstore.ap-southeast-5.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "r-kvstore.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "r-kvstore.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "r-kvstore.aliyuncs.com" + } + ], + "global_endpoint": "r-kvstore.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "drds", + "document_id": "51111", + "location_service_code": "drds", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "drds.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "drds.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "drds.aliyuncs.com", + "regional_endpoint_pattern": "drds.aliyuncs.com" + }, + { + "code": "waf", + "document_id": "62847", + "location_service_code": "waf", + "regional_endpoints": [ + { + 
"region": "cn-hangzhou", + "endpoint": "wafopenapi.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "sts", + "document_id": "28756", + "location_service_code": "sts", + "regional_endpoints": null, + "global_endpoint": "sts.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cr", + "document_id": "60716", + "location_service_code": "cr", + "regional_endpoints": null, + "global_endpoint": "cr.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "arms", + "document_id": "42924", + "location_service_code": "arms", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "arms.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "arms.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "arms.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "arms.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "arms.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "arms.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "arms.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "arms.[RegionId].aliyuncs.com" + }, + { + "code": "iot", + "document_id": "30557", + "location_service_code": "iot", + "regional_endpoints": [ + { + "region": "us-east-1", + "endpoint": "iot.us-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "iot.ap-northeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "iot.us-west-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "iot.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "iot.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "iot.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "iot.[RegionId].aliyuncs.com" + }, + { + "code": "vpc", + "document_id": "34962", + "location_service_code": "vpc", + "regional_endpoints": [ + { + "region": "us-west-1", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "vpc.cn-huhehaote.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "vpc.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "vpc.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "vpc.ap-southeast-3.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "vpc.eu-central-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "vpc.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "vpc.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "vpc.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "vpc.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": 
"vpc.eu-west-1.aliyuncs.com" + } + ], + "global_endpoint": "vpc.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "aegis", + "document_id": "28449", + "location_service_code": "vipaegis", + "regional_endpoints": [ + { + "region": "ap-southeast-3", + "endpoint": "aegis.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "aegis.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "aegis.cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "domain", + "document_id": "42875", + "location_service_code": "domain", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "domain.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "domain-intl.aliyuncs.com" + } + ], + "global_endpoint": "domain.aliyuncs.com", + "regional_endpoint_pattern": "domain.aliyuncs.com" + }, + { + "code": "cdn", + "document_id": "27148", + "location_service_code": "cdn", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cdn.aliyuncs.com" + } + ], + "global_endpoint": "cdn.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "qualitycheck", + "document_id": "50807", + "location_service_code": "qualitycheck", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "qualitycheck.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "emr", + "document_id": "28140", + "location_service_code": "emr", + "regional_endpoints": [ + { + "region": "us-east-1", + "endpoint": "emr.us-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "emr.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "emr.eu-central-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "emr.eu-west-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "emr.ap-south-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "emr.me-east-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "emr.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "emr.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "emr.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "emr.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "emr.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "emr.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "emr.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "emr.aliyuncs.com" + } + ], + "global_endpoint": "emr.aliyuncs.com", + "regional_endpoint_pattern": "emr.[RegionId].aliyuncs.com" + }, + { + "code": "httpdns", + "document_id": "52679", + "location_service_code": "httpdns", + "regional_endpoints": null, + "global_endpoint": "httpdns-api.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "push", + "document_id": "30074", + "location_service_code": "push", + "regional_endpoints": null, + "global_endpoint": "cloudpush.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cms", + 
"document_id": "28615", + "location_service_code": "cms", + "regional_endpoints": [ + { + "region": "cn-qingdao", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "metrics.eu-west-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "metrics.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "metrics.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "metrics.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "metrics.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "metrics.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "metrics.cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "nas", + "document_id": "62598", + "location_service_code": "nas", + "regional_endpoints": [ + { + "region": "ap-southeast-5", + "endpoint": "nas.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "nas.ap-south-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "nas.us-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "nas.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "nas.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "nas.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "nas.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "nas.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "nas.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "nas.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "nas.cn-shenzhen.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "nas.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "nas.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "nas.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "nas.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "nas.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "nas.us-east-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cds", + "document_id": "62887", + "location_service_code": "codepipeline", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": 
"cds.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "cds.cn-beijing.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "green", + "document_id": "28427", + "location_service_code": "green", + "regional_endpoints": [ + { + "region": "us-west-1", + "endpoint": "green.us-west-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "green.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "green.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "green.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "green.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "green.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "ccc", + "document_id": "63027", + "location_service_code": "ccc", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "ccc.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ccc.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "ccc.[RegionId].aliyuncs.com" + }, + { + "code": "ros", + "document_id": "28899", + "location_service_code": "ros", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ros.aliyuncs.com" + } + ], + "global_endpoint": "ros.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "mts", + "document_id": "29212", + "location_service_code": "mts", + "regional_endpoints": [ + { + "region": "ap-northeast-1", + "endpoint": "mts.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "mts.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "mts.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "mts.cn-shenzhen.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "mts.us-west-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "mts.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "mts.eu-west-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "mts.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "mts.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "mts.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "mts.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "mts.eu-central-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + } + ] +}` +var initOnce sync.Once +var data interface{} + +func getEndpointConfigData() interface{} { + initOnce.Do(func() { + err := json.Unmarshal([]byte(endpointsJson), &data) + if err != nil { + panic(fmt.Sprintf("init endpoint config data failed. %s", err)) + } + }) + return data +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go new file mode 100644 index 000000000000..160e62cb64e8 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go @@ -0,0 +1,43 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package endpoints + +import ( + "fmt" + "strings" + + "github.com/jmespath/go-jmespath" +) + +type LocalGlobalResolver struct { +} + +func (resolver *LocalGlobalResolver) GetName() (name string) { + name = "local global resolver" + return +} + +func (resolver *LocalGlobalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + // get the global endpoints configs + endpointExpression := fmt.Sprintf("products[?code=='%s'].global_endpoint", strings.ToLower(param.Product)) + endpointData, err := jmespath.Search(endpointExpression, getEndpointConfigData()) + if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 { + endpoint = endpointData.([]interface{})[0].(string) + support = len(endpoint) > 0 + return + } + support = false + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go new file mode 100644 index 000000000000..7fee64d42ae7 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
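Note: the local global resolver above is just a JMESPath query over the embedded endpoints JSON, loaded once behind a `sync.Once`. A minimal, self-contained sketch of that flow, where `sampleJSON` is an illustrative one-product subset rather than the real table:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sync"

	"github.com/jmespath/go-jmespath"
)

// Illustrative subset of the embedded endpointsJson blob (not the real data).
const sampleJSON = `{"products":[{"code":"vpc","global_endpoint":"vpc.aliyuncs.com"}]}`

var (
	loadOnce   sync.Once
	configData interface{}
)

// endpointConfig mirrors getEndpointConfigData: unmarshal once, reuse thereafter.
func endpointConfig() interface{} {
	loadOnce.Do(func() {
		if err := json.Unmarshal([]byte(sampleJSON), &configData); err != nil {
			panic(fmt.Sprintf("init endpoint config data failed. %s", err))
		}
	})
	return configData
}

func main() {
	// The same JMESPath shape LocalGlobalResolver.TryResolve builds with Sprintf.
	hit, _ := jmespath.Search("products[?code=='vpc'].global_endpoint", endpointConfig())
	fmt.Println(hit.([]interface{})[0]) // vpc.aliyuncs.com
}
```

The local regional resolver in the next file layers a second query, `[0][?region=='%s'].endpoint`, over the matched product's `regional_endpoints` array in the same way.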
+ */ + +package endpoints + +import ( + "fmt" + "strings" + + "github.com/jmespath/go-jmespath" +) + +type LocalRegionalResolver struct { +} + +func (resolver *LocalRegionalResolver) GetName() (name string) { + name = "local regional resolver" + return +} + +func (resolver *LocalRegionalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + // get the regional endpoints configs + regionalExpression := fmt.Sprintf("products[?code=='%s'].regional_endpoints", strings.ToLower(param.Product)) + regionalData, err := jmespath.Search(regionalExpression, getEndpointConfigData()) + if err == nil && regionalData != nil && len(regionalData.([]interface{})) > 0 { + endpointExpression := fmt.Sprintf("[0][?region=='%s'].endpoint", strings.ToLower(param.RegionId)) + var endpointData interface{} + endpointData, err = jmespath.Search(endpointExpression, regionalData) + if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 { + endpoint = endpointData.([]interface{})[0].(string) + support = len(endpoint) > 0 + return + } + } + support = false + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go new file mode 100644 index 000000000000..cc354cc4d97e --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go @@ -0,0 +1,176 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package endpoints + +import ( + "encoding/json" + "sync" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" +) + +const ( + // EndpointCacheExpireTime ... + EndpointCacheExpireTime = 3600 //Seconds +) + +// Cache caches endpoint for specific product and region +type Cache struct { + sync.RWMutex + cache map[string]interface{} +} + +// Get ... +func (c *Cache) Get(k string) (v interface{}) { + c.RLock() + v = c.cache[k] + c.RUnlock() + return +} + +// Set ... +func (c *Cache) Set(k string, v interface{}) { + c.Lock() + c.cache[k] = v + c.Unlock() +} + +var lastClearTimePerProduct = &Cache{cache: make(map[string]interface{})} +var endpointCache = &Cache{cache: make(map[string]interface{})} + +// LocationResolver ... 
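The `Cache` type above is a plain map guarded by an `RWMutex`: shared locks for reads, exclusive locks for writes, keyed by `Product#RegionId`. A runnable restatement (the endpoint value is a made-up example):

```go
package main

import (
	"fmt"
	"sync"
)

// Restatement of the vendored Cache: many concurrent readers, one writer.
type Cache struct {
	sync.RWMutex
	cache map[string]interface{}
}

func (c *Cache) Get(k string) (v interface{}) {
	c.RLock()
	v = c.cache[k]
	c.RUnlock()
	return
}

func (c *Cache) Set(k string, v interface{}) {
	c.Lock()
	c.cache[k] = v
	c.Unlock()
}

func main() {
	// Keys follow the "Product#RegionId" convention used by LocationResolver.
	ep := &Cache{cache: make(map[string]interface{})}
	ep.Set("oss#cn-hangzhou", "oss-cn-hangzhou.aliyuncs.com") // illustrative value
	if v, ok := ep.Get("oss#cn-hangzhou").(string); ok {
		fmt.Println(v)
	}
}
```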
+type LocationResolver struct { +} + +func (resolver *LocationResolver) GetName() (name string) { + name = "location resolver" + return +} + +// TryResolve resolves endpoint giving product and region +func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + if len(param.LocationProduct) <= 0 { + support = false + return + } + + //get from cache + cacheKey := param.Product + "#" + param.RegionId + var ok bool + endpoint, ok = endpointCache.Get(cacheKey).(string) + + if ok && len(endpoint) > 0 && !CheckCacheIsExpire(cacheKey) { + support = true + return + } + + //get from remote + getEndpointRequest := requests.NewCommonRequest() + + getEndpointRequest.Product = "Location" + getEndpointRequest.Version = "2015-06-12" + getEndpointRequest.ApiName = "DescribeEndpoints" + getEndpointRequest.Domain = "location-readonly.aliyuncs.com" + getEndpointRequest.Method = "GET" + getEndpointRequest.Scheme = requests.HTTPS + + getEndpointRequest.QueryParams["Id"] = param.RegionId + getEndpointRequest.QueryParams["ServiceCode"] = param.LocationProduct + if len(param.LocationEndpointType) > 0 { + getEndpointRequest.QueryParams["Type"] = param.LocationEndpointType + } else { + getEndpointRequest.QueryParams["Type"] = "openAPI" + } + + response, err := param.CommonApi(getEndpointRequest) + if err != nil { + support = false + return + } + + if !response.IsSuccess() { + support = false + return + } + + var getEndpointResponse GetEndpointResponse + err = json.Unmarshal([]byte(response.GetHttpContentString()), &getEndpointResponse) + if err != nil { + support = false + return + } + + if !getEndpointResponse.Success || getEndpointResponse.Endpoints == nil { + support = false + return + } + if len(getEndpointResponse.Endpoints.Endpoint) <= 0 { + support = false + return + } + if len(getEndpointResponse.Endpoints.Endpoint[0].Endpoint) > 0 { + endpoint = getEndpointResponse.Endpoints.Endpoint[0].Endpoint + endpointCache.Set(cacheKey, endpoint) + lastClearTimePerProduct.Set(cacheKey, time.Now().Unix()) + support = true + return + } + + support = false + return +} + +// CheckCacheIsExpire ... +func CheckCacheIsExpire(cacheKey string) bool { + lastClearTime, ok := lastClearTimePerProduct.Get(cacheKey).(int64) + if !ok { + return true + } + + if lastClearTime <= 0 { + lastClearTime = time.Now().Unix() + lastClearTimePerProduct.Set(cacheKey, lastClearTime) + } + + now := time.Now().Unix() + elapsedTime := now - lastClearTime + if elapsedTime > EndpointCacheExpireTime { + return true + } + + return false +} + +// GetEndpointResponse ... +type GetEndpointResponse struct { + Endpoints *EndpointsObj + RequestId string + Success bool +} + +// EndpointsObj ... +type EndpointsObj struct { + Endpoint []EndpointObj +} + +// EndpointObj ... +type EndpointObj struct { + // Protocols map[string]string + Type string + Namespace string + Id string + SerivceCode string + Endpoint string +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go new file mode 100644 index 000000000000..e39f533676b5 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go @@ -0,0 +1,48 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
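`CheckCacheIsExpire` treats a missing timestamp as expired and otherwise compares elapsed wall-clock seconds against the one-hour `EndpointCacheExpireTime`. A compact sketch of the same decision:

```go
package main

import (
	"fmt"
	"time"
)

const endpointCacheExpireTime = 3600 // seconds, same TTL as the vendored constant

// expired mirrors CheckCacheIsExpire: a missing timestamp counts as expired,
// otherwise the entry lives for at most the TTL.
func expired(lastClear int64, ok bool) bool {
	if !ok {
		return true
	}
	return time.Now().Unix()-lastClear > endpointCacheExpireTime
}

func main() {
	fmt.Println(expired(0, false))                     // true: never cached
	fmt.Println(expired(time.Now().Unix()-10, true))   // false: fresh entry
	fmt.Println(expired(time.Now().Unix()-7200, true)) // true: older than an hour
}
```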
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package endpoints + +import ( + "fmt" + "strings" +) + +const keyFormatter = "%s::%s" + +var endpointMapping = make(map[string]string) + +// AddEndpointMapping Use product id and region id as key to store the endpoint into inner map +func AddEndpointMapping(regionId, productId, endpoint string) (err error) { + key := fmt.Sprintf(keyFormatter, strings.ToLower(regionId), strings.ToLower(productId)) + endpointMapping[key] = endpoint + return nil +} + +// MappingResolver the mapping resolver type +type MappingResolver struct { +} + +// GetName get the resolver name: "mapping resolver" +func (resolver *MappingResolver) GetName() (name string) { + name = "mapping resolver" + return +} + +// TryResolve use Product and RegionId as key to find endpoint from inner map +func (resolver *MappingResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + key := fmt.Sprintf(keyFormatter, strings.ToLower(param.RegionId), strings.ToLower(param.Product)) + endpoint, contains := endpointMapping[key] + return endpoint, contains, nil +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go new file mode 100644 index 000000000000..5e1e305309d1 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go @@ -0,0 +1,98 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
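The mapping resolver is a user-override table: `AddEndpointMapping` stores endpoints under a lowercased `region::product` key, and `TryResolve` looks the same key up before any network resolution happens. A small sketch of the key scheme (the ECS endpoint is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

var endpointMapping = map[string]string{}

// Same "region::product" lowercased key scheme as the vendored mapping resolver.
func addEndpointMapping(regionID, product, endpoint string) {
	key := fmt.Sprintf("%s::%s", strings.ToLower(regionID), strings.ToLower(product))
	endpointMapping[key] = endpoint
}

func main() {
	// A user-supplied override wins before any location-service lookup runs.
	addEndpointMapping("cn-hangzhou", "Ecs", "ecs.cn-hangzhou.aliyuncs.com")
	ep, ok := endpointMapping["cn-hangzhou::ecs"]
	fmt.Println(ep, ok) // ecs.cn-hangzhou.aliyuncs.com true
}
```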
+ */ + +package endpoints + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +var debug utils.Debug + +func init() { + debug = utils.Init("sdk") +} + +const ( + ResolveEndpointUserGuideLink = "" +) + +var once sync.Once +var resolvers []Resolver + +type Resolver interface { + TryResolve(param *ResolveParam) (endpoint string, support bool, err error) + GetName() (name string) +} + +// Resolve resolve endpoint with params +// It will resolve with each supported resolver until anyone resolved +func Resolve(param *ResolveParam) (endpoint string, err error) { + supportedResolvers := getAllResolvers() + var lastErr error + for _, resolver := range supportedResolvers { + endpoint, supported, resolveErr := resolver.TryResolve(param) + if resolveErr != nil { + lastErr = resolveErr + } + + if supported { + debug("resolve endpoint with %s\n", param) + debug("\t%s by resolver(%s)\n", endpoint, resolver.GetName()) + return endpoint, nil + } + } + + // not support + errorMsg := fmt.Sprintf(errors.CanNotResolveEndpointErrorMessage, param, ResolveEndpointUserGuideLink) + err = errors.NewClientError(errors.CanNotResolveEndpointErrorCode, errorMsg, lastErr) + return +} + +func getAllResolvers() []Resolver { + once.Do(func() { + resolvers = []Resolver{ + &SimpleHostResolver{}, + &MappingResolver{}, + &LocationResolver{}, + &LocalRegionalResolver{}, + &LocalGlobalResolver{}, + } + }) + return resolvers +} + +type ResolveParam struct { + Domain string + Product string + RegionId string + LocationProduct string + LocationEndpointType string + CommonApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) `json:"-"` +} + +func (param *ResolveParam) String() string { + jsonBytes, err := json.Marshal(param) + if err != nil { + return fmt.Sprint("ResolveParam.String() process error:", err) + } + return string(jsonBytes) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go new file mode 100644 index 000000000000..9ba2346c6b1a --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go @@ -0,0 +1,33 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
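`Resolve` walks the chain assembled in `getAllResolvers` in a fixed order: simple host, mapping, location service, local regional, local global, and returns the first endpoint any resolver reports as supported. A simplified sketch of that first-match-wins loop (the `staticResolver` type and the trimmed-down parameter shape are invented for brevity):

```go
package main

import "fmt"

// Same contract as the vendored Resolver interface, with params flattened.
type Resolver interface {
	TryResolve(product, region string) (endpoint string, support bool, err error)
	GetName() string
}

type staticResolver struct{ name, ep string }

func (r staticResolver) GetName() string { return r.name }
func (r staticResolver) TryResolve(product, region string) (string, bool, error) {
	return r.ep, r.ep != "", nil
}

// resolve stops at the first resolver that reports support, exactly like
// endpoints.Resolve; later resolvers are never consulted.
func resolve(chain []Resolver, product, region string) (string, error) {
	for _, r := range chain {
		if ep, ok, _ := r.TryResolve(product, region); ok {
			return ep, nil
		}
	}
	return "", fmt.Errorf("can not resolve endpoint for %s/%s", product, region)
}

func main() {
	chain := []Resolver{
		staticResolver{"simple host resolver", ""},     // no Domain set: skipped
		staticResolver{"mapping resolver", "oss.test"}, // user mapping wins here
	}
	ep, _ := resolve(chain, "oss", "cn-hangzhou")
	fmt.Println(ep) // oss.test
}
```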
+ */ + +package endpoints + +// SimpleHostResolver the simple host resolver type +type SimpleHostResolver struct { +} + +// GetName get the resolver name: "simple host resolver" +func (resolver *SimpleHostResolver) GetName() (name string) { + name = "simple host resolver" + return +} + +// TryResolve if the Domain exist in param, use it as endpoint +func (resolver *SimpleHostResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) { + if support = len(param.Domain) > 0; support { + endpoint = param.Domain + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go new file mode 100644 index 000000000000..1e2d9c0040e1 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go @@ -0,0 +1,92 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package errors + +import "fmt" + +const ( + DefaultClientErrorStatus = 400 + DefaultClientErrorCode = "SDK.ClientError" + + UnsupportedCredentialErrorCode = "SDK.UnsupportedCredential" + UnsupportedCredentialErrorMessage = "Specified credential (type = %s) is not supported, please check" + + CanNotResolveEndpointErrorCode = "SDK.CanNotResolveEndpoint" + CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check your accessKey with secret, and read the user guide\n %s" + + UnsupportedParamPositionErrorCode = "SDK.UnsupportedParamPosition" + UnsupportedParamPositionErrorMessage = "Specified param position (%s) is not supported, please upgrade sdk and retry" + + AsyncFunctionNotEnabledCode = "SDK.AsyncFunctionNotEnabled" + AsyncFunctionNotEnabledMessage = "Async function is not enabled in client, please invoke 'client.EnableAsync' function" + + UnknownRequestTypeErrorCode = "SDK.UnknownRequestType" + UnknownRequestTypeErrorMessage = "Unknown Request Type: %s" + + MissingParamErrorCode = "SDK.MissingParam" + InvalidParamErrorCode = "SDK.InvalidParam" + + JsonUnmarshalErrorCode = "SDK.JsonUnmarshalError" + JsonUnmarshalErrorMessage = "Failed to unmarshal response, but you can get the data via response.GetHttpStatusCode() and response.GetHttpContentString()" + + TimeoutErrorCode = "SDK.TimeoutError" + TimeoutErrorMessage = "The request timed out %s times(%s for retry), perhaps we should have the threshold raised a little?" 
+) + +type ClientError struct { + errorCode string + message string + originError error +} + +func NewClientError(errorCode, message string, originErr error) Error { + return &ClientError{ + errorCode: errorCode, + message: message, + originError: originErr, + } +} + +func (err *ClientError) Error() string { + clientErrMsg := fmt.Sprintf("[%s] %s", err.ErrorCode(), err.message) + if err.originError != nil { + return clientErrMsg + "\ncaused by:\n" + err.originError.Error() + } + return clientErrMsg +} + +func (err *ClientError) OriginError() error { + return err.originError +} + +func (*ClientError) HttpStatus() int { + return DefaultClientErrorStatus +} + +func (err *ClientError) ErrorCode() string { + if err.errorCode == "" { + return DefaultClientErrorCode + } else { + return err.errorCode + } +} + +func (err *ClientError) Message() string { + return err.message +} + +func (err *ClientError) String() string { + return err.Error() +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go new file mode 100644 index 000000000000..49962f3b5ec7 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go @@ -0,0 +1,23 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package errors + +type Error interface { + error + HttpStatus() int + ErrorCode() string + Message() string + OriginError() error +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go new file mode 100644 index 000000000000..1b7810414a5e --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go @@ -0,0 +1,123 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
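Usage sketch for `ClientError`, showing the documented fallbacks: an empty code resolves to `SDK.ClientError` and the HTTP status is always 400. Assumes the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
)

func main() {
	// Wrapping a low-level failure; an empty code falls back to SDK.ClientError.
	cause := fmt.Errorf("dial tcp: i/o timeout")
	err := errors.NewClientError("", "failed to reach endpoint", cause)

	fmt.Println(err.ErrorCode())  // SDK.ClientError (the default code)
	fmt.Println(err.HttpStatus()) // 400
	fmt.Println(err.Error())      // "[SDK.ClientError] failed to reach endpoint" + "caused by:" chain
}
```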
+ */ + +package errors + +import ( + "encoding/json" + "fmt" + + "github.com/jmespath/go-jmespath" +) + +var wrapperList = []ServerErrorWrapper{ + &SignatureDostNotMatchWrapper{}, +} + +type ServerError struct { + httpStatus int + requestId string + hostId string + errorCode string + recommend string + message string + comment string +} + +type ServerErrorWrapper interface { + tryWrap(error *ServerError, wrapInfo map[string]string) bool +} + +func (err *ServerError) Error() string { + return fmt.Sprintf("SDK.ServerError\nErrorCode: %s\nRecommend: %s\nRequestId: %s\nMessage: %s", + err.errorCode, err.comment+err.recommend, err.requestId, err.message) +} + +func NewServerError(httpStatus int, responseContent, comment string) Error { + result := &ServerError{ + httpStatus: httpStatus, + message: responseContent, + comment: comment, + } + + var data interface{} + err := json.Unmarshal([]byte(responseContent), &data) + if err == nil { + requestId, _ := jmespath.Search("RequestId", data) + hostId, _ := jmespath.Search("HostId", data) + errorCode, _ := jmespath.Search("Code", data) + recommend, _ := jmespath.Search("Recommend", data) + message, _ := jmespath.Search("Message", data) + + if requestId != nil { + result.requestId = requestId.(string) + } + if hostId != nil { + result.hostId = hostId.(string) + } + if errorCode != nil { + result.errorCode = errorCode.(string) + } + if recommend != nil { + result.recommend = recommend.(string) + } + if message != nil { + result.message = message.(string) + } + } + + return result +} + +func WrapServerError(originError *ServerError, wrapInfo map[string]string) *ServerError { + for _, wrapper := range wrapperList { + ok := wrapper.tryWrap(originError, wrapInfo) + if ok { + return originError + } + } + return originError +} + +func (err *ServerError) HttpStatus() int { + return err.httpStatus +} + +func (err *ServerError) ErrorCode() string { + return err.errorCode +} + +func (err *ServerError) Message() string { + return err.message +} + +func (err *ServerError) OriginError() error { + return nil +} + +func (err *ServerError) HostId() string { + return err.hostId +} + +func (err *ServerError) RequestId() string { + return err.requestId +} + +func (err *ServerError) Recommend() string { + return err.recommend +} + +func (err *ServerError) Comment() string { + return err.comment +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go new file mode 100644 index 000000000000..4b09d7d71c99 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go @@ -0,0 +1,45 @@ +package errors + +import ( + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +const SignatureDostNotMatchErrorCode = "SignatureDoesNotMatch" +const IncompleteSignatureErrorCode = "IncompleteSignature" +const MessageContain = "server string to sign is:" + +var debug utils.Debug + +func init() { + debug = utils.Init("sdk") +} + +type SignatureDostNotMatchWrapper struct { +} + +func (*SignatureDostNotMatchWrapper) tryWrap(error *ServerError, wrapInfo map[string]string) (ok bool) { + clientStringToSign := wrapInfo["StringToSign"] + if (error.errorCode == SignatureDostNotMatchErrorCode || error.errorCode == IncompleteSignatureErrorCode) && clientStringToSign != "" { + message := error.message + if strings.Contains(message, MessageContain) { + str := 
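`NewServerError` pulls `Code`, `Message`, `RequestId`, and friends out of the response body with JMESPath; a non-JSON body simply leaves those fields empty and keeps the raw content as the message. A usage sketch with a made-up OSS-style error body:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
)

func main() {
	// Illustrative error body; the field names are the ones the constructor
	// searches for (Code, Message, RequestId, HostId, Recommend).
	body := `{"Code":"NoSuchBucket","Message":"The bucket does not exist.","RequestId":"ABC-123"}`
	err := errors.NewServerError(404, body, "")

	fmt.Println(err.ErrorCode()) // NoSuchBucket
	if se, ok := err.(*errors.ServerError); ok {
		fmt.Println(se.RequestId()) // ABC-123
	}
}
```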
strings.Split(message, MessageContain) + serverStringToSign := str[1] + + if clientStringToSign == serverStringToSign { + // user secret is error + error.recommend = "InvalidAccessKeySecret: Please check you AccessKeySecret" + } else { + debug("Client StringToSign: %s", clientStringToSign) + debug("Server StringToSign: %s", serverStringToSign) + error.recommend = "This may be a bug with the SDK and we hope you can submit this question in the " + + "github issue(https://github.com/aliyun/alibaba-cloud-sdk-go/issues), thanks very much" + } + } + ok = true + return + } + ok = false + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go new file mode 100644 index 000000000000..ad961c8fd2ce --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go @@ -0,0 +1,368 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package requests + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" +) + +const ( + RPC = "RPC" + ROA = "ROA" + + HTTP = "HTTP" + HTTPS = "HTTPS" + + DefaultHttpPort = "80" + + GET = "GET" + PUT = "PUT" + POST = "POST" + DELETE = "DELETE" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + + Json = "application/json" + Xml = "application/xml" + Raw = "application/octet-stream" + Form = "application/x-www-form-urlencoded" + + Header = "Header" + Query = "Query" + Body = "Body" + Path = "Path" + + HeaderSeparator = "\n" +) + +// interface +type AcsRequest interface { + GetScheme() string + GetMethod() string + GetDomain() string + GetPort() string + GetRegionId() string + GetHeaders() map[string]string + GetQueryParams() map[string]string + GetFormParams() map[string]string + GetContent() []byte + GetBodyReader() io.Reader + GetStyle() string + GetProduct() string + GetVersion() string + GetActionName() string + GetAcceptFormat() string + GetLocationServiceCode() string + GetLocationEndpointType() string + GetReadTimeout() time.Duration + GetConnectTimeout() time.Duration + SetReadTimeout(readTimeout time.Duration) + SetConnectTimeout(connectTimeout time.Duration) + SetHTTPSInsecure(isInsecure bool) + GetHTTPSInsecure() *bool + + GetUserAgent() map[string]string + + SetStringToSign(stringToSign string) + GetStringToSign() string + + SetDomain(domain string) + SetContent(content []byte) + SetScheme(scheme string) + BuildUrl() string + BuildQueries() string + + addHeaderParam(key, value string) + addQueryParam(key, value string) + addFormParam(key, value string) + addPathParam(key, value string) +} + +// base class +type baseRequest struct { + Scheme string + Method string + Domain string + Port string + RegionId string + ReadTimeout time.Duration + ConnectTimeout time.Duration + isInsecure *bool + + userAgent map[string]string + product string + version string + + actionName string + + AcceptFormat string + + QueryParams map[string]string + 
Headers map[string]string + FormParams map[string]string + Content []byte + + locationServiceCode string + locationEndpointType string + + queries string + + stringToSign string +} + +func (request *baseRequest) GetQueryParams() map[string]string { + return request.QueryParams +} + +func (request *baseRequest) GetFormParams() map[string]string { + return request.FormParams +} + +func (request *baseRequest) GetReadTimeout() time.Duration { + return request.ReadTimeout +} + +func (request *baseRequest) GetConnectTimeout() time.Duration { + return request.ConnectTimeout +} + +func (request *baseRequest) SetReadTimeout(readTimeout time.Duration) { + request.ReadTimeout = readTimeout +} + +func (request *baseRequest) SetConnectTimeout(connectTimeout time.Duration) { + request.ConnectTimeout = connectTimeout +} + +func (request *baseRequest) GetHTTPSInsecure() *bool { + return request.isInsecure +} + +func (request *baseRequest) SetHTTPSInsecure(isInsecure bool) { + request.isInsecure = &isInsecure +} + +func (request *baseRequest) GetContent() []byte { + return request.Content +} + +func (request *baseRequest) GetVersion() string { + return request.version +} + +func (request *baseRequest) GetActionName() string { + return request.actionName +} + +func (request *baseRequest) SetContent(content []byte) { + request.Content = content +} + +func (request *baseRequest) GetUserAgent() map[string]string { + return request.userAgent +} + +func (request *baseRequest) AppendUserAgent(key, value string) { + newkey := true + if request.userAgent == nil { + request.userAgent = make(map[string]string) + } + if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" { + for tag, _ := range request.userAgent { + if tag == key { + request.userAgent[tag] = value + newkey = false + } + } + if newkey { + request.userAgent[key] = value + } + } +} + +func (request *baseRequest) addHeaderParam(key, value string) { + request.Headers[key] = value +} + +func (request *baseRequest) addQueryParam(key, value string) { + request.QueryParams[key] = value +} + +func (request *baseRequest) addFormParam(key, value string) { + request.FormParams[key] = value +} + +func (request *baseRequest) GetAcceptFormat() string { + return request.AcceptFormat +} + +func (request *baseRequest) GetLocationServiceCode() string { + return request.locationServiceCode +} + +func (request *baseRequest) GetLocationEndpointType() string { + return request.locationEndpointType +} + +func (request *baseRequest) GetProduct() string { + return request.product +} + +func (request *baseRequest) GetScheme() string { + return request.Scheme +} + +func (request *baseRequest) SetScheme(scheme string) { + request.Scheme = scheme +} + +func (request *baseRequest) GetMethod() string { + return request.Method +} + +func (request *baseRequest) GetDomain() string { + return request.Domain +} + +func (request *baseRequest) SetDomain(host string) { + request.Domain = host +} + +func (request *baseRequest) GetPort() string { + return request.Port +} + +func (request *baseRequest) GetRegionId() string { + return request.RegionId +} + +func (request *baseRequest) GetHeaders() map[string]string { + return request.Headers +} + +func (request *baseRequest) SetContentType(contentType string) { + request.addHeaderParam("Content-Type", contentType) +} + +func (request *baseRequest) GetContentType() (contentType string, contains bool) { + contentType, contains = request.Headers["Content-Type"] + return +} + +func (request *baseRequest) SetStringToSign(stringToSign 
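`AppendUserAgent` reserves the `core` and `go` keys for the SDK itself; everything else is insert-or-overwrite (the vendored version walks the map to find an existing key, but the net effect is a plain map assignment). A standalone sketch, with a hypothetical caller key:

```go
package main

import (
	"fmt"
	"strings"
)

// Sketch of baseRequest.AppendUserAgent's rules: "core" and "go" are reserved,
// any other key is inserted or overwritten in place.
func appendUserAgent(ua map[string]string, key, value string) map[string]string {
	if ua == nil {
		ua = make(map[string]string)
	}
	lk := strings.ToLower(key)
	if lk != "core" && lk != "go" {
		ua[key] = value
	}
	return ua
}

func main() {
	ua := appendUserAgent(nil, "HashiCorp-Terraform", "0.12") // hypothetical key
	ua = appendUserAgent(ua, "go", "ignored")                 // reserved: dropped
	fmt.Println(ua)                                           // map[HashiCorp-Terraform:0.12]
}
```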
string) { + request.stringToSign = stringToSign +} + +func (request *baseRequest) GetStringToSign() string { + return request.stringToSign +} + +func defaultBaseRequest() (request *baseRequest) { + request = &baseRequest{ + Scheme: "", + AcceptFormat: "JSON", + Method: GET, + QueryParams: make(map[string]string), + Headers: map[string]string{ + "x-sdk-client": "golang/1.0.0", + "x-sdk-invoke-type": "normal", + "Accept-Encoding": "identity", + }, + FormParams: make(map[string]string), + } + return +} + +func InitParams(request AcsRequest) (err error) { + requestValue := reflect.ValueOf(request).Elem() + err = flatRepeatedList(requestValue, request, "", "") + return +} + +func flatRepeatedList(dataValue reflect.Value, request AcsRequest, position, prefix string) (err error) { + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("name") + fieldPosition := position + if fieldPosition == "" { + fieldPosition, _ = field.Tag.Lookup("position") + } + typeTag, containsTypeTag := field.Tag.Lookup("type") + if containsNameTag { + if !containsTypeTag { + // simple param + key := prefix + name + value := dataValue.Field(i).String() + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else if typeTag == "Repeated" { + // repeated param + repeatedFieldValue := dataValue.Field(i) + if repeatedFieldValue.Kind() != reflect.Slice { + // possible value: {"[]string", "*[]struct"}, we must call Elem() in the last condition + repeatedFieldValue = repeatedFieldValue.Elem() + } + if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() { + for m := 0; m < repeatedFieldValue.Len(); m++ { + elementValue := repeatedFieldValue.Index(m) + key := prefix + name + "." 
+ strconv.Itoa(m+1) + if elementValue.Type().Kind().String() == "string" { + value := elementValue.String() + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else { + err = flatRepeatedList(elementValue, request, fieldPosition, key+".") + if err != nil { + return + } + } + } + } + } + } + } + return +} + +func addParam(request AcsRequest, position, name, value string) (err error) { + if len(value) > 0 { + switch position { + case Header: + request.addHeaderParam(name, value) + case Query: + request.addQueryParam(name, value) + case Path: + request.addPathParam(name, value) + case Body: + request.addFormParam(name, value) + default: + errMsg := fmt.Sprintf(errors.UnsupportedParamPositionErrorMessage, position) + err = errors.NewClientError(errors.UnsupportedParamPositionErrorCode, errMsg, nil) + } + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go new file mode 100644 index 000000000000..1fbfee1d75a5 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go @@ -0,0 +1,106 @@ +package requests + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" +) + +type CommonRequest struct { + *baseRequest + + Version string + ApiName string + Product string + + // roa params + PathPattern string + PathParams map[string]string + + Ontology AcsRequest +} + +func NewCommonRequest() (request *CommonRequest) { + request = &CommonRequest{ + baseRequest: defaultBaseRequest(), + } + request.Headers["x-sdk-invoke-type"] = "common" + request.PathParams = make(map[string]string) + return +} + +func (request *CommonRequest) String() string { + request.TransToAcsRequest() + + resultBuilder := bytes.Buffer{} + + mapOutput := func(m map[string]string) { + if len(m) > 0 { + sortedKeys := make([]string, 0) + for k := range m { + sortedKeys = append(sortedKeys, k) + } + + // sort 'string' key in increasing order + sort.Strings(sortedKeys) + + for _, key := range sortedKeys { + resultBuilder.WriteString(key + ": " + m[key] + "\n") + } + } + } + + // Request Line + resultBuilder.WriteString(fmt.Sprintf("%s %s %s/1.1\n", request.Method, request.BuildQueries(), strings.ToUpper(request.Scheme))) + + // Headers + resultBuilder.WriteString("Host" + ": " + request.Domain + "\n") + mapOutput(request.Headers) + + resultBuilder.WriteString("\n") + // Body + if len(request.Content) > 0 { + resultBuilder.WriteString(string(request.Content) + "\n") + } else { + mapOutput(request.FormParams) + } + + return resultBuilder.String() +} + +func (request *CommonRequest) TransToAcsRequest() { + if len(request.PathPattern) > 0 { + roaRequest := &RoaRequest{} + roaRequest.initWithCommonRequest(request) + request.Ontology = roaRequest + } else { + rpcRequest := &RpcRequest{} + rpcRequest.baseRequest = request.baseRequest + rpcRequest.product = request.Product + rpcRequest.version = request.Version + rpcRequest.actionName = request.ApiName + request.Ontology = rpcRequest + } +} + +func (request *CommonRequest) BuildUrl() string { + return request.Ontology.BuildUrl() +} + +func (request *CommonRequest) BuildQueries() string { + return request.Ontology.BuildQueries() +} + +func (request *CommonRequest) GetBodyReader() io.Reader { + return request.Ontology.GetBodyReader() +} + +func (request *CommonRequest) GetStyle() string { + return request.Ontology.GetStyle() +} + +func (request *CommonRequest) addPathParam(key, value 
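`flatRepeatedList` turns struct tags into request parameters: a `name`-tagged field maps 1:1 into the position given by its `position` tag, while a `type:"Repeated"` slice fans out into `Name.1`, `Name.2`, and so on. A stripped-down rendition (the real code also checks that the tag value is literally `Repeated`, recurses into struct elements, and routes each key through `addParam`; `demoRequest` is hypothetical):

```go
package main

import (
	"fmt"
	"reflect"
)

// Hypothetical request type; the tags are the ones flatRepeatedList understands.
type demoRequest struct {
	InstanceId string   `position:"Query" name:"InstanceId"`
	Tag        []string `position:"Query" name:"Tag" type:"Repeated"`
}

// flatten: plain tagged fields map 1:1, Repeated slices become numbered keys.
func flatten(v interface{}) map[string]string {
	out := map[string]string{}
	rv := reflect.ValueOf(v).Elem()
	rt := rv.Type()
	for i := 0; i < rt.NumField(); i++ {
		name, ok := rt.Field(i).Tag.Lookup("name")
		if !ok {
			continue
		}
		if _, repeated := rt.Field(i).Tag.Lookup("type"); repeated {
			for m := 0; m < rv.Field(i).Len(); m++ {
				out[fmt.Sprintf("%s.%d", name, m+1)] = rv.Field(i).Index(m).String()
			}
			continue
		}
		out[name] = rv.Field(i).String()
	}
	return out
}

func main() {
	req := &demoRequest{InstanceId: "i-123", Tag: []string{"a", "b"}}
	fmt.Println(flatten(req)) // map[InstanceId:i-123 Tag.1:a Tag.2:b]
}
```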
string) { + request.PathParams[key] = value +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go new file mode 100644 index 000000000000..12e02cc4f517 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go @@ -0,0 +1,152 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package requests + +import ( + "bytes" + "fmt" + "io" + "net/url" + "sort" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type RoaRequest struct { + *baseRequest + pathPattern string + PathParams map[string]string +} + +func (*RoaRequest) GetStyle() string { + return ROA +} + +func (request *RoaRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } else if len(request.Content) > 0 { + return bytes.NewReader(request.Content) + } else { + return nil + } +} + +// for sign method, need not url encoded +func (request *RoaRequest) BuildQueries() string { + return request.buildQueries() +} + +func (request *RoaRequest) buildPath() string { + path := request.pathPattern + for key, value := range request.PathParams { + path = strings.Replace(path, "["+key+"]", value, 1) + } + return path +} + +func (request *RoaRequest) buildQueries() string { + // replace path params with value + path := request.buildPath() + queryParams := request.QueryParams + // sort QueryParams by key + var queryKeys []string + for key := range queryParams { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // append urlBuilder + urlBuilder := bytes.Buffer{} + urlBuilder.WriteString(path) + if len(queryKeys) > 0 { + urlBuilder.WriteString("?") + } + for i := 0; i < len(queryKeys); i++ { + queryKey := queryKeys[i] + urlBuilder.WriteString(queryKey) + if value := queryParams[queryKey]; len(value) > 0 { + urlBuilder.WriteString("=") + urlBuilder.WriteString(value) + } + if i < len(queryKeys)-1 { + urlBuilder.WriteString("&") + } + } + result := urlBuilder.String() + result = popStandardUrlencode(result) + return result +} + +func (request *RoaRequest) buildQueryString() string { + queryParams := request.QueryParams + // sort QueryParams by key + q := url.Values{} + for key, value := range queryParams { + q.Add(key, value) + } + return q.Encode() +} + +func popStandardUrlencode(stringToSign string) (result string) { + result = strings.Replace(stringToSign, "+", "%20", -1) + result = strings.Replace(result, "*", "%2A", -1) + result = strings.Replace(result, "%7E", "~", -1) + return +} + +func (request *RoaRequest) BuildUrl() string { + // for network trans, need url encoded + scheme := strings.ToLower(request.Scheme) + domain := request.Domain + port := request.Port + path := request.buildPath() + url := fmt.Sprintf("%s://%s:%s%s", scheme, domain, port, path) + querystring := 
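`popStandardUrlencode` shifts Go's form-style encoding toward the RFC 3986 flavour the signing algorithm expects: `+` becomes `%20`, `*` becomes `%2A`, and `%7E` is restored to `~` for encoders that escape the tilde (Go's current `url.Values.Encode` does not). A runnable demo:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// popStandardUrlencode as defined above, restated for a standalone run.
func popStandardUrlencode(s string) string {
	s = strings.Replace(s, "+", "%20", -1)
	s = strings.Replace(s, "*", "%2A", -1)
	return strings.Replace(s, "%7E", "~", -1)
}

func main() {
	q := url.Values{}
	q.Add("Description", "a b*c")
	fmt.Println(q.Encode())                       // Description=a+b%2Ac
	fmt.Println(popStandardUrlencode(q.Encode())) // Description=a%20b%2Ac
}
```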
request.buildQueryString() + if len(querystring) > 0 { + url = fmt.Sprintf("%s?%s", url, querystring) + } + return url +} + +func (request *RoaRequest) addPathParam(key, value string) { + request.PathParams[key] = value +} + +func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern, serviceCode, endpointType string) { + request.baseRequest = defaultBaseRequest() + request.PathParams = make(map[string]string) + request.Headers["x-acs-version"] = version + request.pathPattern = uriPattern + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType + request.product = product + //request.version = version + //request.actionName = action +} + +func (request *RoaRequest) initWithCommonRequest(commonRequest *CommonRequest) { + request.baseRequest = commonRequest.baseRequest + request.PathParams = commonRequest.PathParams + request.product = commonRequest.Product + //request.version = commonRequest.Version + request.Headers["x-acs-version"] = commonRequest.Version + //request.actionName = commonRequest.ApiName + request.pathPattern = commonRequest.PathPattern + request.locationServiceCode = "" + request.locationEndpointType = "" +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go new file mode 100644 index 000000000000..01be6fd04e6c --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go @@ -0,0 +1,79 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package requests + +import ( + "fmt" + "io" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type RpcRequest struct { + *baseRequest +} + +func (request *RpcRequest) init() { + request.baseRequest = defaultBaseRequest() + request.Method = POST +} + +func (*RpcRequest) GetStyle() string { + return RPC +} + +func (request *RpcRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } else { + return strings.NewReader("") + } +} + +func (request *RpcRequest) BuildQueries() string { + request.queries = "/?" 
+ utils.GetUrlFormedMap(request.QueryParams) + return request.queries +} + +func (request *RpcRequest) BuildUrl() string { + url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain) + if len(request.Port) > 0 { + url = fmt.Sprintf("%s:%s", url, request.Port) + } + return url + request.BuildQueries() +} + +func (request *RpcRequest) GetVersion() string { + return request.version +} + +func (request *RpcRequest) GetActionName() string { + return request.actionName +} + +func (request *RpcRequest) addPathParam(key, value string) { + panic("not support") +} + +func (request *RpcRequest) InitWithApiInfo(product, version, action, serviceCode, endpointType string) { + request.init() + request.product = product + request.version = version + request.actionName = action + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go new file mode 100644 index 000000000000..28af63ea1014 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go @@ -0,0 +1,53 @@ +package requests + +import "strconv" + +type Integer string + +func NewInteger(integer int) Integer { + return Integer(strconv.Itoa(integer)) +} + +func (integer Integer) HasValue() bool { + return integer != "" +} + +func (integer Integer) GetValue() (int, error) { + return strconv.Atoi(string(integer)) +} + +func NewInteger64(integer int64) Integer { + return Integer(strconv.FormatInt(integer, 10)) +} + +func (integer Integer) GetValue64() (int64, error) { + return strconv.ParseInt(string(integer), 10, 0) +} + +type Boolean string + +func NewBoolean(bool bool) Boolean { + return Boolean(strconv.FormatBool(bool)) +} + +func (boolean Boolean) HasValue() bool { + return boolean != "" +} + +func (boolean Boolean) GetValue() (bool, error) { + return strconv.ParseBool(string(boolean)) +} + +type Float string + +func NewFloat(f float64) Float { + return Float(strconv.FormatFloat(f, 'f', 6, 64)) +} + +func (float Float) HasValue() bool { + return float != "" +} + +func (float Float) GetValue() (float64, error) { + return strconv.ParseFloat(string(float), 64) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go new file mode 100644 index 000000000000..4c9570198150 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go @@ -0,0 +1,332 @@ +package responses + +import ( + "encoding/json" + "io" + "math" + "strconv" + "strings" + "sync" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +var jsonParser jsoniter.API +var initJson = &sync.Once{} + +func initJsonParserOnce() { + initJson.Do(func() { + registerBetterFuzzyDecoder() + jsonParser = jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + }) +} + +func registerBetterFuzzyDecoder() { + jsoniter.RegisterTypeDecoder("string", &nullableFuzzyStringDecoder{}) + jsoniter.RegisterTypeDecoder("bool", &fuzzyBoolDecoder{}) + jsoniter.RegisterTypeDecoder("float32", &nullableFuzzyFloat32Decoder{}) + jsoniter.RegisterTypeDecoder("float64", &nullableFuzzyFloat64Decoder{}) + jsoniter.RegisterTypeDecoder("int", 
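End to end, a `CommonRequest` with no `PathPattern` takes the RPC branch of `TransToAcsRequest`, so `BuildUrl` concatenates scheme, domain, and the `/?k=v&...` query string shown above. This sketch mirrors the `DescribeEndpoints` call the location resolver constructs (parameter order in the printed URL depends on `utils.GetUrlFormedMap`):

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
)

func main() {
	// Same construction LocationResolver uses; no PathPattern means RPC style.
	req := requests.NewCommonRequest()
	req.Product = "Location"
	req.Version = "2015-06-12"
	req.ApiName = "DescribeEndpoints"
	req.Domain = "location-readonly.aliyuncs.com"
	req.Method = "GET"
	req.Scheme = requests.HTTPS
	req.QueryParams["Id"] = "cn-hangzhou"
	req.QueryParams["ServiceCode"] = "oss"
	req.QueryParams["Type"] = "openAPI"

	req.TransToAcsRequest()
	// e.g. https://location-readonly.aliyuncs.com/?Id=cn-hangzhou&ServiceCode=oss&Type=openAPI
	fmt.Println(req.BuildUrl())
}
```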
&nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxInt) || val < float64(minInt) { + iter.ReportError("fuzzy decode int", "exceed range") + return + } + *((*int)(ptr)) = int(val) + } else { + *((*int)(ptr)) = iter.ReadInt() + } + }}) + jsoniter.RegisterTypeDecoder("uint", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxUint) || val < 0 { + iter.ReportError("fuzzy decode uint", "exceed range") + return + } + *((*uint)(ptr)) = uint(val) + } else { + *((*uint)(ptr)) = iter.ReadUint() + } + }}) + jsoniter.RegisterTypeDecoder("int8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt8) || val < float64(math.MinInt8) { + iter.ReportError("fuzzy decode int8", "exceed range") + return + } + *((*int8)(ptr)) = int8(val) + } else { + *((*int8)(ptr)) = iter.ReadInt8() + } + }}) + jsoniter.RegisterTypeDecoder("uint8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint8) || val < 0 { + iter.ReportError("fuzzy decode uint8", "exceed range") + return + } + *((*uint8)(ptr)) = uint8(val) + } else { + *((*uint8)(ptr)) = iter.ReadUint8() + } + }}) + jsoniter.RegisterTypeDecoder("int16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt16) || val < float64(math.MinInt16) { + iter.ReportError("fuzzy decode int16", "exceed range") + return + } + *((*int16)(ptr)) = int16(val) + } else { + *((*int16)(ptr)) = iter.ReadInt16() + } + }}) + jsoniter.RegisterTypeDecoder("uint16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint16) || val < 0 { + iter.ReportError("fuzzy decode uint16", "exceed range") + return + } + *((*uint16)(ptr)) = uint16(val) + } else { + *((*uint16)(ptr)) = iter.ReadUint16() + } + }}) + jsoniter.RegisterTypeDecoder("int32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt32) || val < float64(math.MinInt32) { + iter.ReportError("fuzzy decode int32", "exceed range") + return + } + *((*int32)(ptr)) = int32(val) + } else { + *((*int32)(ptr)) = iter.ReadInt32() + } + }}) + jsoniter.RegisterTypeDecoder("uint32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint32) || val < 0 { + iter.ReportError("fuzzy decode uint32", "exceed range") + return + } + *((*uint32)(ptr)) = uint32(val) + } else { + *((*uint32)(ptr)) = iter.ReadUint32() + } + }}) + jsoniter.RegisterTypeDecoder("int64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt64) || val < float64(math.MinInt64) { + iter.ReportError("fuzzy decode int64", "exceed range") + return + } + *((*int64)(ptr)) = int64(val) + } else { + *((*int64)(ptr)) = iter.ReadInt64() + } + }}) + jsoniter.RegisterTypeDecoder("uint64", 
&nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint64) || val < 0 { + iter.ReportError("fuzzy decode uint64", "exceed range") + return + } + *((*uint64)(ptr)) = uint64(val) + } else { + *((*uint64)(ptr)) = iter.ReadUint64() + } + }}) +} + +type nullableFuzzyStringDecoder struct { +} + +func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + *((*string)(ptr)) = string(number) + case jsoniter.StringValue: + *((*string)(ptr)) = iter.ReadString() + case jsoniter.BoolValue: + *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool()) + case jsoniter.NilValue: + iter.ReadNil() + *((*string)(ptr)) = "" + default: + iter.ReportError("fuzzyStringDecoder", "not number or string or bool") + } +} + +type fuzzyBoolDecoder struct { +} + +func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.BoolValue: + *((*bool)(ptr)) = iter.ReadBool() + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + num, err := number.Int64() + if err != nil { + iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed") + } + if num == 0 { + *((*bool)(ptr)) = false + } else { + *((*bool)(ptr)) = true + } + case jsoniter.StringValue: + strValue := strings.ToLower(iter.ReadString()) + if strValue == "true" { + *((*bool)(ptr)) = true + } else if strValue == "false" || strValue == "" { + *((*bool)(ptr)) = false + } else { + iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue) + } + case jsoniter.NilValue: + iter.ReadNil() + *((*bool)(ptr)) = false + default: + iter.ReportError("fuzzyBoolDecoder", "not number or string or nil") + } +} + +type nullableFuzzyIntegerDecoder struct { + fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) +} + +func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + str = string(number) + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + str = "0" + } + case jsoniter.BoolValue: + if iter.ReadBool() { + str = "1" + } else { + str = "0" + } + case jsoniter.NilValue: + iter.ReadNil() + str = "0" + default: + iter.ReportError("fuzzyIntegerDecoder", "not number or string") + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + isFloat := strings.IndexByte(str, '.') != -1 + decoder.fun(isFloat, ptr, newIter) + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } +} + +type nullableFuzzyFloat32Decoder struct { +} + +func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float32)(ptr)) = iter.ReadFloat32() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float32)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float32)(ptr)) = newIter.ReadFloat32() + if 
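The fuzzy decoders exist because the APIs sometimes quote numbers, use `0`/`1` for booleans, or send `null` where a typed field is expected; stock `encoding/json` rejects all of that. A small demonstration of the failure mode they paper over (the payload is invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type demo struct {
	Count int
	Ready bool
}

func main() {
	// A quoted number and a 0/1 boolean: plain encoding/json refuses both,
	// which is exactly what the registered fuzzy decoders tolerate.
	payload := []byte(`{"Count":"42","Ready":1}`)

	var d demo
	err := json.Unmarshal(payload, &d)
	fmt.Println(err) // json: cannot unmarshal string into Go struct field demo.Count of type int

	// With the fuzzy decoders registered, the same payload yields Count=42, Ready=true.
}
```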
newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float32 + if iter.ReadBool() { + *((*float32)(ptr)) = 1 + } else { + *((*float32)(ptr)) = 0 + } + case jsoniter.NilValue: + iter.ReadNil() + *((*float32)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string") + } +} + +type nullableFuzzyFloat64Decoder struct { +} + +func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float64)(ptr)) = iter.ReadFloat64() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float64)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float64)(ptr)) = newIter.ReadFloat64() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float64 + if iter.ReadBool() { + *((*float64)(ptr)) = 1 + } else { + *((*float64)(ptr)) = 0 + } + case jsoniter.NilValue: + // support empty string + iter.ReadNil() + *((*float64)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string") + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go new file mode 100644 index 000000000000..dd6ae5b4c308 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go @@ -0,0 +1,152 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
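The fuzzy decoders registered above let the SDK tolerate responses where a field arrives as a number, a quoted number, a boolean, or null. A minimal standalone sketch of the same idea, assuming a simplified decoder registered globally with jsoniter (the vendored registrations are wired to the package's private parser, so this is illustrative rather than the SDK's own wiring):

package main

import (
	"fmt"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

// fuzzyIntDecoder accepts both 42 and "42" for an int field,
// mirroring the nullableFuzzyIntegerDecoder above in spirit.
type fuzzyIntDecoder struct{}

func (d *fuzzyIntDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
	if iter.WhatIsNext() == jsoniter.StringValue {
		str := iter.ReadString()
		if str == "" { // empty string decodes as zero
			str = "0"
		}
		sub := iter.Pool().BorrowIterator([]byte(str))
		defer iter.Pool().ReturnIterator(sub)
		*((*int)(ptr)) = sub.ReadInt()
		return
	}
	*((*int)(ptr)) = iter.ReadInt()
}

func main() {
	jsoniter.RegisterTypeDecoder("int", &fuzzyIntDecoder{})
	var v struct{ Count int }
	_ = jsoniter.Unmarshal([]byte(`{"Count":"12"}`), &v)
	fmt.Println(v.Count) // 12
}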
+ */ + +package responses + +import ( + "bytes" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type AcsResponse interface { + IsSuccess() bool + GetHttpStatus() int + GetHttpHeaders() map[string][]string + GetHttpContentString() string + GetHttpContentBytes() []byte + GetOriginHttpResponse() *http.Response + parseFromHttpResponse(httpResponse *http.Response) error +} + +var debug utils.Debug + +func init() { + debug = utils.Init("sdk") +} +// Unmarshal object from http response body to target Response +func Unmarshal(response AcsResponse, httpResponse *http.Response, format string) (err error) { + err = response.parseFromHttpResponse(httpResponse) + if err != nil { + return + } + if !response.IsSuccess() { + err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), "") + return + } + + if _, isCommonResponse := response.(*CommonResponse); isCommonResponse { + // common response need not unmarshal + return + } + + if len(response.GetHttpContentBytes()) == 0 { + return + } + + if strings.ToUpper(format) == "JSON" { + initJsonParserOnce() + err = jsonParser.Unmarshal(response.GetHttpContentBytes(), response) + if err != nil { + err = errors.NewClientError(errors.JsonUnmarshalErrorCode, errors.JsonUnmarshalErrorMessage, err) + } + } else if strings.ToUpper(format) == "XML" { + err = xml.Unmarshal(response.GetHttpContentBytes(), response) + } + return +} + +type BaseResponse struct { + httpStatus int + httpHeaders map[string][]string + httpContentString string + httpContentBytes []byte + originHttpResponse *http.Response +} + +func (baseResponse *BaseResponse) GetHttpStatus() int { + return baseResponse.httpStatus +} + +func (baseResponse *BaseResponse) GetHttpHeaders() map[string][]string { + return baseResponse.httpHeaders +} + +func (baseResponse *BaseResponse) GetHttpContentString() string { + return baseResponse.httpContentString +} + +func (baseResponse *BaseResponse) GetHttpContentBytes() []byte { + return baseResponse.httpContentBytes +} + +func (baseResponse *BaseResponse) GetOriginHttpResponse() *http.Response { + return baseResponse.originHttpResponse +} + +func (baseResponse *BaseResponse) IsSuccess() bool { + if baseResponse.GetHttpStatus() >= 200 && baseResponse.GetHttpStatus() < 300 { + return true + } + + return false +} + +func (baseResponse *BaseResponse) parseFromHttpResponse(httpResponse *http.Response) (err error) { + defer httpResponse.Body.Close() + body, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return + } + debug("%s", string(body)) + baseResponse.httpStatus = httpResponse.StatusCode + baseResponse.httpHeaders = httpResponse.Header + baseResponse.httpContentBytes = body + baseResponse.httpContentString = string(body) + baseResponse.originHttpResponse = httpResponse + return +} + +func (baseResponse *BaseResponse) String() string { + resultBuilder := bytes.Buffer{} + // statusCode + // resultBuilder.WriteString("\n") + resultBuilder.WriteString(fmt.Sprintf("%s %s\n", baseResponse.originHttpResponse.Proto, baseResponse.originHttpResponse.Status)) + // httpHeaders + //resultBuilder.WriteString("Headers:\n") + for key, value := range baseResponse.httpHeaders { + resultBuilder.WriteString(key + ": " + strings.Join(value, ";") + "\n") + } + resultBuilder.WriteString("\n") + // content + //resultBuilder.WriteString("Content:\n") + resultBuilder.WriteString(baseResponse.httpContentString + "\n") 
+ return resultBuilder.String() +} + +type CommonResponse struct { + *BaseResponse +} + +func NewCommonResponse() (response *CommonResponse) { + return &CommonResponse{ + BaseResponse: &BaseResponse{}, + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go new file mode 100644 index 000000000000..09440d27be91 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go @@ -0,0 +1,36 @@ +package utils + +import ( + "fmt" + "os" + "strings" +) + +type Debug func(format string, v ...interface{}) + +var hookGetEnv = func() string { + return os.Getenv("DEBUG") +} + +var hookPrint = func(input string) { + fmt.Println(input) +} + +func Init(flag string) Debug { + enable := false + + env := hookGetEnv() + parts := strings.Split(env, ",") + for _, part := range parts { + if part == flag { + enable = true + break + } + } + + return func(format string, v ...interface{}) { + if enable { + hookPrint(fmt.Sprintf(format, v...)) + } + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go new file mode 100644 index 000000000000..378e5010640a --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go @@ -0,0 +1,87 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "net/url" + "reflect" + "strconv" + "time" + + "github.com/satori/go.uuid" +) + +func GetUUIDV4() (uuidHex string) { + uuidV4 := uuid.NewV4() + uuidHex = hex.EncodeToString(uuidV4.Bytes()) + return +} + +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +func GetTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +func GetTimeInFormatRFC2616() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("Mon, 02 Jan 2006 15:04:05 GMT") +} + +func GetUrlFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func InitStructWithDefaultTag(bean interface{}) { + configType := reflect.TypeOf(bean) + for i := 0; i < configType.Elem().NumField(); i++ { + field := configType.Elem().Field(i) + defaultValue := field.Tag.Get("default") + if defaultValue == "" { + continue + } + setter := reflect.ValueOf(bean).Elem().Field(i) + switch field.Type.String() { + case "int": + intValue, _ := strconv.ParseInt(defaultValue, 10, 64) + setter.SetInt(intValue) + case "time.Duration": + intValue, _ := strconv.ParseInt(defaultValue, 10, 64) + setter.SetInt(intValue) + case "string": + setter.SetString(defaultValue) + case "bool": + boolValue, _ := strconv.ParseBool(defaultValue) + setter.SetBool(boolValue) + } + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go new file mode 100644 index 000000000000..64ae84c93618 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go @@ -0,0 +1,81 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
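Of the helpers above, InitStructWithDefaultTag is worth a closer look: it walks a struct's fields and writes each `default` tag value through reflection, covering the int, time.Duration, string, and bool cases in its switch. A small sketch with a hypothetical config type (the tag spellings match those switch cases):

package main

import (
	"fmt"
	"time"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
)

// RetryConfig is a hypothetical type used only for illustration.
type RetryConfig struct {
	MaxAttempts int           `default:"3"`
	Timeout     time.Duration `default:"10000000000"` // parsed as nanoseconds, i.e. 10s
	Region      string        `default:"cn-hangzhou"`
	Debug       bool          `default:"false"`
}

func main() {
	cfg := &RetryConfig{}
	utils.InitStructWithDefaultTag(cfg)
	fmt.Println(cfg.MaxAttempts, cfg.Timeout, cfg.Region) // 3 10s cn-hangzhou
}

Note that every tagged field is overwritten unconditionally, so the function is meant for freshly allocated structs rather than for filling in gaps.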
+ +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" +) + +// Client is the sdk client struct, each func corresponds to an OpenAPI +type Client struct { + sdk.Client +} + +// NewClient creates a sdk client with environment variables +func NewClient() (client *Client, err error) { + client = &Client{} + err = client.Init() + return +} + +// NewClientWithOptions creates a sdk client with regionId/sdkConfig/credential +// this is the common api to create a sdk client +func NewClientWithOptions(regionId string, config *sdk.Config, credential auth.Credential) (client *Client, err error) { + client = &Client{} + err = client.InitWithOptions(regionId, config, credential) + return +} + +// NewClientWithAccessKey is a shortcut to create sdk client with accesskey +// usage: https://help.aliyun.com/document_detail/66217.html +func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) { + client = &Client{} + err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret) + return +} + +// NewClientWithStsToken is a shortcut to create sdk client with sts token +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken) + return +} + +// NewClientWithRamRoleArn is a shortcut to create sdk client with ram roleArn +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) + return +} + +// NewClientWithEcsRamRole is a shortcut to create sdk client with ecs ram role +// usage: https://help.aliyun.com/document_detail/66223.html +func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithEcsRamRole(regionId, roleName) + return +} + +// NewClientWithRsaKeyPair is a shortcut to create sdk client with rsa key pair +// attention: rsa key pair auth is only Japan regions available +func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) { + client = &Client{} + err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration) + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go new file mode 100644 index 000000000000..06b3d6ed28c0 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go @@ -0,0 +1,111 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeEndpoint invokes the location.DescribeEndpoint API synchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +func (client *Client) DescribeEndpoint(request *DescribeEndpointRequest) (response *DescribeEndpointResponse, err error) { + response = CreateDescribeEndpointResponse() + err = client.DoAction(request, response) + return +} + +// DescribeEndpointWithChan invokes the location.DescribeEndpoint API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointWithChan(request *DescribeEndpointRequest) (<-chan *DescribeEndpointResponse, <-chan error) { + responseChan := make(chan *DescribeEndpointResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeEndpoint(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeEndpointWithCallback invokes the location.DescribeEndpoint API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointWithCallback(request *DescribeEndpointRequest, callback func(response *DescribeEndpointResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeEndpointResponse + var err error + defer close(result) + response, err = client.DescribeEndpoint(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeEndpointRequest is the request struct for api DescribeEndpoint +type DescribeEndpointRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` + ServiceCode string `position:"Query" name:"ServiceCode"` + Id string `position:"Query" name:"Id"` +} + +// DescribeEndpointResponse is the response struct for api DescribeEndpoint +type DescribeEndpointResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Id string `json:"Id" xml:"Id"` + Namespace string `json:"Namespace" xml:"Namespace"` + SerivceCode string `json:"SerivceCode" xml:"SerivceCode"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInDescribeEndpoint `json:"Protocols" xml:"Protocols"` +} + +// CreateDescribeEndpointRequest creates a request to invoke DescribeEndpoint API +func CreateDescribeEndpointRequest() (request *DescribeEndpointRequest) { + request = &DescribeEndpointRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeEndpoint", "location", "openAPI") + return +} + +// CreateDescribeEndpointResponse creates a response to parse from 
DescribeEndpoint response +func CreateDescribeEndpointResponse() (response *DescribeEndpointResponse) { + response = &DescribeEndpointResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go new file mode 100644 index 000000000000..95c6e7873985 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go @@ -0,0 +1,107 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeEndpoints invokes the location.DescribeEndpoints API synchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +func (client *Client) DescribeEndpoints(request *DescribeEndpointsRequest) (response *DescribeEndpointsResponse, err error) { + response = CreateDescribeEndpointsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeEndpointsWithChan invokes the location.DescribeEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointsWithChan(request *DescribeEndpointsRequest) (<-chan *DescribeEndpointsResponse, <-chan error) { + responseChan := make(chan *DescribeEndpointsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeEndpoints(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeEndpointsWithCallback invokes the location.DescribeEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointsWithCallback(request *DescribeEndpointsRequest, callback func(response *DescribeEndpointsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeEndpointsResponse + var err error + defer close(result) + response, err = client.DescribeEndpoints(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeEndpointsRequest is the request struct for api DescribeEndpoints +type DescribeEndpointsRequest struct { + *requests.RpcRequest + ServiceCode string 
`position:"Query" name:"ServiceCode"` + Id string `position:"Query" name:"Id"` + Type string `position:"Query" name:"Type"` +} + +// DescribeEndpointsResponse is the response struct for api DescribeEndpoints +type DescribeEndpointsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + Endpoints Endpoints `json:"Endpoints" xml:"Endpoints"` +} + +// CreateDescribeEndpointsRequest creates a request to invoke DescribeEndpoints API +func CreateDescribeEndpointsRequest() (request *DescribeEndpointsRequest) { + request = &DescribeEndpointsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeEndpoints", "location", "openAPI") + return +} + +// CreateDescribeEndpointsResponse creates a response to parse from DescribeEndpoints response +func CreateDescribeEndpointsResponse() (response *DescribeEndpointsResponse) { + response = &DescribeEndpointsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go new file mode 100644 index 000000000000..824829dc7798 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go @@ -0,0 +1,105 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeRegions invokes the location.DescribeRegions API synchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +func (client *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) { + response = CreateDescribeRegionsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeRegionsWithChan invokes the location.DescribeRegions API asynchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeRegionsWithChan(request *DescribeRegionsRequest) (<-chan *DescribeRegionsResponse, <-chan error) { + responseChan := make(chan *DescribeRegionsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeRegions(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeRegionsWithCallback invokes the location.DescribeRegions API asynchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeRegionsWithCallback(request *DescribeRegionsRequest, callback func(response *DescribeRegionsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeRegionsResponse + var err error + defer close(result) + response, err = client.DescribeRegions(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeRegionsRequest is the request struct for api DescribeRegions +type DescribeRegionsRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` +} + +// DescribeRegionsResponse is the response struct for api DescribeRegions +type DescribeRegionsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + TotalCount int `json:"TotalCount" xml:"TotalCount"` + RegionIds RegionIds `json:"RegionIds" xml:"RegionIds"` +} + +// CreateDescribeRegionsRequest creates a request to invoke DescribeRegions API +func CreateDescribeRegionsRequest() (request *DescribeRegionsRequest) { + request = &DescribeRegionsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeRegions", "location", "openAPI") + return +} + +// CreateDescribeRegionsResponse creates a response to parse from DescribeRegions response +func CreateDescribeRegionsResponse() (response *DescribeRegionsResponse) { + response = &DescribeRegionsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go new file mode 100644 index 000000000000..af185ecfe96b --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go @@ -0,0 +1,105 @@ +package location + 
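Besides the synchronous form, every generated operation also ships WithChan and WithCallback variants built on AddAsyncTask. A fragment showing the channel variant; it assumes the client's async task pool has been started (EnableAsync is the SDK client's switch for that, and the pool sizes here are arbitrary):

// Fragment, not a full program; client is a *location.Client as above.
client.EnableAsync(2, 8) // routine pool size, task queue size

respChan, errChan := client.DescribeRegionsWithChan(location.CreateDescribeRegionsRequest())
select {
case resp := <-respChan:
	fmt.Println(resp.TotalCount, resp.RegionIds.RegionIds)
case err := <-errChan:
	log.Println(err)
}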
+//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeServices invokes the location.DescribeServices API synchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +func (client *Client) DescribeServices(request *DescribeServicesRequest) (response *DescribeServicesResponse, err error) { + response = CreateDescribeServicesResponse() + err = client.DoAction(request, response) + return +} + +// DescribeServicesWithChan invokes the location.DescribeServices API asynchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeServicesWithChan(request *DescribeServicesRequest) (<-chan *DescribeServicesResponse, <-chan error) { + responseChan := make(chan *DescribeServicesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeServices(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeServicesWithCallback invokes the location.DescribeServices API asynchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeServicesWithCallback(request *DescribeServicesRequest, callback func(response *DescribeServicesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeServicesResponse + var err error + defer close(result) + response, err = client.DescribeServices(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeServicesRequest is the request struct for api DescribeServices +type DescribeServicesRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` +} + +// DescribeServicesResponse is the response struct for api DescribeServices +type DescribeServicesResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + TotalCount int `json:"TotalCount" xml:"TotalCount"` + Services Services `json:"Services" xml:"Services"` +} + +// CreateDescribeServicesRequest creates a request to invoke DescribeServices API +func CreateDescribeServicesRequest() (request *DescribeServicesRequest) { + request = &DescribeServicesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", 
"2015-06-12", "DescribeServices", "location", "openAPI") + return +} + +// CreateDescribeServicesResponse creates a response to parse from DescribeServices response +func CreateDescribeServicesResponse() (response *DescribeServicesResponse) { + response = &DescribeServicesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go new file mode 100644 index 000000000000..6b11d0303271 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go @@ -0,0 +1,107 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// ListEndpoints invokes the location.ListEndpoints API synchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +func (client *Client) ListEndpoints(request *ListEndpointsRequest) (response *ListEndpointsResponse, err error) { + response = CreateListEndpointsResponse() + err = client.DoAction(request, response) + return +} + +// ListEndpointsWithChan invokes the location.ListEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsWithChan(request *ListEndpointsRequest) (<-chan *ListEndpointsResponse, <-chan error) { + responseChan := make(chan *ListEndpointsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ListEndpoints(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ListEndpointsWithCallback invokes the location.ListEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsWithCallback(request *ListEndpointsRequest, callback func(response *ListEndpointsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ListEndpointsResponse + var err error + defer close(result) + response, err = client.ListEndpoints(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ListEndpointsRequest is the request struct for api ListEndpoints +type ListEndpointsRequest struct { + *requests.RpcRequest + 
Namespace string `position:"Query" name:"Namespace"` + Id string `position:"Query" name:"Id"` + SerivceCode string `position:"Query" name:"SerivceCode"` +} + +// ListEndpointsResponse is the response struct for api ListEndpoints +type ListEndpointsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + EndpointList EndpointListInListEndpoints `json:"EndpointList" xml:"EndpointList"` +} + +// CreateListEndpointsRequest creates a request to invoke ListEndpoints API +func CreateListEndpointsRequest() (request *ListEndpointsRequest) { + request = &ListEndpointsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "ListEndpoints", "location", "openAPI") + return +} + +// CreateListEndpointsResponse creates a response to parse from ListEndpoints response +func CreateListEndpointsResponse() (response *ListEndpointsResponse) { + response = &ListEndpointsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go new file mode 100644 index 000000000000..e5d753038b58 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go @@ -0,0 +1,105 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
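Each generated file follows the same triple: a Create*Request constructor that stamps product, version, and action via InitWithApiInfo, a DoAction round trip, and a Create*Response shell for the parser to fill. A fragment using ListEndpoints (note that the SerivceCode field really is spelled that way in the generated code):

// Fragment; client is a *location.Client, and the values are placeholders.
req := location.CreateListEndpointsRequest()
req.SerivceCode = "oss" // the generated field name carries this upstream misspelling
req.Id = "cn-hangzhou"

resp, err := client.ListEndpoints(req)
if err != nil {
	log.Fatal(err)
}
if resp.Success {
	for _, ep := range resp.EndpointList.ItemEndpoint {
		fmt.Println(ep.Product, ep.Endpoint)
	}
}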
+ +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// ListEndpointsByIp invokes the location.ListEndpointsByIp API synchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +func (client *Client) ListEndpointsByIp(request *ListEndpointsByIpRequest) (response *ListEndpointsByIpResponse, err error) { + response = CreateListEndpointsByIpResponse() + err = client.DoAction(request, response) + return +} + +// ListEndpointsByIpWithChan invokes the location.ListEndpointsByIp API asynchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsByIpWithChan(request *ListEndpointsByIpRequest) (<-chan *ListEndpointsByIpResponse, <-chan error) { + responseChan := make(chan *ListEndpointsByIpResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ListEndpointsByIp(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ListEndpointsByIpWithCallback invokes the location.ListEndpointsByIp API asynchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsByIpWithCallback(request *ListEndpointsByIpRequest, callback func(response *ListEndpointsByIpResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ListEndpointsByIpResponse + var err error + defer close(result) + response, err = client.ListEndpointsByIp(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ListEndpointsByIpRequest is the request struct for api ListEndpointsByIp +type ListEndpointsByIpRequest struct { + *requests.RpcRequest + Ip string `position:"Query" name:"Ip"` +} + +// ListEndpointsByIpResponse is the response struct for api ListEndpointsByIp +type ListEndpointsByIpResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + EndpointList EndpointListInListEndpointsByIp `json:"EndpointList" xml:"EndpointList"` +} + +// CreateListEndpointsByIpRequest creates a request to invoke ListEndpointsByIp API +func CreateListEndpointsByIpRequest() (request *ListEndpointsByIpRequest) { + request = &ListEndpointsByIpRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "ListEndpointsByIp", "location", "openAPI") + return +} + +// CreateListEndpointsByIpResponse creates a response to parse from ListEndpointsByIp response +func CreateListEndpointsByIpResponse() (response *ListEndpointsByIpResponse) { + response = &ListEndpointsByIpResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go new file mode 100644 index 000000000000..1856a795a70a --- /dev/null +++ 
b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go @@ -0,0 +1,26 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Endpoint is a nested struct in location response +type Endpoint struct { + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Id string `json:"Id" xml:"Id"` + Namespace string `json:"Namespace" xml:"Namespace"` + SerivceCode string `json:"SerivceCode" xml:"SerivceCode"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInDescribeEndpoints `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go new file mode 100644 index 000000000000..b41f43f54ef2 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// EndpointListInListEndpoints is a nested struct in location response +type EndpointListInListEndpoints struct { + ItemEndpoint []ItemEndpoint `json:"ItemEndpoint" xml:"ItemEndpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go new file mode 100644 index 000000000000..75a1ea0cb3b6 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// EndpointListInListEndpointsByIp is a nested struct in location response +type EndpointListInListEndpointsByIp struct { + ItemEndpoint []ItemEndpoint `json:"ItemEndpoint" xml:"ItemEndpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go new file mode 100644 index 000000000000..e55882a65aa1 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Endpoints is a nested struct in location response +type Endpoints struct { + Endpoint []Endpoint `json:"Endpoint" xml:"Endpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go new file mode 100644 index 000000000000..099768f1dae9 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go @@ -0,0 +1,26 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
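These one-field wrapper types mirror how the Location API nests its arrays one level deep. A sketch of the JSON shape flowing into DescribeEndpointsResponse (the payload values are made up; plain encoding/json works here because the generated structs carry json tags):

// Fragment; assumes imports "encoding/json", "fmt", "log" and the location package.
const body = `{
  "RequestId": "ABC-123",
  "Success": true,
  "Endpoints": {"Endpoint": [{"Endpoint": "oss-cn-hangzhou.aliyuncs.com", "SerivceCode": "oss"}]}
}`

var resp location.DescribeEndpointsResponse
if err := json.Unmarshal([]byte(body), &resp); err != nil {
	log.Fatal(err)
}
fmt.Println(resp.Endpoints.Endpoint[0].Endpoint) // oss-cn-hangzhou.aliyuncs.com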
+ +// ItemEndpoint is a nested struct in location response +type ItemEndpoint struct { + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Product string `json:"Product" xml:"Product"` + Namespace string `json:"Namespace" xml:"Namespace"` + Id string `json:"Id" xml:"Id"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInListEndpointsByIp `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go new file mode 100644 index 000000000000..d1d2d794d088 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInDescribeEndpoint is a nested struct in location response +type ProtocolsInDescribeEndpoint struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go new file mode 100644 index 000000000000..c79d4da87e95 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +// ProtocolsInDescribeEndpoints is a nested struct in location response +type ProtocolsInDescribeEndpoints struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go new file mode 100644 index 000000000000..610069146919 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInListEndpoints is a nested struct in location response +type ProtocolsInListEndpoints struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go new file mode 100644 index 000000000000..962d2c04c3e6 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInListEndpointsByIp is a nested struct in location response +type ProtocolsInListEndpointsByIp struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go new file mode 100644 index 000000000000..0f1fbe3bd79c --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// RegionIds is a nested struct in location response +type RegionIds struct { + RegionIds []string `json:"RegionIds" xml:"RegionIds"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go new file mode 100644 index 000000000000..b3e934c59845 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Services is a nested struct in location response +type Services struct { + Services []string `json:"Services" xml:"Services"` +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go new file mode 100644 index 000000000000..ee90591b2eda --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go @@ -0,0 +1,97 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "hash" + "io" + "net/http" + "sort" + "strings" +) + +// headerSorter defines the key-value structure for storing the sorted data in signHeader. +type headerSorter struct { + Keys []string + Vals []string +} + +// signHeader signs the header and sets it as the authorization header. 
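What getSignedStr assembles below is the classic OSS header signature: verb, Content-MD5, Content-Type, Date, the sorted x-oss-* headers, and the canonicalized resource, joined by newlines and HMAC-SHA1-signed with the secret key. A worked fragment with illustrative values:

// Fragment with made-up values; getSignedStr below builds exactly this shape.
stringToSign := "PUT\n" + // HTTP verb
	"\n" + // Content-MD5, empty here
	"application/octet-stream\n" + // Content-Type
	"Wed, 28 Nov 2018 10:00:00 GMT\n" + // Date
	"x-oss-meta-author:alice\n" + // sorted x-oss-* headers
	"/mybucket/terraform.tfstate" // canonicalized resource

mac := hmac.New(sha1.New, []byte(accessKeySecret))
mac.Write([]byte(stringToSign))
signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
authorization := "OSS " + accessKeyID + ":" + signature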
+func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { + // Get the final authorization string + authorizationStr := "OSS " + conn.config.AccessKeyID + ":" + conn.getSignedStr(req, canonicalizedResource) + + // Give the parameter "Authorization" value + req.Header.Set(HTTPHeaderAuthorization, authorizationStr) +} + +func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string) string { + // Find out the "x-oss-"'s address in header of the request + temp := make(map[string]string) + + for k, v := range req.Header { + if strings.HasPrefix(strings.ToLower(k), "x-oss-") { + temp[strings.ToLower(k)] = v[0] + } + } + hs := newHeaderSorter(temp) + + // Sort the temp by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, date is expires + date := req.Header.Get(HTTPHeaderDate) + contentType := req.Header.Get(HTTPHeaderContentType) + contentMd5 := req.Header.Get(HTTPHeaderContentMD5) + + signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(conn.config.AccessKeySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + return signedStr +} + +// newHeaderSorter is an additional function for function SignHeader. +func newHeaderSorter(m map[string]string) *headerSorter { + hs := &headerSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *headerSorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *headerSorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. +func (hs *headerSorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. +func (hs *headerSorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go new file mode 100644 index 000000000000..067855e0993f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go @@ -0,0 +1,973 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/xml" + "fmt" + "hash" + "hash/crc64" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// Bucket implements the operations of object. +type Bucket struct { + Client Client + BucketName string +} + +// PutObject creates a new object and it will overwrite the original one if it exists already. +// +// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\". +// reader io.Reader instance for reading the data for uploading +// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding +// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details. 
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { + opts := addContentType(options, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// PutObjectFromFile creates a new object from the local file. +// +// objectKey object key. +// filePath the local file path to upload. +// options the options for uploading the object. Refer to the parameter options in PutObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + opts := addContentType(options, filePath, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: fd, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// DoPutObject does the actual upload work. +// +// request the request instance for uploading an object. +// options the options for uploading an object. +// +// Response the response from OSS. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { + isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType) + if !isOptSet { + options = addContentType(options, request.ObjectKey) + } + + listener := getProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener) + if err != nil { + return nil, err + } + + if bucket.getConfig().IsEnableCRC { + err = checkCRC(resp, "DoPutObject") + if err != nil { + return resp, err + } + } + + err = checkRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// GetObject downloads the object. +// +// objectKey the object key. +// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch, +// IfNoneMatch, AcceptEncoding. For more details, please check out: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return nil, err + } + + return result.Response, nil +} + +// GetObjectToFile downloads the data to a local file. +// +// objectKey the object key to download. +// filePath the local file to store the object data. +// options the options for downloading the object. Refer to the parameter options in method GetObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Calls the API to actually download the object. 
Returns the result instance. + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compares the CRC value + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = checkCRC(result.Response, "GetObjectToFile") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs. +// +// request the request to download the object. +// options the options for downloading the file. Checks out the parameter options in method GetObject. +// +// GetObjectResult the result instance of getting the object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { + params, _ := getRawParams(options) + resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(crcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := getProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// CopyObject copies the object inside the bucket. +// +// srcObjectKey the source object to copy. +// destObjectKey the target object to copy. +// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch, +// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective. +// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details : +// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + params := map[string]interface{}{} + resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// CopyObjectTo copies the object to another bucket. 
+//
+// srcObjectKey source object key. The source bucket is Bucket.BucketName.
+// destBucketName target bucket name.
+// destObjectKey target object name.
+// options copy options, check out parameter options in function CopyObject for more details.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) {
+	return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+//
+// CopyObjectFrom copies an object from another bucket into the current bucket.
+//
+// srcBucketName source bucket name.
+// srcObjectKey source object name.
+// destObjectKey target object name. The target bucket name is Bucket.BucketName.
+// options copy options. Check out parameter options in function CopyObject.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	destBucketName := bucket.BucketName
+	var out CopyObjectResult
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return out, err
+	}
+
+	return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...)
+}
+
+func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) {
+	var out CopyObjectResult
+	options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey)))
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return out, err
+	}
+	params := map[string]interface{}{}
+	resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// AppendObject uploads data by appending it to an existing or new object.
+//
+// AppendObject the parameter appendPosition specifies which position (in the target object) to append at. For the first append (to a non-existing file),
+// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length.
+// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes of data, then the second call's position is 65536.
+// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller does not need to track this information).
+//
+// objectKey the target object to append to.
+// reader io.Reader instance providing the data to append.
+// appendPosition the start position to append at.
+// options the options for the first append, such as CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL.
+//
+// int64 the next append position, it's valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) {
+	request := &AppendObjectRequest{
+		ObjectKey: objectKey,
+		Reader:    reader,
+		Position:  appendPosition,
+	}
+
+	result, err := bucket.DoAppendObject(request, options)
+	if err != nil {
+		return appendPosition, err
+	}
+
+	return result.NextPosition, err
+}
+
+// DoAppendObject is the actual API that does the object append.
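Before the low-level DoAppendObject documented below, a sketch of the append-position bookkeeping from the caller's side (client construction omitted; the key name is a placeholder, and the import path is assumed to be the upstream github.com/aliyun/aliyun-oss-go-sdk/oss):

package example

import (
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// appendTwice chains two appends: the first starts at position 0 and each
// call returns the offset at which the next append must start.
func appendTwice(bucket *oss.Bucket) error {
	next, err := bucket.AppendObject("log/app.log", strings.NewReader("first chunk\n"), 0)
	if err != nil {
		return err
	}

	// next equals the current object length, i.e. the required position of
	// the second append.
	_, err = bucket.AppendObject("log/app.log", strings.NewReader("second chunk\n"), next)
	return err
}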
+//
+// request the request object for the append operation.
+// options the options for appending the object.
+//
+// AppendObjectResult the result object for the append operation.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) {
+	params := map[string]interface{}{}
+	params["append"] = nil
+	params["position"] = strconv.FormatInt(request.Position, 10)
+	headers := make(map[string]string)
+
+	opts := addContentType(options, request.ObjectKey)
+	handleOptions(headers, opts)
+
+	var initCRC uint64
+	isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64)
+	if isCRCSet {
+		initCRC = initCRCOpt.(uint64)
+	}
+
+	listener := getProgressListener(options)
+
+	resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers,
+		request.Reader, initCRC, listener)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64)
+	result := &AppendObjectResult{
+		NextPosition: nextPosition,
+		CRC:          resp.ServerCRC,
+	}
+
+	if bucket.getConfig().IsEnableCRC && isCRCSet {
+		err = checkCRC(resp, "AppendObject")
+		if err != nil {
+			return result, err
+		}
+	}
+
+	return result, nil
+}
+
+// DeleteObject deletes the object.
+//
+// objectKey the object key to delete.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DeleteObject(objectKey string) error {
+	params := map[string]interface{}{}
+	resp, err := bucket.do("DELETE", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// DeleteObjects deletes multiple objects.
+//
+// objectKeys the object keys to delete.
+// options the options for deleting objects.
+// The supported option is DeleteObjectsQuiet, which means it will not return an error even if a deletion failed (not recommended). By default it's not used.
+//
+// DeleteObjectsResult the result object.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) {
+	out := DeleteObjectsResult{}
+	dxml := deleteXML{}
+	for _, key := range objectKeys {
+		dxml.Objects = append(dxml.Objects, DeleteObject{Key: key})
+	}
+	isQuiet, _ := findOption(options, deleteObjectsQuiet, false)
+	dxml.Quiet = isQuiet.(bool)
+
+	bs, err := xml.Marshal(dxml)
+	if err != nil {
+		return out, err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	options = append(options, ContentType(contentType))
+	sum := md5.Sum(bs)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	options = append(options, ContentMD5(b64))
+
+	params := map[string]interface{}{}
+	params["delete"] = nil
+	params["encoding-type"] = "url"
+
+	resp, err := bucket.do("POST", "", params, options, buffer, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	if !dxml.Quiet {
+		if err = xmlUnmarshal(resp.Body, &out); err == nil {
+			err = decodeDeleteObjectsResult(&out)
+		}
+	}
+	return out, err
+}
+
+// IsObjectExist checks if the object exists.
+//
+// bool flag of the object's existence (true: exists; false: does not exist), valid when error is nil.
+//
+// error it's nil if no error, otherwise it's an error object.
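A sketch of driving the batch delete above. This assumes the SDK exports a DeleteObjectsQuiet option matching the doc comment, and that the result type exposes a DeletedObjects field listing the removed keys; both names are assumptions here, and the key names are placeholders:

package example

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// deleteBatch removes several keys in a single request.
func deleteBatch(bucket *oss.Bucket) error {
	res, err := bucket.DeleteObjects(
		[]string{"tmp/a.txt", "tmp/b.txt"},
		oss.DeleteObjectsQuiet(false), // report per-key results
	)
	if err != nil {
		return err
	}
	fmt.Println("deleted:", res.DeletedObjects) // field name assumed
	return nil
}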
+// +func (bucket Bucket) IsObjectExist(objectKey string) (bool, error) { + _, err := bucket.GetObjectMeta(objectKey) + if err == nil { + return true, nil + } + + switch err.(type) { + case ServiceError: + if err.(ServiceError).StatusCode == 404 { + return false, nil + } + } + + return false, err +} + +// ListObjects lists the objects under the current bucket. +// +// options it contains all the filters for listing objects. +// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names. +// The key marker means the returned objects' key must be greater than it in lexicographic order. +// +// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21, +// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns +// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns +// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects. +// The three filters could be used together to achieve filter and paging functionality. +// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders). +// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties. +// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects. +// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix. +// +// For common usage scenario, check out sample/list_object.go. +// +// ListObjectsResponse the return value after operation succeeds (only valid when error is nil). +// +func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) { + var out ListObjectsResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectsResult(&out) + return out, err +} + +// SetObjectMeta sets the metadata of the Object. +// +// objectKey object +// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, and custom metadata. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error { + options = append(options, MetadataDirective(MetaReplace)) + _, err := bucket.CopyObject(objectKey, objectKey, options...) + return err +} + +// GetObjectDetailedMeta gets the object's detailed metadata +// +// objectKey object key. +// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. Valid options are IfModifiedSince, IfUnmodifiedSince, +// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html +// +// http.Header object meta when error is nil. 
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) {
+	params := map[string]interface{}{}
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+// GetObjectMeta gets object metadata.
+//
+// GetObjectMeta is more lightweight than GetObjectDetailedMeta, as it only returns basic metadata including ETag,
+// size and LastModified. The size information is in the HTTP header Content-Length.
+//
+// objectKey object key
+//
+// http.Header the object's metadata, valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) {
+	params := map[string]interface{}{}
+	params["objectMeta"] = nil
+	resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return resp.Headers, nil
+}
+
+// SetObjectACL updates the object's ACL.
+//
+// Only the bucket's owner can update the object's ACL, which takes priority over the bucket's ACL.
+// For example, if the bucket ACL is private and the object's ACL is public-read-write, then the object's ACL is used,
+// which means all users could read or write that object.
+// When the object's ACL is not set, the bucket's ACL is used as the object's ACL.
+//
+// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object;
+// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects,
+// CompleteMultipartUpload and CopyObject on the target object.
+//
+// objectKey the target object key (to set the ACL on)
+// objectACL object ACL. Valid values are ACLPrivate, ACLPublicRead, ACLPublicReadWrite.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType) error {
+	options := []Option{ObjectACL(objectACL)}
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetObjectACL gets the object's ACL.
+//
+// objectKey the object to get the ACL from.
+//
+// GetObjectACLResult the result object when error is nil. GetObjectACLResult.ACL is the object ACL.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) GetObjectACL(objectKey string) (GetObjectACLResult, error) {
+	var out GetObjectACLResult
+	params := map[string]interface{}{}
+	params["acl"] = nil
+	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// PutSymlink creates a symlink (to point to an existing object).
+//
+// Symlink cannot point to another symlink.
+// When creating a symlink, it does not check the existence of the target file, and does not check if the target file is a symlink.
+// Nor does it check the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
+// If the symlink key already exists and the caller has write permission, the existing object will be overwritten.
+// If x-oss-meta- headers are specified, they will be added as the metadata of the symlink file.
+//
+// symObjectKey the symlink object's key.
+// targetObjectKey the target object key to point to.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+	params := map[string]interface{}{}
+	params["symlink"] = nil
+	resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetSymlink gets the symlink object with the specified key.
+// If the symlink object does not exist, returns 404.
+//
+// objectKey the symlink object's key.
+//
+// error it's nil if no error, otherwise it's an error object.
+// When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
+//
+func (bucket Bucket) GetSymlink(objectKey string) (http.Header, error) {
+	params := map[string]interface{}{}
+	params["symlink"] = nil
+	resp, err := bucket.do("GET", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+	targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+	if err != nil {
+		return resp.Headers, err
+	}
+	resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+	return resp.Headers, err
+}
+
+// RestoreObject restores the object from archive storage.
+//
+// An archive object is in cold status by default and it cannot be accessed.
+// When restore is called on a cold object, it will become available for access after some time.
+// If multiple restores are called on the same object while it is being restored, the server does nothing for the additional calls but still returns success.
+// By default, the restored object is available for access for one day. After that it will be unavailable again.
+// If RestoreObject is called again after the object is restored, the access window is extended by one day, up to 7 days in total.
+//
+// objectKey object key to restore.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) RestoreObject(objectKey string) error {
+	params := map[string]interface{}{}
+	params["restore"] = nil
+	resp, err := bucket.do("POST", objectKey, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// SignURL signs a URL. Users could access the object directly with this URL without needing the access key.
+//
+// objectKey the target object to sign for.
+// method the HTTP method the URL is signed for.
+// expiredInSec the expiration time of the signed URL, in seconds.
+// options the options for the signed URL.
+//
+// string returns the signed URL, when error is nil.
+// error it's nil if no error, otherwise it's an error object.
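The presigned-URL flow implemented below can be driven end to end like this: sign once, then hand the URL to a party that holds no credentials. A sketch assuming the SDK's HTTPPut method constant (the key name and expiry are illustrative):

package example

import (
	"fmt"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// shareUpload signs a PUT URL valid for one hour and uploads through it.
func shareUpload(bucket *oss.Bucket) error {
	signedURL, err := bucket.SignURL("drop/incoming.txt", oss.HTTPPut, 3600)
	if err != nil {
		return err
	}
	fmt.Println("hand this to the uploader:", signedURL)

	// Any holder of the URL can now upload without the access key.
	return bucket.PutObjectWithURL(signedURL, strings.NewReader("payload"))
}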
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+	if expiredInSec < 0 {
+		return "", fmt.Errorf("invalid expires: %d, expires must not be negative", expiredInSec)
+	}
+	expiration := time.Now().Unix() + expiredInSec
+
+	params, err := getRawParams(options)
+	if err != nil {
+		return "", err
+	}
+
+	headers := make(map[string]string)
+	err = handleOptions(headers, options)
+	if err != nil {
+		return "", err
+	}
+
+	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
+
+// PutObjectWithURL uploads an object with the signed URL. If the object exists, it will be overwritten.
+// PutObjectWithURL does not infer the MIME type from the key name.
+//
+// signedURL signed URL.
+// reader io.Reader instance providing the data to upload.
+// options the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
+// Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
+// PutObjectFromFileWithURL does not infer the MIME type from the object key's name or the local file name.
+//
+// signedURL the signed URL.
+// filePath local file path, such as dirfile.txt, for uploading.
+// options options for uploading, same as the options in the PutObject function.
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// DoPutObjectWithURL is the actual API that does the upload with a signed URL (internal to the SDK).
+//
+// signedURL the signed URL.
+// reader io.Reader instance providing the data to upload.
+// options options for uploading.
+//
+// Response the response object which contains the HTTP response.
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+	listener := getProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoPutObjectWithURL")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+// GetObjectWithURL downloads the object with the signed URL and returns the reader instance.
+//
+// signedURL the signed URL.
+// options options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
+// IfNoneMatch, AcceptEncoding.
For more information, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser the reader object for getting the data from response. It needs be closed after the usage. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return nil, err + } + return result.Response, nil +} + +// GetObjectToFileWithURL downloads the object into a local file with the signed URL. +// +// signedURL the signed URL +// filePath the local file path to download to. +// options the options for downloading object. Check out the parameter options in function GetObject for the reference. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Get the object's content + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the file does not exist, create one. If exists, then overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Save the data to the file. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compare the CRC value. If CRC values do not match, return error. + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + + if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = checkCRC(result.Response, "GetObjectToFileWithURL") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObjectWithURL is the actual API that downloads the file with the signed URL. +// +// signedURL the signed URL. +// options the options for getting object. Check out parameter options in GetObject for the reference. +// +// GetObjectResult the result object when the error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) { + params, _ := getRawParams(options) + resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(crcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := getProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// +// ProcessObject apply process on the specified image file. +// +// The supported process includes resize, rotate, crop, watermark, format, +// udf, customized style, etc. 
+//
+//
+// objectKey object key to process.
+// process process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) ProcessObject(objectKey string, process string) (ProcessObjectResult, error) {
+	var out ProcessObjectResult
+	params := map[string]interface{}{}
+	params["x-oss-process"] = nil
+	processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
+	data := strings.NewReader(processData)
+	resp, err := bucket.do("POST", objectKey, params, nil, data, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = jsonUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// Private
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+	return bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+		params, headers, data, 0, listener)
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+	return bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener)
+}
+
+func (bucket Bucket) getConfig() *Config {
+	return bucket.Client.Config
+}
+
+func addContentType(options []Option, keys ...string) []Option {
+	typ := TypeByExtension("")
+	for _, key := range keys {
+		typ = TypeByExtension(key)
+		if typ != "" {
+			break
+		}
+	}
+
+	if typ == "" {
+		typ = "application/octet-stream"
+	}
+
+	opts := []Option{ContentType(typ)}
+	opts = append(opts, options...)
+
+	return opts
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
new file mode 100644
index 000000000000..d29351c87539
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go
@@ -0,0 +1,775 @@
+// Package oss implements functions for accessing the OSS service.
+// It has two main structs, Client and Bucket.
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Client is the SDK's entry point. It handles bucket-related operations such as create/delete/set bucket (e.g. set/get ACL, lifecycle, referer, logging, website).
+// Object-related operations are done by the Bucket type.
+// Use oss.New to create a Client instance.
+//
+type (
+	// Client OSS client
+	Client struct {
+		Config     *Config      // OSS client configuration
+		Conn       *Conn        // Send HTTP request
+		HTTPClient *http.Client // http.Client to use; if nil the client makes its own
+	}
+
+	// ClientOption client option such as UseCname, Timeout, SecurityToken.
+	ClientOption func(*Client)
+)
+
+// New creates a new client.
+//
+// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com.
+// accessKeyID access key ID.
+// accessKeySecret access key secret.
+//
+// Client the new client instance; the returned value is valid when error is nil.
+// error it's nil if no error, otherwise it's an error object.
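Taken together, the Client/Bucket split looks like this in application code; a minimal end-to-end sketch where the endpoint, credentials, and names are placeholders, and the ACL option constructor is assumed from the CreateBucket doc comment:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("http://oss-cn-hangzhou.aliyuncs.com", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}

	// Bucket-level operations go through the Client.
	if err = client.CreateBucket("my-terraform-states", oss.ACL(oss.ACLPrivate)); err != nil {
		panic(err)
	}

	// Object-level operations go through a Bucket handle.
	bucket, err := client.Bucket("my-terraform-states")
	if err != nil {
		panic(err)
	}

	if err = bucket.PutObject("demo/terraform.tfstate", strings.NewReader(`{"version": 3}`)); err != nil {
		panic(err)
	}

	body, err := bucket.GetObject("demo/terraform.tfstate")
	if err != nil {
		panic(err)
	}
	defer body.Close()

	data, _ := ioutil.ReadAll(body)
	fmt.Printf("read back %d bytes\n", len(data))
}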
+// +func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) { + // Configuration + config := getDefaultOssConfig() + config.Endpoint = endpoint + config.AccessKeyID = accessKeyID + config.AccessKeySecret = accessKeySecret + + // URL parse + url := &urlMaker{} + url.Init(config.Endpoint, config.IsCname, config.IsUseProxy) + + // HTTP connect + conn := &Conn{config: config, url: url} + + // OSS client + client := &Client{ + Config: config, + Conn: conn, + } + + // Client options parse + for _, option := range options { + option(client) + } + + // Create HTTP connection + err := conn.init(config, url, client.HTTPClient) + + return client, err +} + +// Bucket gets the bucket instance. +// +// bucketName the bucket name. +// Bucket the bucket object, when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) Bucket(bucketName string) (*Bucket, error) { + return &Bucket{ + client, + bucketName, + }, nil +} + +// CreateBucket creates a bucket. +// +// bucketName the bucket name, it's globably unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-'). +// It must start with lowercase letter or number and the length can only be between 3 and 255. +// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate. +// It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) CreateBucket(bucketName string, options ...Option) error { + headers := make(map[string]string) + handleOptions(headers, options) + + buffer := new(bytes.Buffer) + + isOptSet, val, _ := isOptionSet(options, storageClass) + if isOptSet { + cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)} + bs, err := xml.Marshal(cbConfig) + if err != nil { + return err + } + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers[HTTPHeaderContentType] = contentType + } + + params := map[string]interface{}{} + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// ListBuckets lists buckets of the current account under the given endpoint, with optional filters. +// +// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter. +// And marker makes sure the returned buckets' name are greater than it in lexicographic order. +// Maxkeys limits the max keys to return, and by default it's 100 and up to 1000. +// For the common usage scenario, please check out list_bucket.go in the sample. +// ListBucketsResponse the response object if error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) { + var out ListBucketsResult + + params, err := getRawParams(options) + if err != nil { + return out, err + } + + resp, err := client.do("GET", "", params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// IsBucketExist checks if the bucket exists +// +// bucketName the bucket name. 
+// +// bool true if it exists, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) IsBucketExist(bucketName string) (bool, error) { + listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1)) + if err != nil { + return false, err + } + + if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName { + return true, nil + } + return false, nil +} + +// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts). +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucket(bucketName string) error { + params := map[string]interface{}{} + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLocation gets the bucket location. +// +// Checks out the following link for more information : +// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html +// +// bucketName the bucket name +// +// string bucket's datacenter location +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLocation(bucketName string) (string, error) { + params := map[string]interface{}{} + params["location"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var LocationConstraint string + err = xmlUnmarshal(resp.Body, &LocationConstraint) + return LocationConstraint, err +} + +// SetBucketACL sets bucket's ACL. +// +// bucketName the bucket name +// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error { + headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)} + params := map[string]interface{}{} + resp, err := client.do("PUT", bucketName, params, headers, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketACL gets the bucket ACL. +// +// bucketName the bucket name. +// +// GetBucketAclResponse the result object, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) { + var out GetBucketACLResult + params := map[string]interface{}{} + params["acl"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLifecycle sets the bucket's lifecycle. +// +// For more information, checks out following link: +// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html +// +// bucketName the bucket name. +// rules the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively. +// Check out sample/bucket_lifecycle.go for more details. +// +// error it's nil if no error, otherwise it's an error object. 
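The bucket ACL setter and getter earlier in this file pair up as a simple round trip. A sketch; the bucket name is a placeholder and the ACL field name on the result type is assumed:

package example

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// makeReadable opens a bucket for anonymous reads and verifies the change.
func makeReadable(client *oss.Client) error {
	if err := client.SetBucketACL("my-bucket", oss.ACLPublicRead); err != nil {
		return err
	}
	res, err := client.GetBucketACL("my-bucket")
	if err != nil {
		return err
	}
	fmt.Println("bucket ACL is now:", res.ACL) // field name assumed
	return nil
}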
+// +func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error { + lxml := lifecycleXML{Rules: convLifecycleRule(rules)} + bs, err := xml.Marshal(lxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLifecycle deletes the bucket's lifecycle. +// +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketLifecycle(bucketName string) error { + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLifecycle gets the bucket's lifecycle settings. +// +// bucketName the bucket name. +// +// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) { + var out GetBucketLifecycleResult + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer. +// +// To avoid stealing link on OSS data, OSS supports the HTTP referrer header. A whitelist referrer could be set either by API or web console, as well as +// the allowing empty referrer flag. Note that this applies to requests from webbrowser only. +// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket. +// For more information, please check out this link : +// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html +// +// bucketName the bucket name. +// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards. +// The sample could be found in sample/bucket_referer.go +// allowEmptyReferer the flag of allowing empty referrer. By default it's true. +// +// error it's nil if no error, otherwise it's an error object. 
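A sketch of the referer whitelist implemented below; per the doc comment, each entry supports one '*' and multiple '?' wildcards (bucket and domain names are placeholders):

package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// lockDownReferer only admits requests that arrive from example.com pages and
// rejects requests carrying no Referer header at all.
func lockDownReferer(client *oss.Client) error {
	return client.SetBucketReferer("my-bucket",
		[]string{"http://www.example.com", "http://*.example.com"}, false)
}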
+// +func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error { + rxml := RefererXML{} + rxml.AllowEmptyReferer = allowEmptyReferer + if referers == nil { + rxml.RefererList = append(rxml.RefererList, "") + } else { + for _, referer := range referers { + rxml.RefererList = append(rxml.RefererList, referer) + } + } + + bs, err := xml.Marshal(rxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReferer gets the bucket's referrer white list. +// +// bucketName the bucket name. +// +// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) { + var out GetBucketRefererResult + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLogging sets the bucket logging settings. +// +// OSS could automatically store the access log. Only the bucket owner could enable the logging. +// Once enabled, OSS would save all the access log into hourly log files in a specified bucket. +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html +// +// bucketName bucket name to enable the log. +// targetBucket the target bucket name to store the log files. +// targetPrefix the log files' prefix. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string, + isEnable bool) error { + var err error + var bs []byte + if isEnable { + lxml := LoggingXML{} + lxml.LoggingEnabled.TargetBucket = targetBucket + lxml.LoggingEnabled.TargetPrefix = targetPrefix + bs, err = xml.Marshal(lxml) + } else { + lxml := loggingXMLEmpty{} + bs, err = xml.Marshal(lxml) + } + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket. +// +// bucketName the bucket name to disable the logging. +// +// error it's nil if no error, otherwise it's an error object. 
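Driving the logging switch above: enabling it stores hourly access logs in a target bucket under a prefix, while passing false clears the configuration. A sketch with placeholder names:

package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// enableAccessLogs stores hourly access logs for my-bucket under
// logs/my-bucket/ in a separate audit bucket.
func enableAccessLogs(client *oss.Client) error {
	return client.SetBucketLogging("my-bucket", "my-audit-bucket", "logs/my-bucket/", true)
}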
+// +func (client Client) DeleteBucketLogging(bucketName string) error { + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLogging gets the bucket's logging settings +// +// bucketName the bucket name +// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) { + var out GetBucketLoggingResult + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketWebsite sets the bucket's static website's index and error page. +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// indexDocument index page. +// errorDocument error page. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error { + wxml := WebsiteXML{} + wxml.IndexDocument.Suffix = indexDocument + wxml.ErrorDocument.Key = errorDocument + + bs, err := xml.Marshal(wxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketWebsite deletes the bucket's static web site settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketWebsite(bucketName string) error { + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketWebsite gets the bucket's default page (index page) and the error page. +// +// bucketName the bucket name +// +// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. 
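A sketch of the static-website toggle above (bucket and page names are placeholders):

package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// hostSite serves index.html for directory requests and 404.html on misses.
func hostSite(client *oss.Client) error {
	return client.SetBucketWebsite("my-site-bucket", "index.html", "404.html")
}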
+// +func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) { + var out GetBucketWebsiteResult + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketCORS sets the bucket's CORS rules +// +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html +// +// bucketName the bucket name +// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error { + corsxml := CORSXML{} + for _, v := range corsRules { + cr := CORSRule{} + cr.AllowedMethod = v.AllowedMethod + cr.AllowedOrigin = v.AllowedOrigin + cr.AllowedHeader = v.AllowedHeader + cr.ExposeHeader = v.ExposeHeader + cr.MaxAgeSeconds = v.MaxAgeSeconds + corsxml.CORSRules = append(corsxml.CORSRules, cr) + } + + bs, err := xml.Marshal(corsxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketCORS deletes the bucket's static website settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketCORS(bucketName string) error { + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketCORS gets the bucket's CORS settings. +// +// bucketName the bucket name. +// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) { + var out GetBucketCORSResult + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetBucketInfo gets the bucket information. +// +// bucketName the bucket name. +// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) { + var out GetBucketInfoResult + params := map[string]interface{}{} + params["bucketInfo"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// UseCname sets the flag of using CName. By default it's false. 
+// +// isUseCname true: the endpoint has the CName, false: the endpoint does not have cname. Default is false. +// +func UseCname(isUseCname bool) ClientOption { + return func(client *Client) { + client.Config.IsCname = isUseCname + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// Timeout sets the HTTP timeout in seconds. +// +// connectTimeoutSec HTTP timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended) +// readWriteTimeout HTTP read or write's timeout in seconds. Default is 20 seconds. 0 means infinite. +// +func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption { + return func(client *Client) { + client.Config.HTTPTimeout.ConnectTimeout = + time.Second * time.Duration(connectTimeoutSec) + client.Config.HTTPTimeout.ReadWriteTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.HeaderTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.IdleConnTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.LongTimeout = + time.Second * time.Duration(readWriteTimeout*10) + } +} + +// SecurityToken sets the temporary user's SecurityToken. +// +// token STS token +// +func SecurityToken(token string) ClientOption { + return func(client *Client) { + client.Config.SecurityToken = strings.TrimSpace(token) + } +} + +// EnableMD5 enables MD5 validation. +// +// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation. +// +func EnableMD5(isEnableMD5 bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableMD5 = isEnableMD5 + } +} + +// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB. +// +// threshold the memory threshold in bytes. When the uploaded content is more than 16MB, the temp file is used for computing the MD5. +// +func MD5ThresholdCalcInMemory(threshold int64) ClientOption { + return func(client *Client) { + client.Config.MD5Threshold = threshold + } +} + +// EnableCRC enables the CRC checksum. Default is true. +// +// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum. +// +func EnableCRC(isEnableCRC bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableCRC = isEnableCRC + } +} + +// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2). +// +// userAgent the user agent string. +// +func UserAgent(userAgent string) ClientOption { + return func(client *Client) { + client.Config.UserAgent = userAgent + } +} + +// Proxy sets the proxy (optional). The default is not using proxy. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// +func Proxy(proxyHost string) ClientOption { + return func(client *Client) { + client.Config.IsUseProxy = true + client.Config.ProxyHost = proxyHost + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// AuthProxy sets the proxy information with user name and password. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// proxyUser the proxy user name. +// proxyPassword the proxy password. 
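The ClientOption values defined in this file compose at construction time; each one mutates the configuration before the connection is initialized. A sketch wiring several of them together (endpoint, credentials, token, and proxy address are placeholders):

package example

import "github.com/aliyun/aliyun-oss-go-sdk/oss"

// newTunedClient builds a client with shorter timeouts, temporary STS
// credentials, and an authenticated forward proxy.
func newTunedClient() (*oss.Client, error) {
	return oss.New(
		"http://oss-cn-hangzhou.aliyuncs.com",
		"<access-key-id>", "<access-key-secret>",
		oss.Timeout(10, 120),             // 10s connect, 120s read/write
		oss.SecurityToken("<sts-token>"), // only needed for STS credentials
		oss.AuthProxy("proxy.example.com:8080", "<user>", "<password>"),
	)
}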
+// +func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption { + return func(client *Client) { + client.Config.IsUseProxy = true + client.Config.ProxyHost = proxyHost + client.Config.IsAuthProxy = true + client.Config.ProxyUser = proxyUser + client.Config.ProxyPassword = proxyPassword + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// +// HTTPClient sets the http.Client in use to the one passed in +// +func HTTPClient(HTTPClient *http.Client) ClientOption { + return func(client *Client) { + client.HTTPClient = HTTPClient + } +} + +// Private +func (client Client) do(method, bucketName string, params map[string]interface{}, + headers map[string]string, data io.Reader) (*Response, error) { + return client.Conn.Do(method, bucketName, "", params, + headers, data, 0, nil) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go new file mode 100644 index 000000000000..f5db93e24124 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go @@ -0,0 +1,77 @@ +package oss + +import ( + "time" +) + +// HTTPTimeout defines HTTP timeout. +type HTTPTimeout struct { + ConnectTimeout time.Duration + ReadWriteTimeout time.Duration + HeaderTimeout time.Duration + LongTimeout time.Duration + IdleConnTimeout time.Duration +} + +type HTTPMaxConns struct { + MaxIdleConns int + MaxIdleConnsPerHost int +} + +// Config defines oss configuration +type Config struct { + Endpoint string // OSS endpoint + AccessKeyID string // AccessId + AccessKeySecret string // AccessKey + RetryTimes uint // Retry count by default it's 5. + UserAgent string // SDK name/version/system information + IsDebug bool // Enable debug mode. Default is false. + Timeout uint // Timeout in seconds. By default it's 60. + SecurityToken string // STS Token + IsCname bool // If cname is in the endpoint. + HTTPTimeout HTTPTimeout // HTTP timeout + HTTPMaxConns HTTPMaxConns // Http max connections + IsUseProxy bool // Flag of using proxy. + ProxyHost string // Flag of using proxy host. + IsAuthProxy bool // Flag of needing authentication. + ProxyUser string // Proxy user + ProxyPassword string // Proxy password + IsEnableMD5 bool // Flag of enabling MD5 for upload. + MD5Threshold int64 // Memory footprint threshold for each MD5 computation (16MB is the default), in byte. When the data is more than that, temp file is used. + IsEnableCRC bool // Flag of enabling CRC for upload. +} + +// getDefaultOssConfig gets the default configuration. 
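+// Editor's note (illustrative sketch, not part of this change): every
+// ClientOption above rewrites one of the defaults below after the client is
+// built. Assuming the package's New constructor, with placeholder endpoint
+// and credentials:
+//
+//	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com",
+//		"<accessKeyID>", "<accessKeySecret>",
+//		oss.Timeout(10, 120),        // overrides the HTTPTimeout defaults
+//		oss.UserAgent("my-app/1.0"), // overrides userAgent()
+//	)
+//	if err != nil {
+//		// handle error
+//	}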
+func getDefaultOssConfig() *Config { + config := Config{} + + config.Endpoint = "" + config.AccessKeyID = "" + config.AccessKeySecret = "" + config.RetryTimes = 5 + config.IsDebug = false + config.UserAgent = userAgent() + config.Timeout = 60 // Seconds + config.SecurityToken = "" + config.IsCname = false + + config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s + config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s + config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s + config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s + config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s + config.HTTPMaxConns.MaxIdleConns = 100 + config.HTTPMaxConns.MaxIdleConnsPerHost = 100 + + config.IsUseProxy = false + config.ProxyHost = "" + config.IsAuthProxy = false + config.ProxyUser = "" + config.ProxyPassword = "" + + config.MD5Threshold = 16 * 1024 * 1024 // 16MB + config.IsEnableMD5 = false + config.IsEnableCRC = true + + return &config +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go new file mode 100644 index 000000000000..74f768e5a2c4 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go @@ -0,0 +1,620 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// Conn defines OSS Conn +type Conn struct { + config *Config + url *urlMaker + client *http.Client +} + +var signKeyList = []string{"acl", "uploads", "location", "cors", "logging", "website", "referer", "lifecycle", "delete", "append", "tagging", "objectMeta", "uploadId", "partNumber", "security-token", "position", "img", "style", "styleName", "replication", "replicationProgress", "replicationLocation", "cname", "bucketInfo", "comp", "qos", "live", "status", "vod", "startTime", "endTime", "symlink", "x-oss-process", "response-content-type", "response-content-language", "response-expires", "response-cache-control", "response-content-disposition", "response-content-encoding", "udf", "udfName", "udfImage", "udfId", "udfImageDesc", "udfApplication", "comp", "udfApplicationLog", "restore", "callback", "callback-var"} + +// init initializes Conn +func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error { + if client == nil { + // New transport + transport := newTransport(conn, config) + + // Proxy + if conn.config.IsUseProxy { + proxyURL, err := url.Parse(config.ProxyHost) + if err != nil { + return err + } + transport.Proxy = http.ProxyURL(proxyURL) + } + client = &http.Client{Transport: transport} + } + + conn.config = config + conn.url = urlMaker + conn.client = client + + return nil +} + +// Do sends request and returns the response +func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + urlParams := conn.getURLParams(params) + subResource := conn.getSubResource(params) + uri := conn.url.getURL(bucketName, objectName, urlParams) + resource := conn.url.getResource(bucketName, objectName, subResource) + return conn.doRequest(method, uri, resource, headers, data, initCRC, listener) +} + +// DoURL sends the request with signed URL and returns the response result. 
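+// Editor's note (illustrative sketch): DoURL is normally reached through the
+// bucket-level signed-URL helpers this SDK also ships (Bucket.SignURL and
+// Bucket.GetObjectWithURL; both live outside this hunk, so treat the calls
+// below as assumptions):
+//
+//	bucket, _ := client.Bucket("my-bucket")                    // placeholder
+//	signedURL, err := bucket.SignURL("a/key", oss.HTTPGet, 60) // valid for 60s
+//	if err != nil {
+//		// handle error
+//	}
+//	body, err := bucket.GetObjectWithURL(signedURL) // drives Conn.DoURL
+//	if err != nil {
+//		// handle error
+//	}
+//	defer body.Close()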
+func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + // Get URI from signedURL + uri, err := url.ParseRequestURI(signedURL) + if err != nil { + return nil, err + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength) + publishProgress(listener, event) + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength) + publishProgress(listener, event) + return nil, err + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) getURLParams(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if params[k] != nil { + buf.WriteString("=" + url.QueryEscape(params[k].(string))) + } + } + + return buf.String() +} + +func (conn Conn) getSubResource(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + if conn.isParamSign(k) { + keys = append(keys, k) + } + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if params[k] != nil { + buf.WriteString("=" + params[k].(string)) + } + } + + return buf.String() +} + +func (conn Conn) isParamSign(paramKey string) bool { + for _, k := range signKeyList { + if paramKey == k { + return true + } + } + return false +} + +func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + method = strings.ToUpper(method) + req := &http.Request{ + Method: method, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + 
} + + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(HTTPHeaderDate, date) + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + if conn.config.SecurityToken != "" { + req.Header.Set(HTTPHeaderOssSecurityToken, conn.config.SecurityToken) + } + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + conn.signHeader(req, canonicalizedResource) + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength) + publishProgress(listener, event) + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength) + publishProgress(listener, event) + return nil, err + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string { + if conn.config.SecurityToken != "" { + params[HTTPParamSecurityToken] = conn.config.SecurityToken + } + subResource := conn.getSubResource(params) + canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource) + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + Header: make(http.Header), + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10)) + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + signedStr := conn.getSignedStr(req, canonicalizedResource) + + params[HTTPParamExpires] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyID] = conn.config.AccessKeyID + params[HTTPParamSignature] = signedStr + + urlParams := conn.getURLParams(params) + return conn.url.getSignURL(bucketName, objectName, urlParams) +} + +// handleBody handles request body +func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64, + listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) { + var file *os.File + var crc hash.Hash64 + reader := body + + // Length + switch v := body.(type) { + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + case *strings.Reader: + req.ContentLength = int64(v.Len()) + case *os.File: + req.ContentLength = tryGetFileSize(v) + case *io.LimitedReader: + req.ContentLength = int64(v.N) + } + req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) + + // MD5 + if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { + md5 := "" + reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold) + req.Header.Set(HTTPHeaderContentMD5, md5) + } + + // CRC + if reader != nil && conn.config.IsEnableCRC { + crc = NewCRC(crcTable(), initCRC) + reader = TeeReader(reader, crc, req.ContentLength, listener, tracker) + } + + // HTTP body + rc, ok := reader.(io.ReadCloser) + if !ok && reader != nil { + rc = 
ioutil.NopCloser(reader) + } + req.Body = rc + + return file, crc +} + +func tryGetFileSize(f *os.File) int64 { + fInfo, _ := f.Stat() + return fInfo.Size() +} + +// handleResponse handles response +func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) { + var cliCRC uint64 + var srvCRC uint64 + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + // 4xx and 5xx indicate that the operation has error occurred + var respBody []byte + respBody, err := readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + err = ServiceError{ + StatusCode: statusCode, + RequestID: resp.Header.Get(HTTPHeaderOssRequestID), + } + } else { + // Response contains storage service error object, unmarshal + srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, + resp.Header.Get(HTTPHeaderOssRequestID)) + if errIn != nil { // error unmarshaling the error response + err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID)) + } else { + err = srvErr + } + } + + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body + }, err + } else if statusCode >= 300 && statusCode <= 307 { + // OSS use 3xx, but response has no body + err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status) + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: resp.Body, + }, err + } + + if conn.config.IsEnableCRC && crc != nil { + cliCRC = crc.Sum64() + } + srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64) + + // 2xx, successful + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: resp.Body, + ClientCRC: cliCRC, + ServerCRC: srvCRC, + }, nil +} + +func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) { + if contentLen == 0 || contentLen > md5Threshold { + // Huge body, use temporary file + tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix) + if tempFile != nil { + io.Copy(tempFile, body) + tempFile.Seek(0, os.SEEK_SET) + md5 := md5.New() + io.Copy(md5, tempFile) + sum := md5.Sum(nil) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + tempFile.Seek(0, os.SEEK_SET) + reader = tempFile + } + } else { + // Small body, use memory + buf, _ := ioutil.ReadAll(body) + sum := md5.Sum(buf) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + reader = bytes.NewReader(buf) + } + return +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) { + var storageErr ServiceError + + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + storageErr.RawMessage = string(body) + return storageErr, nil +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func jsonUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// timeoutConn handles HTTP timeout +type 
timeoutConn struct { + conn net.Conn + timeout time.Duration + longTimeout time.Duration +} + +func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { + conn.SetReadDeadline(time.Now().Add(longTimeout)) + return &timeoutConn{ + conn: conn, + timeout: timeout, + longTimeout: longTimeout, + } +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + c.SetReadDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Read(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + c.SetWriteDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Write(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Close() error { + return c.conn.Close() +} + +func (c *timeoutConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *timeoutConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *timeoutConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *timeoutConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *timeoutConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +// UrlMaker builds URL and resource +const ( + urlTypeCname = 1 + urlTypeIP = 2 + urlTypeAliyun = 3 +) + +type urlMaker struct { + Scheme string // HTTP or HTTPS + NetLoc string // Host or IP + Type int // 1 CNAME, 2 IP, 3 ALIYUN + IsProxy bool // Proxy +} + +// Init parses endpoint +func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) { + if strings.HasPrefix(endpoint, "http://") { + um.Scheme = "http" + um.NetLoc = endpoint[len("http://"):] + } else if strings.HasPrefix(endpoint, "https://") { + um.Scheme = "https" + um.NetLoc = endpoint[len("https://"):] + } else { + um.Scheme = "http" + um.NetLoc = endpoint + } + + host, _, err := net.SplitHostPort(um.NetLoc) + if err != nil { + host = um.NetLoc + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + } + + ip := net.ParseIP(host) + if ip != nil { + um.Type = urlTypeIP + } else if isCname { + um.Type = urlTypeCname + } else { + um.Type = urlTypeAliyun + } + um.IsProxy = isProxy +} + +// getURL gets URL +func (um urlMaker) getURL(bucket, object, params string) *url.URL { + host, path := um.buildURL(bucket, object) + addr := "" + if params == "" { + addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path) + } else { + addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) + } + uri, _ := url.ParseRequestURI(addr) + return uri +} + +// getSignURL gets sign URL +func (um urlMaker) getSignURL(bucket, object, params string) string { + host, path := um.buildURL(bucket, object) + return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params) +} + +// buildURL builds URL +func (um urlMaker) buildURL(bucket, object string) (string, string) { + var host = "" + var path = "" + + object = url.QueryEscape(object) + object = strings.Replace(object, "+", "%20", -1) + + if um.Type == urlTypeCname { + host = um.NetLoc + path = "/" + object + } else if um.Type == urlTypeIP { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = um.NetLoc + path = fmt.Sprintf("/%s/%s", bucket, object) + } + } else { + if bucket == "" { + host = um.NetLoc + path = "/" + } else { + host = bucket + "." 
+ um.NetLoc + path = "/" + object + } + } + + return host, path +} + +// getResource gets canonicalized resource +func (um urlMaker) getResource(bucketName, objectName, subResource string) string { + if subResource != "" { + subResource = "?" + subResource + } + if bucketName == "" { + return fmt.Sprintf("/%s%s", bucketName, subResource) + } + return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go new file mode 100644 index 000000000000..b5ee30bdf91c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go @@ -0,0 +1,145 @@ +package oss + +import "os" + +// ACLType bucket/object ACL +type ACLType string + +const ( + // ACLPrivate definition : private read and write + ACLPrivate ACLType = "private" + + // ACLPublicRead definition : public read and private write + ACLPublicRead ACLType = "public-read" + + // ACLPublicReadWrite definition : public read and public write + ACLPublicReadWrite ACLType = "public-read-write" + + // ACLDefault Object. It's only applicable for object. + ACLDefault ACLType = "default" +) + +// MetadataDirectiveType specifying whether use the metadata of source object when copying object. +type MetadataDirectiveType string + +const ( + // MetaCopy the target object's metadata is copied from the source one + MetaCopy MetadataDirectiveType = "COPY" + + // MetaReplace the target object's metadata is created as part of the copy request (not same as the source one) + MetaReplace MetadataDirectiveType = "REPLACE" +) + +// StorageClassType bucket storage type +type StorageClassType string + +const ( + // StorageStandard standard + StorageStandard StorageClassType = "Standard" + + // StorageIA infrequent access + StorageIA StorageClassType = "IA" + + // StorageArchive archive + StorageArchive StorageClassType = "Archive" +) + +// PayerType the type of request payer +type PayerType string + +const ( + // Requester the requester who send the request + Requester PayerType = "requester" +) + +// HTTPMethod HTTP request method +type HTTPMethod string + +const ( + // HTTPGet HTTP GET + HTTPGet HTTPMethod = "GET" + + // HTTPPut HTTP PUT + HTTPPut HTTPMethod = "PUT" + + // HTTPHead HTTP HEAD + HTTPHead HTTPMethod = "HEAD" + + // HTTPPost HTTP POST + HTTPPost HTTPMethod = "POST" + + // HTTPDelete HTTP DELETE + HTTPDelete HTTPMethod = "DELETE" +) + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderDate = "Date" + HTTPHeaderEtag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderLastModified = "Last-Modified" + HTTPHeaderRange = "Range" + HTTPHeaderLocation = "Location" + HTTPHeaderOrigin = "Origin" + HTTPHeaderServer = "Server" + HTTPHeaderUserAgent = "User-Agent" + HTTPHeaderIfModifiedSince = "If-Modified-Since" + HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" + HTTPHeaderIfMatch = "If-Match" + HTTPHeaderIfNoneMatch = "If-None-Match" + + HTTPHeaderOssACL = "X-Oss-Acl" + HTTPHeaderOssMetaPrefix = "X-Oss-Meta-" + HTTPHeaderOssObjectACL = "X-Oss-Object-Acl" + HTTPHeaderOssSecurityToken = 
"X-Oss-Security-Token" + HTTPHeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HTTPHeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HTTPHeaderOssCopySource = "X-Oss-Copy-Source" + HTTPHeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HTTPHeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HTTPHeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HTTPHeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HTTPHeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HTTPHeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HTTPHeaderOssRequestID = "X-Oss-Request-Id" + HTTPHeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HTTPHeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HTTPHeaderOssStorageClass = "X-Oss-Storage-Class" + HTTPHeaderOssCallback = "X-Oss-Callback" + HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var" + HTTPHeaderOSSRequester = "X-Oss-Request-Payer" +) + +// HTTP Param +const ( + HTTPParamExpires = "Expires" + HTTPParamAccessKeyID = "OSSAccessKeyId" + HTTPParamSignature = "Signature" + HTTPParamSecurityToken = "security-token" +) + +// Other constants +const ( + MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB + MinPartSize = 100 * 1024 // Min part size, 100KB + + FilePermMode = os.FileMode(0664) // Default file permission + + TempFilePrefix = "oss-go-temp-" // Temp file prefix + TempFileSuffix = ".temp" // Temp file suffix + + CheckpointFileSuffix = ".cp" // Checkpoint file suffix + + Version = "1.9.2" // Go SDK version +) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go new file mode 100644 index 000000000000..c96694f2850d --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go @@ -0,0 +1,123 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint64 + tab *crc64.Table +} + +// NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } + +// Size returns the number of bytes sum will return. +func (d *digest) Size() int { return crc64.Size } + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) BlockSize() int { return 1 } + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { d.crc = 0 } + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *digest) Sum64() uint64 { return d.crc } + +// Sum returns hash value. 
+func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 000000000000..f0f0857bd3ce --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,568 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// DownloadFile downloads files with multipart download. +// +// objectKey the object key. +// filePath the local file to download from objectKey in OSS. +// partSize the part size in bytes. +// options object's constraints, check out GetObject for the reference. +// +// error it's nil when the call succeeds, otherwise it's an error object. 
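+// Editor's note: a usage sketch with placeholder names (Routines and
+// Checkpoint are Options defined elsewhere in this SDK; an assumption here):
+//
+//	bucket, _ := client.Bucket("my-bucket")
+//	// 1MB parts, 3 concurrent workers, resumable via a checkpoint file
+//	err := bucket.DownloadFile("a/key", "/tmp/local.file", 1024*1024,
+//		oss.Routines(3), oss.Checkpoint(true, "/tmp/local.file.cp"))
+//	if err != nil {
+//		// handle error
+//	}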
+// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + uRange, err := getRangeConfig(options) + if err != nil { + return err + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath) + if cpFilePath != "" { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange) + } + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + absPath, _ := filepath.Abs(destFile) + cpFileName := getCpFileName(src, absPath) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// getRangeConfig gets the download range from the options. +func getRangeConfig(options []Option) (*unpackedRange, error) { + rangeOpt, err := findOption(options, HTTPHeaderRange, nil) + if err != nil || rangeOpt == nil { + return nil, err + } + return parseRange(rangeOpt.(string)) +} + +// ----- concurrent download without checkpoint ----- + +// downloadWorkerArg is download worker's parameters +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// downloadPartHook is hook for test +type downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. +type defaultDownloadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// downloadWorker +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // Resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + opts := make([]Option, len(arg.options)+2) + // Append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p) + + rd, err := arg.bucket.GetObject(arg.key, opts...) 
+		if err != nil {
+			failed <- err
+			break
+		}
+		// rd is the response body from GetObject; close it when the worker returns.
+		defer rd.Close()
+
+		var crcCalc hash.Hash64
+		if arg.enableCRC {
+			crcCalc = crc64.New(crcTable())
+			contentLen := part.End - part.Start + 1
+			rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil))
+		}
+
+		select {
+		case <-die:
+			return
+		default:
+		}
+
+		fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode)
+		if err != nil {
+			failed <- err
+			break
+		}
+
+		_, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET)
+		if err != nil {
+			fd.Close()
+			failed <- err
+			break
+		}
+
+		_, err = io.Copy(fd, rd)
+		if err != nil {
+			fd.Close()
+			failed <- err
+			break
+		}
+
+		if arg.enableCRC {
+			part.CRC64 = crcCalc.Sum64()
+		}
+
+		fd.Close()
+		results <- part
+	}
+}
+
+// downloadScheduler feeds parts to the worker pool
+func downloadScheduler(jobs chan downloadPart, parts []downloadPart) {
+	for _, part := range parts {
+		jobs <- part
+	}
+	close(jobs)
+}
+
+// downloadPart defines download part
+type downloadPart struct {
+	Index  int    // Part number, starting from 0
+	Start  int64  // Start index
+	End    int64  // End index
+	Offset int64  // Offset
+	CRC64  uint64 // CRC check value of part
+}
+
+// getDownloadParts gets download parts
+func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart {
+	parts := []downloadPart{}
+	part := downloadPart{}
+	i := 0
+	start, end := adjustRange(uRange, objectSize)
+	for offset := start; offset < end; offset += partSize {
+		part.Index = i
+		part.Start = offset
+		part.End = GetPartEnd(offset, end, partSize)
+		part.Offset = start
+		part.CRC64 = 0
+		parts = append(parts, part)
+		i++
+	}
+	return parts
+}
+
+// getObjectBytes gets object bytes length
+func getObjectBytes(parts []downloadPart) int64 {
+	var ob int64
+	for _, part := range parts {
+		ob += (part.End - part.Start + 1)
+	}
+	return ob
+}
+
+// combineCRCInParts calculates the total CRC of continuous parts
+func combineCRCInParts(dps []downloadPart) uint64 {
+	if dps == nil || len(dps) == 0 {
+		return 0
+	}
+
+	crc := dps[0].CRC64
+	for i := 1; i < len(dps); i++ {
+		crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1))
+	}
+
+	return crc
+}
+
+// downloadFile downloads file concurrently without checkpoint.
+func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error {
+	tempFilePath := filePath + TempFileSuffix
+	listener := getProgressListener(options)
+
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
+	// If the file does not exist, create one. If exists, the download will overwrite it.
+	fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode)
+	if err != nil {
+		return err
+	}
+	fd.Close()
+
+	meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
+	jobs := make(chan downloadPart, len(parts))
+	results := make(chan downloadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getObjectBytes(parts)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	publishProgress(listener, event)
+
+	// Start the download workers
+	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
+	for w := 1; w <= routines; w++ {
+		go downloadWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Download parts concurrently
+	go downloadScheduler(jobs, parts)
+
+	// Wait for the part downloads to finish
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			completedBytes += (part.End - part.Start + 1)
+			parts[part.Index].CRC64 = part.CRC64
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	publishProgress(listener, event)
+
+	if enableCRC {
+		actualCRC := combineCRCInParts(parts)
+		err = checkDownloadCRC(actualCRC, expectedCRC)
+		if err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// ----- Concurrent download with checkpoint -----
+
+const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
+
+type downloadCheckpoint struct {
+	Magic     string         // Magic
+	MD5       string         // Checkpoint content MD5
+	FilePath  string         // Local file
+	Object    string         // Key
+	ObjStat   objectStat     // Object status
+	Parts     []downloadPart // All download parts
+	PartStat  []bool         // Parts' download status
+	Start     int64          // Start point of the file
+	End       int64          // End point of the file
+	enableCRC bool           // Whether has CRC check
+	CRC       uint64         // CRC check value
+}
+
+type objectStat struct {
+	Size         int64  // Object size
+	LastModified string // Last modified time
+	Etag         string // Etag
+}
+
+// isValid checks whether the checkpoint data is valid. It returns true only when
+// the data is intact (the self-checksum matches) and the object has not been updated.
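+// Editor's note: on disk the checkpoint is the JSON written by dump() below; a
+// hypothetical, abridged example of its shape (values are made up, and the
+// unexported enableCRC field is not serialized by encoding/json):
+//
+//	{"Magic":"92611BED-89E2-46B6-89E5-72F273D4B0A3","MD5":"<base64 self-checksum>",
+//	 "FilePath":"/tmp/local.file","Object":"a/key",
+//	 "ObjStat":{"Size":10485760,"LastModified":"...","Etag":"..."},
+//	 "Parts":[...],"PartStat":[true,false],"Start":0,"End":10485759,"CRC":0}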
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
+	// Compare the CP's Magic and the MD5
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the object size, last modified time and etag
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	// Check the download range
+	if uRange != nil {
+		start, end := adjustRange(uRange, objectSize)
+		if start != cp.Start || end != cp.End {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// load checkpoint from local file
+func (cp *downloadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// dump function dumps the checkpoint to file
+func (cp *downloadCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Calculate MD5
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Dump
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts gets unfinished parts
+func (cp downloadCheckpoint) todoParts() []downloadPart {
+	dps := []downloadPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// getCompletedBytes gets completed size
+func (cp downloadCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for i, part := range cp.Parts {
+		if cp.PartStat[i] {
+			completedBytes += (part.End - part.Start + 1)
+		}
+	}
+	return completedBytes
+}
+
+// prepare initiates download tasks
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+	// CP
+	cp.Magic = downloadCpMagic
+	cp.FilePath = filePath
+	cp.Object = objectKey
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			cp.enableCRC = true
+			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Parts
+	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+
+	return nil
+}
+
+func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
+	os.Remove(cpFilePath)
+	return os.Rename(downFilepath, cp.FilePath)
+}
+
+// downloadFileWithCp downloads files with checkpoint.
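+// Editor's note: control-flow sketch of the function below:
+//
+//	dcp.load(cpFilePath)      // stale or unreadable checkpoint => removed
+//	dcp.isValid(meta, uRange) // mismatch => dcp.prepare() starts from scratch
+//	downloadWorker x routines // each finished part flips PartStat[i] and dump()s
+//	dcp.complete(...)         // drop the checkpoint, rename temp file into place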
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := getProgressListener(options) + + payerOptions := []Option{} + payer := getPayer(options) + if payer != "" { + payerOptions = append(payerOptions, RequestPayer(PayerType(payer))) + } + + // Load checkpoint data. + dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Get the object detailed meta. + meta, err := bucket.GetObjectDetailedMeta(objectKey, payerOptions...) + if err != nil { + return err + } + + // Load error or data invalid. Re-initialize the download. + valid, err := dcp.isValid(meta, uRange) + if err != nil || !valid { + if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Create the file if not exists. Otherwise the parts download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Unfinished parts + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + + // Start the download workers routine + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Concurrently downloads parts + go downloadScheduler(jobs, parts) + + // Wait for the parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + completedBytes += (part.End - part.Start + 1) + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = checkDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 000000000000..6d7b4e0fa827 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,94 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. 
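+// Editor's note: since handleResponse returns ServiceError by value, callers
+// can recover the fields with a plain type assertion, e.g. (sketch):
+//
+//	if serr, ok := err.(oss.ServiceError); ok {
+//		log.Printf("code=%s status=%d requestID=%s", serr.Code, serr.StatusCode, serr.RequestID)
+//	}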
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
+}
+
+// Error implements interface error
+func (e ServiceError) Error() string {
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
+}
+
+// Error implements interface error
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
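+// Editor's note: a sketch of telling the two error types in this file apart:
+//
+//	switch e := err.(type) {
+//	case oss.ServiceError: // OSS returned an XML error body
+//		fmt.Println(e.Code, e.Message)
+//	case oss.UnexpectedStatusCodeError: // no error body, just a surprising status
+//		fmt.Println(e.Got())
+//	}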
+func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} + +// CRCCheckError is returned when crc check is inconsistent between client and server +type CRCCheckError struct { + clientCRC uint64 // Calculated CRC64 in client + serverCRC uint64 // Calculated CRC64 in server + operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc + requestID string // The request id of this operation +} + +// Error implements interface error +func (e CRCCheckError) Error() string { + return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s", + e.operation, e.clientCRC, e.serverCRC, e.requestID) +} + +func checkDownloadCRC(clientCRC, serverCRC uint64) error { + if clientCRC == serverCRC { + return nil + } + return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""} +} + +func checkCRC(resp *Response, operation string) error { + if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC { + return nil + } + return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)} +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go new file mode 100644 index 000000000000..11485973d2e2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go @@ -0,0 +1,245 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": "application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", 
+ ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + ".djvu": "image/vnd.djvu", + ".dll": 
"application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// It gets the file's MIME type for the HTTP header Content-Type.
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 000000000000..51f1c31e3da8
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,68 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response defines HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
+// PutObjectRequest is the request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 000000000000..e2597c24e705
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,468 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// CopyFile copies an object to the destination bucket with multipart copy
+//
+// srcBucketName    source bucket name
+// srcObjectKey    source object name
+// destObjectKey    target object name in the form of bucketname.objectkey
+// partSize    the part size in byte.
+// options    object's constraints. Check out function InitiateMultipartUpload.
+//
+// error    it's nil if the operation succeeds, otherwise it's an error object.
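+// Editor's note: a usage sketch with placeholder names; the receiver is the
+// destination bucket (Routines and Checkpoint are Options assumed from
+// elsewhere in this SDK):
+//
+//	dst, _ := client.Bucket("dst-bucket")
+//	err := dst.CopyFile("src-bucket", "src/key", "dst/key", 1024*1024,
+//		oss.Routines(3), oss.Checkpoint(true, "/tmp/copy.cp"))
+//	if err != nil {
+//		// handle error
+//	}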
+//
+func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
+	destBucketName := bucket.BucketName
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	cpConf := getCpConfig(options)
+	routines := getRoutines(options)
+
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
+		if cpFilePath != "" {
+			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
+		}
+	}
+
+	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+		partSize, options, routines)
+}
+
+func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		cpFileName := getCpFileName(src, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
+// ----- Concurrently copy without checkpoint ---------
+
+// copyWorkerArg defines the copy worker arguments
+type copyWorkerArg struct {
+	bucket        *Bucket
+	imur          InitiateMultipartUploadResult
+	srcBucketName string
+	srcObjectKey  string
+	options       []Option
+	hook          copyPartHook
+}
+
+// copyPartHook is the hook for testing purposes
+type copyPartHook func(part copyPart) error
+
+var copyPartHooker copyPartHook = defaultCopyPartHook
+
+func defaultCopyPartHook(part copyPart) error {
+	return nil
+}
+
+// copyWorker is the worker routine that copies the parts it receives on the jobs channel
+func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+	for chunk := range jobs {
+		if err := arg.hook(chunk); err != nil {
+			failed <- err
+			break
+		}
+		chunkSize := chunk.End - chunk.Start + 1
+		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
+			chunk.Start, chunkSize, chunk.Number, arg.options...)
+		if err != nil {
+			failed <- err
+			break
+		}
+		select {
+		case <-die:
+			return
+		default:
+		}
+		results <- part
+	}
+}
+
+// copyScheduler feeds all parts into the jobs channel and closes it
+func copyScheduler(jobs chan copyPart, parts []copyPart) {
+	for _, part := range parts {
+		jobs <- part
+	}
+	close(jobs)
+}
+
+// copyPart structure
+type copyPart struct {
+	Number int   // Part number (from 1 to 10,000)
+	Start  int64 // The start index in the source file.
+	End    int64 // The end index in the source file
+}
+
+// getCopyParts calculates the copy parts
+func getCopyParts(objectSize, partSize int64) []copyPart {
+	parts := []copyPart{}
+	part := copyPart{}
+	i := 0
+	for offset := int64(0); offset < objectSize; offset += partSize {
+		part.Number = i + 1
+		part.Start = offset
+		part.End = GetPartEnd(offset, objectSize, partSize)
+		parts = append(parts, part)
+		i++
+	}
+	return parts
+}
+
+// getSrcObjectBytes gets the source file size
+func getSrcObjectBytes(parts []copyPart) int64 {
+	var ob int64
+	for _, part := range parts {
+		ob += (part.End - part.Start + 1)
+	}
+	return ob
+}
+
+// copyFile does a concurrent copy without checkpoint
+func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+	partSize int64, options []Option, routines int) error {
+	descBucket, err := bucket.Client.Bucket(destBucketName)
+	if err != nil {
+		return err
+	}
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return err
+	}
+	listener := getProgressListener(options)
+
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	// Get copy parts
+	parts := getCopyParts(objectSize, partSize)
+	// Initialize the multipart upload
+	imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...)
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan copyPart, len(parts))
+	results := make(chan UploadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getSrcObjectBytes(parts)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	publishProgress(listener, event)
+
+	// Start the copy workers
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
+	for w := 1; w <= routines; w++ {
+		go copyWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Start the scheduler
+	go copyScheduler(jobs, parts)
+
+	// Wait for the parts to finish.
+	completed := 0
+	ups := make([]UploadPart, len(parts))
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ups[part.PartNumber-1] = part
+			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			descBucket.AbortMultipartUpload(imur, payerOptions...)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	publishProgress(listener, event)
+
+	// Complete the multipart upload
+	_, err = descBucket.CompleteMultipartUpload(imur, ups, payerOptions...)
+	if err != nil {
+		// Abort so that already-copied parts do not linger as orphans
+		bucket.AbortMultipartUpload(imur, payerOptions...)
+		return err
+	}
+	return nil
+}
+
+// ----- Concurrently copy with checkpoint -----
+
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+type copyCheckpoint struct {
+	Magic          string       // Magic
+	MD5            string       // CP content MD5
+	SrcBucketName  string       // Source bucket
+	SrcObjectKey   string       // Source object
+	DestBucketName string       // Target bucket
+	DestObjectKey  string       // Target object
+	CopyID         string       // Copy ID
+	ObjStat        objectStat   // Object stat
+	Parts          []copyPart   // Copy parts
+	CopyParts      []UploadPart // The uploaded parts
+	PartStat       []bool       // The part status
+}
+
+// isValid checks whether the checkpoint data is valid, which means the CP is intact and the object has not been updated.
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
+	// Compare CP's magic number and the MD5.
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the object size, last modified time and etag.
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// load loads from the checkpoint file
+func (cp *copyCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// update updates the parts status
+func (cp *copyCheckpoint) update(part UploadPart) {
+	cp.CopyParts[part.PartNumber-1] = part
+	cp.PartStat[part.PartNumber-1] = true
+}
+
+// dump dumps the CP to the file
+func (cp *copyCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Calculate MD5
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialization
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Dump
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts returns the unfinished parts
+func (cp copyCheckpoint) todoParts() []copyPart {
+	dps := []copyPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// getCompletedBytes returns the count of finished bytes
+func (cp copyCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for i, part := range cp.Parts {
+		if cp.PartStat[i] {
+			completedBytes += (part.End - part.Start + 1)
+		}
+	}
+	return completedBytes
+}
+
+// prepare initializes the multipart upload
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+	partSize int64, options []Option) error {
+	// CP
+	cp.Magic = copyCpMagic
+	cp.SrcBucketName = srcBucket.BucketName
+	cp.SrcObjectKey = srcObjectKey
+	cp.DestBucketName = destBucket.BucketName
+	cp.DestObjectKey = destObjectKey
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	// Parts
+	cp.Parts = getCopyParts(objectSize, partSize)
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+	cp.CopyParts = make([]UploadPart, len(cp.Parts))
+
+	// Init copy
+	imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...)
+	if err != nil {
+		return err
+	}
+	cp.CopyID = imur.UploadID
+
+	return nil
+}
+
+func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error {
+	imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName,
+		Key: cp.DestObjectKey, UploadID: cp.CopyID}
+	_, err := bucket.CompleteMultipartUpload(imur, parts, options...)
+	if err != nil {
+		return err
+	}
+	os.Remove(cpFilePath)
+	return err
+}
+
+// copyFileWithCp does a concurrent copy with checkpoint
+func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string,
+	partSize int64, options []Option, cpFilePath string, routines int) error {
+	descBucket, err := bucket.Client.Bucket(destBucketName)
+	if err != nil {
+		return err
+	}
+	srcBucket, err := bucket.Client.Bucket(srcBucketName)
+	if err != nil {
+		return err
+	}
+	listener := getProgressListener(options)
+
+	payerOptions := []Option{}
+	payer := getPayer(options)
+	if payer != "" {
+		payerOptions = append(payerOptions, RequestPayer(PayerType(payer)))
+	}
+
+	// Load CP data
+	ccp := copyCheckpoint{}
+	err = ccp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// Make sure the object is not updated.
+	meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, payerOptions...)
+	if err != nil {
+		return err
+	}
+
+	// Load error or the CP data is invalid --- reinitialize
+	valid, err := ccp.isValid(meta)
+	if err != nil || !valid {
+		if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	// Unfinished parts
+	parts := ccp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket:   destBucketName,
+		Key:      destObjectKey,
+		UploadID: ccp.CopyID}
+
+	jobs := make(chan copyPart, len(parts))
+	results := make(chan UploadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	completedBytes := ccp.getCompletedBytes()
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines
+	arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, payerOptions, copyPartHooker}
+	for w := 1; w <= routines; w++ {
+		go copyWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Start the scheduler
+	go copyScheduler(jobs, parts)
+
+	// Wait for the parts to complete; each finished part is recorded in the
+	// checkpoint file so an interrupted copy can resume.
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ccp.update(part)
+			ccp.dump(cpFilePath)
+			completedBytes += (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size)
+	publishProgress(listener, event)
+
+	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, payerOptions)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
new file mode 100644
index 000000000000..b5a3a05b5317
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
@@ -0,0 +1,290 @@
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"sort"
+	"strconv"
+)
+
+// InitiateMultipartUpload initializes a multipart upload
+//
+// objectKey    object name
+// options      the object constraints for the upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
+//              ServerSideEncryption and Meta; check out the following link:
+//              https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
+//
+// InitiateMultipartUploadResult    the return value of InitiateMultipartUpload, which is used for later calls such as UploadPartFromFile, UploadPartCopy.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
+	var imur InitiateMultipartUploadResult
+	opts := addContentType(options, objectKey)
+	params := map[string]interface{}{}
+	params["uploads"] = nil
+	resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
+	if err != nil {
+		return imur, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &imur)
+	return imur, err
+}
+
+// UploadPart uploads one part
+//
+// After initializing a multipart upload, the upload ID and object key can be used for uploading the parts.
+// Each part has its part number (ranging from 1 to 10,000). For a given upload ID, the part number identifies the position of the part in the whole file.
+// Uploading another part with the same part number and upload ID overwrites the data.
+// Except for the last one, the minimal part size is 100KB. There's no limit on the last part size.
+//
+// imur          the returned value of InitiateMultipartUpload.
+// reader        io.Reader the reader for the part's data.
+// size          the part size.
+// partNumber    the part number (ranges from 1 to 10,000). An invalid part number leads to an InvalidArgument error.
+//
+// UploadPart    the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
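+//
+// Illustrative flow sketch (not from the upstream docs; the readers r1/r2 and
+// the part sizes are assumptions):
+//
+//	imur, _ := bucket.InitiateMultipartUpload("my/object")
+//	p1, _ := bucket.UploadPart(imur, r1, partSize, 1)
+//	p2, _ := bucket.UploadPart(imur, r2, lastPartSize, 2)
+//	_, err := bucket.CompleteMultipartUpload(imur, []oss.UploadPart{p1, p2})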
+//
+func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
+	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     reader,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request, options)
+
+	return result.Part, err
+}
+
+// UploadPartFromFile uploads a part from the file.
+//
+// imur             the return value of a successful InitiateMultipartUpload.
+// filePath         the local file path to upload.
+// startPosition    the start position in the local file.
+// partSize         the part size.
+// partNumber       the part number (from 1 to 10,000)
+//
+// UploadPart    the return value consists of PartNumber and ETag.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
+	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+	var part = UploadPart{}
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return part, err
+	}
+	defer fd.Close()
+	fd.Seek(startPosition, os.SEEK_SET)
+
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     fd,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request, options)
+
+	return result.Part, err
+}
+
+// DoUploadPart does the actual part upload.
+//
+// request    part upload request
+//
+// UploadPartResult    the result of uploading the part.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
+	listener := getProgressListener(options)
+	options = append(options, ContentLength(request.PartSize))
+	params := map[string]interface{}{}
+	params["partNumber"] = strconv.Itoa(request.PartNumber)
+	params["uploadId"] = request.InitResult.UploadID
+	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
+		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
+	if err != nil {
+		return &UploadPartResult{}, err
+	}
+	defer resp.Body.Close()
+
+	part := UploadPart{
+		ETag:       resp.Headers.Get(HTTPHeaderEtag),
+		PartNumber: request.PartNumber,
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoUploadPart")
+		if err != nil {
+			return &UploadPartResult{part}, err
+		}
+	}
+
+	return &UploadPartResult{part}, nil
+}
+
+// UploadPartCopy uploads a part copy
+//
+// imur             the return value of InitiateMultipartUpload
+// srcBucketName    source bucket name
+// srcObjectKey     source object name
+// startPosition    the part's start index in the source file
+// partSize         the part size
+// partNumber       the part number, ranges from 1 to 10,000. If it exceeds the range, OSS returns an InvalidArgument error.
+// options          the constraints on the source object for the copy. The copy happens only when these constraints are met; otherwise it returns an error.
+//                  CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince; check out the following link for details:
+//                  https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+//
+// UploadPart    the return value consists of PartNumber and ETag.
+// error    it's nil if the operation succeeds, otherwise it's an error object.
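+//
+// Illustrative sketch (not from the upstream docs; names are assumptions):
+//
+//	imur, _ := bucket.InitiateMultipartUpload("dest/object")
+//	part, err := bucket.UploadPartCopy(imur, "src-bucket", "src/object",
+//		0, 5*1024*1024, 1)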
+// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + + opts := []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)), + CopySourceRange(startPosition, partSize)} + opts = append(opts, options...) + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// CompleteMultipartUpload completes the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy. +// +// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(uploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AbortMultipartUpload aborts the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// ListUploadedParts lists the uploaded parts. +// +// imur the return value of InitiateMultipartUpload. +// +// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + options = append(options, EncodingType("url")) + + params := map[string]interface{}{} + params, err := getRawParams(options) + if err != nil { + return out, err + } + + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListUploadedPartsResult(&out) + return out, err +} + +// ListMultipartUploads lists all ongoing multipart upload tasks +// +// options listObject's filter. 
Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order; +// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys. +// +// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 000000000000..5952f8ae3b24 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,433 @@ +package oss + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL parameter + optionHTTP optionType = "HTTPHeader" // HTTP header + optionArg optionType = "FuncArgument" // Function argument +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option HTTP option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { + return setHeader(HTTPHeaderContentDisposition, value) +} + +// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) Option { + return setHeader(HTTPHeaderContentLanguage, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range 
header, in the form [start, end]
+func Range(start, end int64) Option {
+	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end))
+}
+
+// NormalizedRange is an option to set the Range header, such as 1024-2048, 1024- or -2048
+func NormalizedRange(nr string) Option {
+	return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr)))
+}
+
+// AcceptEncoding is an option to set the Accept-Encoding header
+func AcceptEncoding(value string) Option {
+	return setHeader(HTTPHeaderAcceptEncoding, value)
+}
+
+// IfModifiedSince is an option to set the If-Modified-Since header
+func IfModifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfUnmodifiedSince is an option to set the If-Unmodified-Since header
+func IfUnmodifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// IfMatch is an option to set the If-Match header
+func IfMatch(value string) Option {
+	return setHeader(HTTPHeaderIfMatch, value)
+}
+
+// IfNoneMatch is an option to set the If-None-Match header
+func IfNoneMatch(value string) Option {
+	return setHeader(HTTPHeaderIfNoneMatch, value)
+}
+
+// CopySource is an option to set the X-Oss-Copy-Source header
+func CopySource(sourceBucket, sourceObject string) Option {
+	return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject)
+}
+
+// CopySourceRange is an option to set the X-Oss-Copy-Source-Range header
+func CopySourceRange(startPosition, partSize int64) Option {
+	val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" +
+		strconv.FormatInt((startPosition+partSize-1), 10)
+	return setHeader(HTTPHeaderOssCopySourceRange, val)
+}
+
+// CopySourceIfMatch is an option to set the X-Oss-Copy-Source-If-Match header
+func CopySourceIfMatch(value string) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfMatch, value)
+}
+
+// CopySourceIfNoneMatch is an option to set the X-Oss-Copy-Source-If-None-Match header
+func CopySourceIfNoneMatch(value string) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value)
+}
+
+// CopySourceIfModifiedSince is an option to set the X-Oss-Copy-Source-If-Modified-Since header
+func CopySourceIfModifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat))
+}
+
+// CopySourceIfUnmodifiedSince is an option to set the X-Oss-Copy-Source-If-Unmodified-Since header
+func CopySourceIfUnmodifiedSince(t time.Time) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// MetadataDirective is an option to set the X-Oss-Metadata-Directive header
+func MetadataDirective(directive MetadataDirectiveType) Option {
+	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
+}
+
+// ServerSideEncryption is an option to set the X-Oss-Server-Side-Encryption header
+func ServerSideEncryption(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryption, value)
+}
+
+// ServerSideEncryptionKeyID is an option to set the X-Oss-Server-Side-Encryption-Key-Id header
+func ServerSideEncryptionKeyID(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
+}
+
+// ObjectACL is an option to set the X-Oss-Object-Acl header
+func ObjectACL(acl ACLType) Option {
+	return setHeader(HTTPHeaderOssObjectACL, string(acl))
+}
+
+// symlinkTarget is an option to set the X-Oss-Symlink-Target header
+func symlinkTarget(targetObjectKey string) Option {
+	return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+}
+
+// Origin is
an option to set the Origin header
+func Origin(value string) Option {
+	return setHeader(HTTPHeaderOrigin, value)
+}
+
+// ObjectStorageClass is an option to set the storage class of an object
+func ObjectStorageClass(storageClass StorageClassType) Option {
+	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
+}
+
+// Callback is an option to set callback values
+func Callback(callback string) Option {
+	return setHeader(HTTPHeaderOssCallback, callback)
+}
+
+// CallbackVar is an option to set user-defined callback values
+func CallbackVar(callbackVar string) Option {
+	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
+}
+
+// RequestPayer is an option to set the payer who pays for the request
+func RequestPayer(payerType PayerType) Option {
+	return setHeader(HTTPHeaderOSSRequester, string(payerType))
+}
+
+// Delimiter is an option to set the delimiter parameter
+func Delimiter(value string) Option {
+	return addParam("delimiter", value)
+}
+
+// Marker is an option to set the marker parameter
+func Marker(value string) Option {
+	return addParam("marker", value)
+}
+
+// MaxKeys is an option to set the max-keys parameter
+func MaxKeys(value int) Option {
+	return addParam("max-keys", strconv.Itoa(value))
+}
+
+// Prefix is an option to set the prefix parameter
+func Prefix(value string) Option {
+	return addParam("prefix", value)
+}
+
+// EncodingType is an option to set the encoding-type parameter
+func EncodingType(value string) Option {
+	return addParam("encoding-type", value)
+}
+
+// MaxUploads is an option to set the max-uploads parameter
+func MaxUploads(value int) Option {
+	return addParam("max-uploads", strconv.Itoa(value))
+}
+
+// KeyMarker is an option to set the key-marker parameter
+func KeyMarker(value string) Option {
+	return addParam("key-marker", value)
+}
+
+// UploadIDMarker is an option to set the upload-id-marker parameter
+func UploadIDMarker(value string) Option {
+	return addParam("upload-id-marker", value)
+}
+
+// MaxParts is an option to set the max-parts parameter
+func MaxParts(value int) Option {
+	return addParam("max-parts", strconv.Itoa(value))
+}
+
+// PartNumberMarker is an option to set the part-number-marker parameter
+func PartNumberMarker(value int) Option {
+	return addParam("part-number-marker", strconv.Itoa(value))
+}
+
+// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
+func DeleteObjectsQuiet(isQuiet bool) Option {
+	return addArg(deleteObjectsQuiet, isQuiet)
+}
+
+// StorageClass is an option to set the bucket storage class
+func StorageClass(value StorageClassType) Option {
+	return addArg(storageClass, value)
+}
+
+// Checkpoint configuration
+type cpConfig struct {
+	IsEnable bool
+	FilePath string
+	DirPath  string
+}
+
+// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
+func Checkpoint(isEnable bool, filePath string) Option {
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
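+// Illustrative sketch of a resumable transfer (not from the upstream docs;
+// the key, path and part size are assumptions):
+//
+//	err := bucket.UploadFile("my/object", "/data/file.bin", 10*1024*1024,
+//		oss.CheckpointDir(true, "/tmp/oss-cp"), oss.Routines(3))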
+func CheckpointDir(isEnable bool, dirPath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath}) +} + +// Routines DownloadFile/UploadFile routine count +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC Init AppendObject CRC +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress set progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// ResponseContentType is an option to set response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set x-oss-process param +func Process(value string) Option { + return addParam("x-oss-process", value) +} + +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func getRawParams(options []Option) (map[string]interface{}, error) { + // Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // Serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +} + +func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := params[param]; ok { + return val.Value, nil + } + return defaultVal, nil +} + +func 
isOptionSet(options []Option, option string) (bool, interface{}, error) {
+	params := map[string]optionValue{}
+	for _, opt := range options {
+		if opt != nil {
+			if err := opt(params); err != nil {
+				return false, nil, err
+			}
+		}
+	}
+
+	if val, ok := params[option]; ok {
+		return true, val.Value, nil
+	}
+	return false, nil, nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
new file mode 100644
index 000000000000..b38d803fe140
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go
@@ -0,0 +1,112 @@
+package oss
+
+import "io"
+
+// ProgressEventType defines the transfer progress event type
+type ProgressEventType int
+
+const (
+	// TransferStartedEvent transfer started, set TotalBytes
+	TransferStartedEvent ProgressEventType = 1 + iota
+	// TransferDataEvent transfer data, set ConsumedBytes and TotalBytes
+	TransferDataEvent
+	// TransferCompletedEvent transfer completed
+	TransferCompletedEvent
+	// TransferFailedEvent transfer encounters an error
+	TransferFailedEvent
+)
+
+// ProgressEvent defines a progress event
+type ProgressEvent struct {
+	ConsumedBytes int64
+	TotalBytes    int64
+	EventType     ProgressEventType
+}
+
+// ProgressListener listens to progress changes
+type ProgressListener interface {
+	ProgressChanged(event *ProgressEvent)
+}
+
+// -------------------- Private --------------------
+
+func newProgressEvent(eventType ProgressEventType, consumed, total int64) *ProgressEvent {
+	return &ProgressEvent{
+		ConsumedBytes: consumed,
+		TotalBytes:    total,
+		EventType:     eventType}
+}
+
+// publishProgress publishes the progress event to the listener
+func publishProgress(listener ProgressListener, event *ProgressEvent) {
+	if listener != nil && event != nil {
+		listener.ProgressChanged(event)
+	}
+}
+
+type readerTracker struct {
+	completedBytes int64
+}
+
+type teeReader struct {
+	reader        io.Reader
+	writer        io.Writer
+	listener      ProgressListener
+	consumedBytes int64
+	totalBytes    int64
+	tracker       *readerTracker
+}
+
+// TeeReader returns a Reader that writes to w what it reads from r.
+// All reads from r performed through it are matched with
+// corresponding writes to w. There is no internal buffering -
+// the write must complete before the read completes.
+// Any error encountered while writing is reported as a read error.
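+//
+// Illustrative sketch (not from the upstream docs; "listener" is an assumed
+// ProgressListener implementation and "f" an opened file of size "total"):
+//
+//	r := TeeReader(f, nil, total, listener, nil)
+//	defer r.Close()
+//	_, err := io.Copy(ioutil.Discard, r) // progress events fire as data is read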
+func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser { + return &teeReader{ + reader: reader, + writer: writer, + listener: listener, + consumedBytes: 0, + totalBytes: totalBytes, + tracker: tracker, + } +} + +func (t *teeReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + + // Read encountered error + if err != nil && err != io.EOF { + event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes) + publishProgress(t.listener, event) + } + + if n > 0 { + t.consumedBytes += int64(n) + // CRC + if t.writer != nil { + if n, err := t.writer.Write(p[:n]); err != nil { + return n, err + } + } + // Progress + if t.listener != nil { + event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes) + publishProgress(t.listener, event) + } + // Track + if t.tracker != nil { + t.tracker.completedBytes = t.consumedBytes + } + } + + return +} + +func (t *teeReader) Close() error { + if rc, ok := t.reader.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go new file mode 100644 index 000000000000..e6de4cdd269a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package oss + +import ( + "net" + "net/http" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go new file mode 100644 index 000000000000..006ea47a01cb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go @@ -0,0 +1,28 @@ +// +build go1.7 + +package oss + +import ( + "net" + "net/http" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConns: httpMaxConns.MaxIdleConns, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + IdleConnTimeout: httpTimeOut.IdleConnTimeout, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go new file mode 100644 index 000000000000..794f28231660 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go @@ -0,0 +1,468 @@ +package oss + +import ( + "encoding/xml" + "net/url" + "time" +) + +// ListBucketsResult defines the result object from ListBuckets request 
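+//
+// Illustrative sketch (ListBuckets is defined on Client elsewhere in this SDK;
+// the client value and prefix are assumptions):
+//
+//	lbr, err := client.ListBuckets(oss.Prefix("my-"), oss.MaxKeys(10))
+//	if err == nil {
+//		for _, b := range lbr.Buckets {
+//			fmt.Println(b.Name, b.Location)
+//		}
+//	}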
+type ListBucketsResult struct {
+	XMLName     xml.Name           `xml:"ListAllMyBucketsResult"`
+	Prefix      string             `xml:"Prefix"`         // The prefix in this query
+	Marker      string             `xml:"Marker"`         // The marker filter
+	MaxKeys     int                `xml:"MaxKeys"`        // The max entry count to return. This information is returned when IsTruncated is true.
+	IsTruncated bool               `xml:"IsTruncated"`    // Flag true means there are remaining buckets to return.
+	NextMarker  string             `xml:"NextMarker"`     // The marker filter for the next list call
+	Owner       Owner              `xml:"Owner"`          // The owner information
+	Buckets     []BucketProperties `xml:"Buckets>Bucket"` // The bucket list
+}
+
+// BucketProperties defines bucket properties
+type BucketProperties struct {
+	XMLName      xml.Name  `xml:"Bucket"`
+	Name         string    `xml:"Name"`         // Bucket name
+	Location     string    `xml:"Location"`     // Bucket datacenter
+	CreationDate time.Time `xml:"CreationDate"` // Bucket creation time
+	StorageClass string    `xml:"StorageClass"` // Bucket storage class
+}
+
+// GetBucketACLResult defines the GetBucketACL request's result
+type GetBucketACLResult struct {
+	XMLName xml.Name `xml:"AccessControlPolicy"`
+	ACL     string   `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner   Owner    `xml:"Owner"`                   // Bucket owner
+}
+
+// LifecycleConfiguration is the Bucket Lifecycle configuration
+type LifecycleConfiguration struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []LifecycleRule `xml:"Rule"`
+}
+
+// LifecycleRule defines Lifecycle rules
+type LifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`         // The rule ID
+	Prefix     string              `xml:"Prefix"`     // The object key prefix
+	Status     string              `xml:"Status"`     // The rule status (enabled or not)
+	Expiration LifecycleExpiration `xml:"Expiration"` // The expiration property
+}
+
+// LifecycleExpiration defines the rule's expiration property
+type LifecycleExpiration struct {
+	XMLName xml.Name  `xml:"Expiration"`
+	Days    int       `xml:"Days,omitempty"` // Relative expiration time: the expiration time in days after the last modified time
+	Date    time.Time `xml:"Date,omitempty"` // Absolute expiration time: the expiration time as a date.
+}
+
+type lifecycleXML struct {
+	XMLName xml.Name        `xml:"LifecycleConfiguration"`
+	Rules   []lifecycleRule `xml:"Rule"`
+}
+
+type lifecycleRule struct {
+	XMLName    xml.Name            `xml:"Rule"`
+	ID         string              `xml:"ID"`
+	Prefix     string              `xml:"Prefix"`
+	Status     string              `xml:"Status"`
+	Expiration lifecycleExpiration `xml:"Expiration"`
+}
+
+type lifecycleExpiration struct {
+	XMLName xml.Name `xml:"Expiration"`
+	Days    int      `xml:"Days,omitempty"`
+	Date    string   `xml:"Date,omitempty"`
+}
+
+const expirationDateFormat = "2006-01-02T15:04:05.000Z"
+
+func convLifecycleRule(rules []LifecycleRule) []lifecycleRule {
+	rs := []lifecycleRule{}
+	for _, rule := range rules {
+		r := lifecycleRule{}
+		r.ID = rule.ID
+		r.Prefix = rule.Prefix
+		r.Status = rule.Status
+		if rule.Expiration.Date.IsZero() {
+			r.Expiration.Days = rule.Expiration.Days
+		} else {
+			r.Expiration.Date = rule.Expiration.Date.Format(expirationDateFormat)
+		}
+		rs = append(rs, r)
+	}
+	return rs
+}
+
+// BuildLifecycleRuleByDays builds a lifecycle rule with the specified expiration days
+func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule {
+	var statusStr = "Enabled"
+	if !status {
+		statusStr = "Disabled"
+	}
+	return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr,
+		Expiration: LifecycleExpiration{Days: days}}
+}
+
+// BuildLifecycleRuleByDate builds a lifecycle rule with the specified expiration time.
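+//
+// Illustrative sketch (SetBucketLifecycle is defined on Client elsewhere in
+// this SDK; the rule ID, prefix and bucket name are assumptions):
+//
+//	rule := oss.BuildLifecycleRuleByDate("expire-logs", "logs/", true, 2020, 1, 1)
+//	err := client.SetBucketLifecycle("my-bucket", []oss.LifecycleRule{rule})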
+func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule { + var statusStr = "Enabled" + if !status { + statusStr = "Disabled" + } + date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) + return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, + Expiration: LifecycleExpiration{Date: date}} +} + +// GetBucketLifecycleResult defines GetBucketLifecycle's result object +type GetBucketLifecycleResult LifecycleConfiguration + +// RefererXML defines Referer configuration +type RefererXML struct { + XMLName xml.Name `xml:"RefererConfiguration"` + AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer + RefererList []string `xml:"RefererList>Referer"` // Referer whitelist +} + +// GetBucketRefererResult defines result object for GetBucketReferer request +type GetBucketRefererResult RefererXML + +// LoggingXML defines logging configuration +type LoggingXML struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information +} + +type loggingXMLEmpty struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` +} + +// LoggingEnabled defines the logging configuration information +type LoggingEnabled struct { + XMLName xml.Name `xml:"LoggingEnabled"` + TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files + TargetPrefix string `xml:"TargetPrefix"` // The log file prefix +} + +// GetBucketLoggingResult defines the result from GetBucketLogging request +type GetBucketLoggingResult LoggingXML + +// WebsiteXML defines Website configuration +type WebsiteXML struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + IndexDocument IndexDocument `xml:"IndexDocument"` // The index page + ErrorDocument ErrorDocument `xml:"ErrorDocument"` // The error page +} + +// IndexDocument defines the index page info +type IndexDocument struct { + XMLName xml.Name `xml:"IndexDocument"` + Suffix string `xml:"Suffix"` // The file name for the index page +} + +// ErrorDocument defines the 404 error page info +type ErrorDocument struct { + XMLName xml.Name `xml:"ErrorDocument"` + Key string `xml:"Key"` // 404 error file name +} + +// GetBucketWebsiteResult defines the result from GetBucketWebsite request. +type GetBucketWebsiteResult WebsiteXML + +// CORSXML defines CORS configuration +type CORSXML struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []CORSRule `xml:"CORSRule"` // CORS rules +} + +// CORSRule defines CORS rules +type CORSRule struct { + XMLName xml.Name `xml:"CORSRule"` + AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*' + AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods + AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers + ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers + MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache ages in seconds +} + +// GetBucketCORSResult defines the result from GetBucketCORS request. +type GetBucketCORSResult CORSXML + +// GetBucketInfoResult defines the result from GetBucketInfo request. 
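+//
+// Illustrative sketch (GetBucketInfo is defined on Client elsewhere in this
+// SDK; the client value and bucket name are assumptions):
+//
+//	res, err := client.GetBucketInfo("my-bucket")
+//	if err == nil {
+//		fmt.Println(res.BucketInfo.Location, res.BucketInfo.StorageClass)
+//	}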
+type GetBucketInfoResult struct {
+	XMLName    xml.Name   `xml:"BucketInfo"`
+	BucketInfo BucketInfo `xml:"Bucket"`
+}
+
+// BucketInfo defines Bucket information
+type BucketInfo struct {
+	XMLName          xml.Name  `xml:"Bucket"`
+	Name             string    `xml:"Name"`                    // Bucket name
+	Location         string    `xml:"Location"`                // Bucket datacenter
+	CreationDate     time.Time `xml:"CreationDate"`            // Bucket creation time
+	ExtranetEndpoint string    `xml:"ExtranetEndpoint"`        // Bucket external endpoint
+	IntranetEndpoint string    `xml:"IntranetEndpoint"`        // Bucket internal endpoint
+	ACL              string    `xml:"AccessControlList>Grant"` // Bucket ACL
+	Owner            Owner     `xml:"Owner"`                   // Bucket owner
+	StorageClass     string    `xml:"StorageClass"`            // Bucket storage class
+}
+
+// ListObjectsResult defines the result from the ListObjects request
+type ListObjectsResult struct {
+	XMLName        xml.Name           `xml:"ListBucketResult"`
+	Prefix         string             `xml:"Prefix"`                // The object prefix
+	Marker         string             `xml:"Marker"`                // The marker filter.
+	MaxKeys        int                `xml:"MaxKeys"`               // Max keys to return
+	Delimiter      string             `xml:"Delimiter"`             // The delimiter for grouping objects' names
+	IsTruncated    bool               `xml:"IsTruncated"`           // Flag indicates if all results are returned (when it's false)
+	NextMarker     string             `xml:"NextMarker"`            // The start point of the next query
+	Objects        []ObjectProperties `xml:"Contents"`              // Object list
+	CommonPrefixes []string           `xml:"CommonPrefixes>Prefix"` // You can think of CommonPrefixes as "folders" whose names end with the delimiter
+}
+
+// ObjectProperties defines Object properties
+type ObjectProperties struct {
+	XMLName      xml.Name  `xml:"Contents"`
+	Key          string    `xml:"Key"`          // Object key
+	Type         string    `xml:"Type"`         // Object type
+	Size         int64     `xml:"Size"`         // Object size
+	ETag         string    `xml:"ETag"`         // Object ETag
+	Owner        Owner     `xml:"Owner"`        // Object owner information
+	LastModified time.Time `xml:"LastModified"` // Object last modified time
+	StorageClass string    `xml:"StorageClass"` // Object storage class (Standard, IA, Archive)
+}
+
+// Owner defines a Bucket/Object's owner
+type Owner struct {
+	XMLName     xml.Name `xml:"Owner"`
+	ID          string   `xml:"ID"`          // Owner ID
+	DisplayName string   `xml:"DisplayName"` // Owner's display name
+}
+
+// CopyObjectResult defines the result object of CopyObject
+type CopyObjectResult struct {
+	XMLName      xml.Name  `xml:"CopyObjectResult"`
+	LastModified time.Time `xml:"LastModified"` // New object's last modified time.
+	ETag         string    `xml:"ETag"`         // New object's ETag
+}
+
+// GetObjectACLResult defines the result of the GetObjectACL request
+type GetObjectACLResult GetBucketACLResult
+
+type deleteXML struct {
+	XMLName xml.Name       `xml:"Delete"`
+	Objects []DeleteObject `xml:"Object"` // Objects to delete
+	Quiet   bool           `xml:"Quiet"`  // Flag of quiet mode.
+}
+
+// DeleteObject defines the struct for deleting an object
+type DeleteObject struct {
+	XMLName xml.Name `xml:"Object"`
+	Key     string   `xml:"Key"` // Object name
+}
+
+// DeleteObjectsResult defines the result of the DeleteObjects request
+type DeleteObjectsResult struct {
+	XMLName        xml.Name `xml:"DeleteResult"`
+	DeletedObjects []string `xml:"Deleted>Key"` // Deleted object list
+}
+
+// InitiateMultipartUploadResult defines the result of the InitiateMultipartUpload request
+type InitiateMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"InitiateMultipartUploadResult"`
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	Key      string   `xml:"Key"`      // Object name to upload
+	UploadID string   `xml:"UploadId"` // Generated UploadId
+}
+
+// UploadPart defines the upload/copy part
+type UploadPart struct {
+	XMLName    xml.Name `xml:"Part"`
+	PartNumber int      `xml:"PartNumber"` // Part number
+	ETag       string   `xml:"ETag"`       // ETag value of the part's data
+}
+
+type uploadParts []UploadPart
+
+func (slice uploadParts) Len() int {
+	return len(slice)
+}
+
+func (slice uploadParts) Less(i, j int) bool {
+	return slice[i].PartNumber < slice[j].PartNumber
+}
+
+func (slice uploadParts) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+// UploadPartCopyResult defines the result object of the multipart copy request.
+type UploadPartCopyResult struct {
+	XMLName      xml.Name  `xml:"CopyPartResult"`
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag
+}
+
+type completeMultipartUploadXML struct {
+	XMLName xml.Name     `xml:"CompleteMultipartUpload"`
+	Part    []UploadPart `xml:"Part"`
+}
+
+// CompleteMultipartUploadResult defines the result object of CompleteMultipartUploadRequest
+type CompleteMultipartUploadResult struct {
+	XMLName  xml.Name `xml:"CompleteMultipartUploadResult"`
+	Location string   `xml:"Location"` // Object URL
+	Bucket   string   `xml:"Bucket"`   // Bucket name
+	ETag     string   `xml:"ETag"`     // Object ETag
+	Key      string   `xml:"Key"`      // Object name
+}
+
+// ListUploadedPartsResult defines the result object of ListUploadedParts
+type ListUploadedPartsResult struct {
+	XMLName              xml.Name       `xml:"ListPartsResult"`
+	Bucket               string         `xml:"Bucket"`               // Bucket name
+	Key                  string         `xml:"Key"`                  // Object name
+	UploadID             string         `xml:"UploadId"`             // Upload ID
+	NextPartNumberMarker string         `xml:"NextPartNumberMarker"` // Next part number
+	MaxParts             int            `xml:"MaxParts"`             // Max parts count
+	IsTruncated          bool           `xml:"IsTruncated"`          // Truncation flag; false means all entries were returned.
+	UploadedParts        []UploadedPart `xml:"Part"`                 // Uploaded parts
+}
+
+// UploadedPart defines an uploaded part
+type UploadedPart struct {
+	XMLName      xml.Name  `xml:"Part"`
+	PartNumber   int       `xml:"PartNumber"`   // Part number
+	LastModified time.Time `xml:"LastModified"` // Last modified time
+	ETag         string    `xml:"ETag"`         // ETag cache
+	Size         int       `xml:"Size"`         // Part size
+}
+
+// ListMultipartUploadResult defines the result object of ListMultipartUpload
+type ListMultipartUploadResult struct {
+	XMLName            xml.Name `xml:"ListMultipartUploadsResult"`
+	Bucket             string   `xml:"Bucket"`             // Bucket name
+	Delimiter          string   `xml:"Delimiter"`          // Delimiter for grouping objects.
+	Prefix             string   `xml:"Prefix"`             // Object prefix
+	KeyMarker          string   `xml:"KeyMarker"`          // Object key marker
+	UploadIDMarker     string   `xml:"UploadIdMarker"`     // UploadId marker
+	NextKeyMarker      string   `xml:"NextKeyMarker"`      // Next key marker, if not all entries were returned.
+	NextUploadIDMarker string   `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries were returned.
+	MaxUploads     int                 `xml:"MaxUploads"`            // Max uploads to return
+	IsTruncated    bool                `xml:"IsTruncated"`           // Flag indicates whether all entries are returned.
+	Uploads        []UncompletedUpload `xml:"Upload"`                // Ongoing uploads (not completed, not aborted)
+	CommonPrefixes []string            `xml:"CommonPrefixes>Prefix"` // Common prefixes list.
+}
+
+// UncompletedUpload structure wraps an uncompleted upload task
+type UncompletedUpload struct {
+	XMLName   xml.Name  `xml:"Upload"`
+	Key       string    `xml:"Key"`       // Object name
+	UploadID  string    `xml:"UploadId"`  // The UploadId
+	Initiated time.Time `xml:"Initiated"` // Initialization time, in a format such as 2012-02-23T04:18:23.000Z
+}
+
+// ProcessObjectResult defines the result object of ProcessObject
+type ProcessObjectResult struct {
+	Bucket   string `json:"bucket"`
+	FileSize int    `json:"fileSize"`
+	Object   string `json:"object"`
+	Status   string `json:"status"`
+}
+
+// decodeDeleteObjectsResult decodes the URL-encoded keys in the DeleteObjects result
+func decodeDeleteObjectsResult(result *DeleteObjectsResult) error {
+	var err error
+	for i := 0; i < len(result.DeletedObjects); i++ {
+		result.DeletedObjects[i], err = url.QueryUnescape(result.DeletedObjects[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeListObjectsResult decodes the URL-encoded fields in the list objects result
+func decodeListObjectsResult(result *ListObjectsResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Marker, err = url.QueryUnescape(result.Marker)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.NextMarker, err = url.QueryUnescape(result.NextMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Objects); i++ {
+		result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeListUploadedPartsResult decodes the URL-encoded key in the list uploaded parts result
+func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error {
+	var err error
+	result.Key, err = url.QueryUnescape(result.Key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// decodeListMultipartUploadResult decodes the URL-encoded fields in the list multipart upload result
+func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error {
+	var err error
+	result.Prefix, err = url.QueryUnescape(result.Prefix)
+	if err != nil {
+		return err
+	}
+	result.Delimiter, err = url.QueryUnescape(result.Delimiter)
+	if err != nil {
+		return err
+	}
+	result.KeyMarker, err = url.QueryUnescape(result.KeyMarker)
+	if err != nil {
+		return err
+	}
+	result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker)
+	if err != nil {
+		return err
+	}
+	for i := 0; i < len(result.Uploads); i++ {
+		result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key)
+		if err != nil {
+			return err
+		}
+	}
+	for i := 0; i < len(result.CommonPrefixes); i++ {
+		result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// createBucketConfiguration defines the configuration for creating a bucket.
+type createBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + StorageClass StorageClassType `xml:"StorageClass,omitempty"` +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go new file mode 100644 index 000000000000..80371447d956 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go @@ -0,0 +1,526 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +// UploadFile is multipart file upload. +// +// objectKey the object name. +// filePath the local file path to upload. +// partSize the part size in byte. +// options the options for uploading object. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (100KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey) + if cpFilePath != "" { + return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines) + } + } + + return bucket.uploadFile(objectKey, filePath, partSize, options, routines) +} + +func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + absPath, _ := filepath.Abs(srcFile) + cpFileName := getCpFileName(absPath, dest) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- concurrent upload without checkpoint ----- + +// getCpConfig gets checkpoint configuration +func getCpConfig(options []Option) *cpConfig { + cpcOpt, err := findOption(options, checkpointConfig, nil) + if err != nil || cpcOpt == nil { + return nil + } + + return cpcOpt.(*cpConfig) +} + +// getCpFileName return the name of the checkpoint file +func getCpFileName(src, dest string) string { + md5Ctx := md5.New() + md5Ctx.Write([]byte(src)) + srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + md5Ctx.Reset() + md5Ctx.Write([]byte(dest)) + destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum) +} + +// getRoutines gets the routine count. by default it's 1. 
+func getRoutines(options []Option) int { + rtnOpt, err := findOption(options, routineNum, nil) + if err != nil || rtnOpt == nil { + return 1 + } + + rs := rtnOpt.(int) + if rs < 1 { + rs = 1 + } else if rs > 100 { + rs = 100 + } + + return rs +} + +// getPayer return the payer of the request +func getPayer(options []Option) string { + payerOpt, err := findOption(options, HTTPHeaderOSSRequester, nil) + if err != nil || payerOpt == nil { + return "" + } + + return payerOpt.(string) +} + +// getProgressListener gets the progress callback +func getProgressListener(options []Option) ProgressListener { + isSet, listener, _ := isOptionSet(options, progressListener) + if !isSet { + return nil + } + return listener.(ProgressListener) +} + +// uploadPartHook is for testing usage +type uploadPartHook func(id int, chunk FileChunk) error + +var uploadPartHooker uploadPartHook = defaultUploadPart + +func defaultUploadPart(id int, chunk FileChunk) error { + return nil +} + +// workerArg defines worker argument structure +type workerArg struct { + bucket *Bucket + filePath string + imur InitiateMultipartUploadResult + options []Option + hook uploadPartHook +} + +// worker is the worker coroutine function +func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(id, chunk); err != nil { + failed <- err + break + } + part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// scheduler function +func scheduler(jobs chan FileChunk, chunks []FileChunk) { + for _, chunk := range chunks { + jobs <- chunk + } + close(jobs) +} + +func getTotalBytes(chunks []FileChunk) int64 { + var tb int64 + for _, chunk := range chunks { + tb += chunk.Size + } + return tb +} + +// uploadFile is a concurrent upload, without checkpoint +func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { + listener := getProgressListener(options) + + chunks, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + payerOptions := []Option{} + payer := getPayer(options) + if payer != "" { + payerOptions = append(payerOptions, RequestPayer(PayerType(payer))) + } + + // Initialize the multipart upload + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) 
+	if err != nil {
+		return err
+	}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getTotalBytes(chunks)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes)
+	publishProgress(listener, event)
+
+	// Start the worker goroutines
+	arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Schedule the jobs
+	go scheduler(jobs, chunks)
+
+	// Wait for the uploads to finish
+	completed := 0
+	parts := make([]UploadPart, len(chunks))
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			parts[part.PartNumber-1] = part
+			completedBytes += chunks[part.PartNumber-1].Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes)
+			publishProgress(listener, event)
+			bucket.AbortMultipartUpload(imur, payerOptions...)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	// All parts are uploaded
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes)
+	publishProgress(listener, event)
+
+	// Complete the multipart upload
+	_, err = bucket.CompleteMultipartUpload(imur, parts, payerOptions...)
+	if err != nil {
+		bucket.AbortMultipartUpload(imur, payerOptions...)
+		return err
+	}
+	return nil
+}
+
+// ----- concurrent upload with checkpoint -----
+const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62"
+
+type uploadCheckpoint struct {
+	Magic     string   // Magic
+	MD5       string   // The MD5 of the checkpoint file's content
+	FilePath  string   // Local file path
+	FileStat  cpStat   // File state
+	ObjectKey string   // Key
+	UploadID  string   // Upload ID
+	Parts     []cpPart // All parts of the local file
+}
+
+type cpStat struct {
+	Size         int64     // File size
+	LastModified time.Time // File's last modified time
+	MD5          string    // Local file's MD5
+}
+
+type cpPart struct {
+	Chunk       FileChunk  // File chunk
+	Part        UploadPart // Uploaded part
+	IsCompleted bool       // Upload complete flag
+}
+
+// isValid checks whether the checkpoint data is still usable: it is valid only
+// if the checkpoint's own MD5 matches and the local file has not changed since.
+func (cp uploadCheckpoint) isValid(filePath string) (bool, error) {
+	// Compare the CP's magic number and MD5.
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != uploadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	// Check whether the local file has been modified.
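+	// For illustration only: the MD5 comparison above and the stat/MD5
+	// comparison below form a small self-check protocol with dump() --
+	// dump() clears the MD5 field, JSON-marshals the struct, and stores the
+	// base64 MD5 of that JSON back into the field; isValid() repeats the
+	// computation and compares before trusting the checkpoint. Sketch:
+	//
+	//	cpb := cp
+	//	cpb.MD5 = ""
+	//	js, _ := json.Marshal(cpb)
+	//	sum := md5.Sum(js)
+	//	stamp := base64.StdEncoding.EncodeToString(sum[:]) // stored by dump(), compared by isValid()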
+ fd, err := os.Open(filePath) + if err != nil { + return false, err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return false, err + } + + md, err := calcFileMD5(filePath) + if err != nil { + return false, err + } + + // Compare the file size, file's last modified time and file's MD5 + if cp.FileStat.Size != st.Size() || + cp.FileStat.LastModified != st.ModTime() || + cp.FileStat.MD5 != md { + return false, nil + } + + return true, nil +} + +// load loads from the file +func (cp *uploadCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump dumps to the local file +func (cp *uploadCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialization + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// updatePart updates the part status +func (cp *uploadCheckpoint) updatePart(part UploadPart) { + cp.Parts[part.PartNumber-1].Part = part + cp.Parts[part.PartNumber-1].IsCompleted = true +} + +// todoParts returns unfinished parts +func (cp *uploadCheckpoint) todoParts() []FileChunk { + fcs := []FileChunk{} + for _, part := range cp.Parts { + if !part.IsCompleted { + fcs = append(fcs, part.Chunk) + } + } + return fcs +} + +// allParts returns all parts +func (cp *uploadCheckpoint) allParts() []UploadPart { + ps := []UploadPart{} + for _, part := range cp.Parts { + ps = append(ps, part.Part) + } + return ps +} + +// getCompletedBytes returns completed bytes count +func (cp *uploadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for _, part := range cp.Parts { + if part.IsCompleted { + completedBytes += part.Chunk.Size + } + } + return completedBytes +} + +// calcFileMD5 calculates the MD5 for the specified local file +func calcFileMD5(filePath string) (string, error) { + return "", nil +} + +// prepare initializes the multipart upload +func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error { + // CP + cp.Magic = uploadCpMagic + cp.FilePath = filePath + cp.ObjectKey = objectKey + + // Local file + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return err + } + cp.FileStat.Size = st.Size() + cp.FileStat.LastModified = st.ModTime() + md, err := calcFileMD5(filePath) + if err != nil { + return err + } + cp.FileStat.MD5 = md + + // Chunks + parts, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + cp.Parts = make([]cpPart, len(parts)) + for i, part := range parts { + cp.Parts[i].Chunk = part + cp.Parts[i].IsCompleted = false + } + + // Init load + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) + if err != nil { + return err + } + cp.UploadID = imur.UploadID + + return nil +} + +// complete completes the multipart upload and deletes the local CP files +func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { + imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName, + Key: cp.ObjectKey, UploadID: cp.UploadID} + _, err := bucket.CompleteMultipartUpload(imur, parts, options...) 
+ if err != nil { + return err + } + os.Remove(cpFilePath) + return err +} + +// uploadFileWithCp handles concurrent upload with checkpoint +func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error { + listener := getProgressListener(options) + + payerOptions := []Option{} + payer := getPayer(options) + if payer != "" { + payerOptions = append(payerOptions, RequestPayer(PayerType(payer))) + } + + // Load CP data + ucp := uploadCheckpoint{} + err := ucp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Load error or the CP data is invalid. + valid, err := ucp.isValid(filePath) + if err != nil || !valid { + if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil { + return err + } + os.Remove(cpFilePath) + } + + chunks := ucp.todoParts() + imur := InitiateMultipartUploadResult{ + Bucket: bucket.BucketName, + Key: objectKey, + UploadID: ucp.UploadID} + + jobs := make(chan FileChunk, len(chunks)) + results := make(chan UploadPart, len(chunks)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := ucp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size) + publishProgress(listener, event) + + // Start the workers + arg := workerArg{&bucket, filePath, imur, payerOptions, uploadPartHooker} + for w := 1; w <= routines; w++ { + go worker(w, arg, jobs, results, failed, die) + } + + // Schedule jobs + go scheduler(jobs, chunks) + + // Waiting for the job finished + completed := 0 + for completed < len(chunks) { + select { + case part := <-results: + completed++ + ucp.updatePart(part) + ucp.dump(cpFilePath) + completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size + event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size) + publishProgress(listener, event) + return err + } + + if completed >= len(chunks) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size) + publishProgress(listener, event) + + // Complete the multipart upload + err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, payerOptions) + return err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go new file mode 100644 index 000000000000..c0e7b2b1b18c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go @@ -0,0 +1,265 @@ +package oss + +import ( + "bytes" + "errors" + "fmt" + "hash/crc64" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "time" +) + +// userAgent gets user agent +// It has the SDK version information, OS information and GO version +func userAgent() string { + sys := getSysInfo() + return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name, + sys.release, sys.machine, runtime.Version()) +} + +type sysInfo struct { + name string // OS name such as windows/Linux + release string // OS version 2.6.32-220.23.2.ali1089.el5.x86_64 etc + machine string // CPU type amd64/x86_64 +} + +// getSysInfo gets system info +// gets the OS information and CPU type +func getSysInfo() sysInfo { + name := runtime.GOOS + release := "-" + machine := runtime.GOARCH + if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil { + name = string(bytes.TrimSpace(out)) + 
} + if out, err := exec.Command("uname", "-r").CombinedOutput(); err == nil { + release = string(bytes.TrimSpace(out)) + } + if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil { + machine = string(bytes.TrimSpace(out)) + } + return sysInfo{name: name, release: release, machine: machine} +} + +// unpackedRange +type unpackedRange struct { + hasStart bool // Flag indicates if the start point is specified + hasEnd bool // Flag indicates if the end point is specified + start int64 // Start point + end int64 // End point +} + +// invalidRangeError returns invalid range error +func invalidRangeError(r string) error { + return fmt.Errorf("InvalidRange %s", r) +} + +// parseRange parse various styles of range such as bytes=M-N +func parseRange(normalizedRange string) (*unpackedRange, error) { + var err error + hasStart := false + hasEnd := false + var start int64 + var end int64 + + // Bytes==M-N or ranges=M-N + nrSlice := strings.Split(normalizedRange, "=") + if len(nrSlice) != 2 || nrSlice[0] != "bytes" { + return nil, invalidRangeError(normalizedRange) + } + + // Bytes=M-N,X-Y + rSlice := strings.Split(nrSlice[1], ",") + rStr := rSlice[0] + + if strings.HasSuffix(rStr, "-") { // M- + startStr := rStr[:len(rStr)-1] + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasStart = true + } else if strings.HasPrefix(rStr, "-") { // -N + len := rStr[1:] + end, err = strconv.ParseInt(len, 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + if end == 0 { // -0 + return nil, invalidRangeError(normalizedRange) + } + hasEnd = true + } else { // M-N + valSlice := strings.Split(rStr, "-") + if len(valSlice) != 2 { + return nil, invalidRangeError(normalizedRange) + } + start, err = strconv.ParseInt(valSlice[0], 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasStart = true + end, err = strconv.ParseInt(valSlice[1], 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasEnd = true + } + + return &unpackedRange{hasStart, hasEnd, start, end}, nil +} + +// adjustRange returns adjusted range, adjust the range according to the length of the file +func adjustRange(ur *unpackedRange, size int64) (start, end int64) { + if ur == nil { + return 0, size + } + + if ur.hasStart && ur.hasEnd { + start = ur.start + end = ur.end + 1 + if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end { + start = 0 + end = size + } + } else if ur.hasStart { + start = ur.start + end = size + if ur.start < 0 || ur.start >= size { + start = 0 + } + } else if ur.hasEnd { + start = size - ur.end + end = size + if ur.end < 0 || ur.end > size { + start = 0 + end = size + } + } + return +} + +// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. +// gets the current time in Unix time, in seconds. +func GetNowSec() int64 { + return time.Now().Unix() +} + +// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. The result is undefined if the Unix time +// in nanoseconds cannot be represented by an int64. Note that this +// means the result of calling UnixNano on the zero Time is undefined. +// gets the current time in Unix time, in nanoseconds. +func GetNowNanoSec() int64 { + return time.Now().UnixNano() +} + +// GetNowGMT gets the current time in GMT format. 
+func GetNowGMT() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +// FileChunk is the file chunk definition +type FileChunk struct { + Number int // Chunk number + Offset int64 // Chunk offset + Size int64 // Chunk size. +} + +// SplitFileByPartNum splits big file into parts by the num of parts. +// Split the file with specified parts count, returns the split result when error is nil. +func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) { + if chunkNum <= 0 || chunkNum > 10000 { + return nil, errors.New("chunkNum invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + + if int64(chunkNum) > stat.Size() { + return nil, errors.New("oss: chunkNum invalid") + } + + var chunks []FileChunk + var chunk = FileChunk{} + var chunkN = (int64)(chunkNum) + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * (stat.Size() / chunkN) + if i == chunkN-1 { + chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN + } else { + chunk.Size = stat.Size() / chunkN + } + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// SplitFileByPartSize splits big file into parts by the size of parts. +// Splits the file by the part size. Returns the FileChunk when error is nil. +func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) { + if chunkSize <= 0 { + return nil, errors.New("chunkSize invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + var chunkN = stat.Size() / chunkSize + if chunkN >= 10000 { + return nil, errors.New("Too many parts, please increase part size") + } + + var chunks []FileChunk + var chunk = FileChunk{} + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * chunkSize + chunk.Size = chunkSize + chunks = append(chunks, chunk) + } + + if stat.Size()%chunkSize > 0 { + chunk.Number = len(chunks) + 1 + chunk.Offset = int64(len(chunks)) * chunkSize + chunk.Size = stat.Size() % chunkSize + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// GetPartEnd calculates the end position +func GetPartEnd(begin int64, total int64, per int64) int64 { + if begin+per > total { + return total - 1 + } + return begin + per - 1 +} + +// crcTable returns the table constructed from the specified polynomial +var crcTable = func() *crc64.Table { + return crc64.MakeTable(crc64.ECMA) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE new file mode 100644 index 000000000000..b463b2914cd9 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2017 Alibaba Group Holding Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
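For illustration, a minimal sketch of how the vendored OSS upload helpers above fit together for a caller (bucket and object names are hypothetical; oss.Routines and oss.Checkpoint are option constructors defined elsewhere in this SDK):

	bucket, err := ossClient.Bucket("my-terraform-state")
	if err == nil {
		// three concurrent part uploads, resumable through a checkpoint file
		err = bucket.UploadFile("env/terraform.tfstate", "/tmp/terraform.tfstate",
			512*1024, oss.Routines(3), oss.Checkpoint(true, "/tmp/terraform.tfstate.cp"))
	}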
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go new file mode 100644 index 000000000000..e807e3072a02 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go @@ -0,0 +1,1240 @@ +package tablestore
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
+	"github.com/golang/protobuf/proto"
+	"math/rand"
+	"net"
+	"net/http"
+	"time"
+	"io"
+	"strings"
+)
+
+const (
+	userAgent = "aliyun-tablestore-sdk-golang/4.0.2"
+
+	createTableUri = "/CreateTable"
+	listTableUri = "/ListTable"
+	deleteTableUri = "/DeleteTable"
+	describeTableUri = "/DescribeTable"
+	updateTableUri = "/UpdateTable"
+	putRowUri = "/PutRow"
+	deleteRowUri = "/DeleteRow"
+	getRowUri = "/GetRow"
+	updateRowUri = "/UpdateRow"
+	batchGetRowUri = "/BatchGetRow"
+	batchWriteRowUri = "/BatchWriteRow"
+	getRangeUri = "/GetRange"
+	listStreamUri = "/ListStream"
+	describeStreamUri = "/DescribeStream"
+	getShardIteratorUri = "/GetShardIterator"
+	getStreamRecordUri = "/GetStreamRecord"
+	computeSplitPointsBySizeRequestUri = "/ComputeSplitPointsBySize"
+	searchUri = "/Search"
+	createSearchIndexUri = "/CreateSearchIndex"
+	listSearchIndexUri = "/ListSearchIndex"
+	deleteSearchIndexUri = "/DeleteSearchIndex"
+	describeSearchIndexUri = "/DescribeSearchIndex"
+
+	createIndexUri = "/CreateIndex"
+	dropIndexUri = "/DropIndex"
+
+	createlocaltransactionuri = "/StartLocalTransaction"
+	committransactionuri = "/CommitTransaction"
+	aborttransactionuri = "/AbortTransaction"
+)
+
+// Constructor: creates a client for the TableStore service.
+//
+// @param endPoint The address of the TableStore service.
+// @param instanceName The name of the TableStore instance.
+// @param accessKeyId The Access Key ID that identifies the user.
+// @param accessKeySecret The Access Key Secret used for signing and verification.
+// @param options Optional client configuration.
+func NewClient(endPoint, instanceName, accessKeyId, accessKeySecret string, options ...ClientOption) *TableStoreClient {
+	client := NewClientWithConfig(endPoint, instanceName, accessKeyId, accessKeySecret, "", nil)
+	// apply client options
+	for _, option := range options {
+		option(client)
+	}
+
+	return client
+}
+
+type GetHttpClient func() IHttpClient
+
+var currentGetHttpClientFunc GetHttpClient = func() IHttpClient {
+	return &TableStoreHttpClient{}
+}
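+
+// For illustration, a minimal, hypothetical sketch of constructing a client
+// (endpoint, instance name and credentials are placeholders):
+//
+//	client := NewClient(
+//		"https://my-instance.cn-hangzhou.ots.aliyuncs.com", // hypothetical endpoint
+//		"my-instance",
+//		"<accessKeyId>",
+//		"<accessKeySecret>",
+//	)
+//	// client is then ready for table and row operations such as CreateTable and PutRow.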
+
+// Constructor: creates a client for the OTS (TableStore) service with an
+// explicit TableStoreConfig.
+func NewClientWithConfig(endPoint, instanceName, accessKeyId, accessKeySecret string, securityToken string, config *TableStoreConfig) *TableStoreClient {
+	tableStoreClient := new(TableStoreClient)
+	tableStoreClient.endPoint = endPoint
+	tableStoreClient.instanceName = instanceName
+	tableStoreClient.accessKeyId = accessKeyId
+	tableStoreClient.accessKeySecret = accessKeySecret
+	tableStoreClient.securityToken = securityToken
+	if config == nil {
+		config = NewDefaultTableStoreConfig()
+	}
+	tableStoreClient.config = config
+	tableStoreTransportProxy := &http.Transport{
+		MaxIdleConnsPerHost: config.MaxIdleConnections,
+		Dial: (&net.Dialer{
+			Timeout: config.HTTPTimeout.ConnectionTimeout,
+		}).Dial,
+	}
+
+	tableStoreClient.httpClient = currentGetHttpClientFunc()
+
+	httpClient := &http.Client{
+		Transport: tableStoreTransportProxy,
+		Timeout:   tableStoreClient.config.HTTPTimeout.RequestTimeout,
+	}
+	tableStoreClient.httpClient.New(httpClient)
+
+	tableStoreClient.random = rand.New(rand.NewSource(time.Now().Unix()))
+
+	return tableStoreClient
+}
+
+// doRequestWithRetry sends the request to the server, retrying retryable
+// failures until RetryTimes or MaxRetryTime is exhausted.
+func (tableStoreClient *TableStoreClient) doRequestWithRetry(uri string, req, resp proto.Message, responseInfo *ResponseInfo) error {
+	end := time.Now().Add(tableStoreClient.config.MaxRetryTime)
+	url := fmt.Sprintf("%s%s", tableStoreClient.endPoint, uri)
+	/* request body */
+	var body []byte
+	var err error
+	if req != nil {
+		body, err = proto.Marshal(req)
+		if err != nil {
+			return err
+		}
+	} else {
+		body = nil
+	}
+
+	var value int64
+	var i uint
+	var respBody []byte
+	var requestId string
+	for i = 0; ; i++ {
+		respBody, err, requestId = tableStoreClient.doRequest(url, uri, body, resp)
+		responseInfo.RequestId = requestId
+
+		if err == nil {
+			break
+		} else {
+			value = getNextPause(tableStoreClient, err, i, end, value, uri)
+
+			// fmt.Println("hit retry", uri, err, *e.Code, value)
+			if value <= 0 {
+				return err
+			}
+
+			time.Sleep(time.Duration(value) * time.Millisecond)
+		}
+	}
+
+	if len(respBody) == 0 {
+		return nil
+	}
+
+	err = proto.Unmarshal(respBody, resp)
+	if err != nil {
+		return fmt.Errorf("decode resp failed: %s", err)
+	}
+
+	return nil
+}
+
+// getNextPause computes the next retry pause in milliseconds; it returns 0
+// when the request should not be retried.
+func getNextPause(tableStoreClient *TableStoreClient, err error, count uint, end time.Time, lastInterval int64, action string) int64 {
+	if tableStoreClient.config.RetryTimes <= count || time.Now().After(end) {
+		return 0
+	}
+	var retry bool
+	if otsErr, ok := err.(*OtsError); ok {
+		retry = shouldRetry(otsErr.Code, otsErr.Message, action)
+	} else {
+		if err == io.EOF || err == io.ErrUnexpectedEOF || // retry on special net errors containing EOF or reset
+			strings.Contains(err.Error(), io.EOF.Error()) ||
+			strings.Contains(err.Error(), "Connection reset by peer") ||
+			strings.Contains(err.Error(), "connection reset by peer") {
+			retry = true
+		} else if nErr, ok := err.(net.Error); ok {
+			retry = nErr.Temporary()
+		}
+	}
+
+	if retry {
+		value := lastInterval*2 + tableStoreClient.random.Int63n(DefaultRetryInterval-1) + 1
+		if value > MaxRetryInterval {
+			value = MaxRetryInterval
+		}
+
+		return value
+	}
+	return 0
+}
+
+func shouldRetry(errorCode string, errorMsg string, action string) bool {
+	if retryNotMatterActions(errorCode, errorMsg) {
+		return true
+	}
+
+	if isIdempotent(action) &&
+		(errorCode == STORAGE_TIMEOUT || errorCode == INTERNAL_SERVER_ERROR || errorCode == SERVER_UNAVAILABLE) {
+		return true
+	}
+	return false
+}
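+
+// For illustration, getNextPause above grows the pause roughly geometrically
+// with random jitter; a sketch of one step (actual values depend on the
+// DefaultRetryInterval and MaxRetryInterval constants and the RNG):
+//
+//	next := last*2 + rand.Int63n(DefaultRetryInterval-1) + 1 // milliseconds
+//	if next > MaxRetryInterval {
+//		next = MaxRetryInterval
+//	}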
+
+func retryNotMatterActions(errorCode string, errorMsg string) bool {
+	if errorCode == ROW_OPERATION_CONFLICT || errorCode == NOT_ENOUGH_CAPACITY_UNIT ||
+		errorCode == TABLE_NOT_READY || errorCode == PARTITION_UNAVAILABLE ||
+		errorCode == SERVER_BUSY || errorCode == STORAGE_SERVER_BUSY || (errorCode == QUOTA_EXHAUSTED && errorMsg == "Too frequent table operations.") {
+		return true
+	}
+	return false
+}
+
+func isIdempotent(action string) bool {
+	if action == batchGetRowUri || action == describeTableUri ||
+		action == getRangeUri || action == getRowUri ||
+		action == listTableUri || action == listStreamUri ||
+		action == getStreamRecordUri || action == describeStreamUri {
+		return true
+	}
+	return false
+}
+
+func (tableStoreClient *TableStoreClient) doRequest(url string, uri string, body []byte, resp proto.Message) ([]byte, error, string) {
+	hreq, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+	if err != nil {
+		return nil, err, ""
+	}
+	/* set headers */
+	hreq.Header.Set("User-Agent", userAgent)
+
+	date := time.Now().UTC().Format(xOtsDateFormat)
+
+	hreq.Header.Set(xOtsDate, date)
+	hreq.Header.Set(xOtsApiversion, ApiVersion)
+	hreq.Header.Set(xOtsAccesskeyid, tableStoreClient.accessKeyId)
+	hreq.Header.Set(xOtsInstanceName, tableStoreClient.instanceName)
+
+	md5Byte := md5.Sum(body)
+	md5Base64 := base64.StdEncoding.EncodeToString(md5Byte[:16])
+	hreq.Header.Set(xOtsContentmd5, md5Base64)
+
+	otshead := createOtsHeaders(tableStoreClient.accessKeySecret)
+	otshead.set(xOtsDate, date)
+	otshead.set(xOtsApiversion, ApiVersion)
+	otshead.set(xOtsAccesskeyid, tableStoreClient.accessKeyId)
+	if tableStoreClient.securityToken != "" {
+		hreq.Header.Set(xOtsHeaderStsToken, tableStoreClient.securityToken)
+		otshead.set(xOtsHeaderStsToken, tableStoreClient.securityToken)
+	}
+	otshead.set(xOtsContentmd5, md5Base64)
+	otshead.set(xOtsInstanceName, tableStoreClient.instanceName)
+	sign, err := otshead.signature(uri, "POST", tableStoreClient.accessKeySecret)
+
+	if err != nil {
+		return nil, err, ""
+	}
+	hreq.Header.Set(xOtsSignature, sign)
+
+	/* end set headers */
+	return tableStoreClient.postReq(hreq, url)
+}
+
+// table API
+// Create a table with the CreateTableRequest, in which the table name and the
+// primary key schema are required.
+//
+// @param request The CreateTableRequest.
+// @return The CreateTableResponse, or an error.
+func (tableStoreClient *TableStoreClient) CreateTable(request *CreateTableRequest) (*CreateTableResponse, error) {
+	if len(request.TableMeta.TableName) > maxTableNameLength {
+		return nil, errTableNameTooLong(request.TableMeta.TableName)
+	}
+
+	if len(request.TableMeta.SchemaEntry) > maxPrimaryKeyNum {
+		return nil, errPrimaryKeyTooMuch
+	}
+
+	if len(request.TableMeta.SchemaEntry) == 0 {
+		return nil, errCreateTableNoPrimaryKey
+	}
+
+	req := new(otsprotocol.CreateTableRequest)
+	req.TableMeta = new(otsprotocol.TableMeta)
+	req.TableMeta.TableName = proto.String(request.TableMeta.TableName)
+
+	if len(request.TableMeta.DefinedColumns) > 0 {
+		for _, value := range request.TableMeta.DefinedColumns {
+			value := value // copy: taking the address of the loop variable would alias every entry
+			req.TableMeta.DefinedColumn = append(req.TableMeta.DefinedColumn, &otsprotocol.DefinedColumnSchema{Name: &value.Name, Type: value.ColumnType.ConvertToPbDefinedColumnType().Enum()})
+		}
+	}
+
+	if len(request.IndexMetas) > 0 {
+		for _, value := range request.IndexMetas {
+			req.IndexMetas = append(req.IndexMetas, value.ConvertToPbIndexMeta())
+		}
+	}
+
+	for _, key := range request.TableMeta.SchemaEntry {
+		keyType := otsprotocol.PrimaryKeyType(*key.Type)
+		if key.Option != nil {
+			keyOption := otsprotocol.PrimaryKeyOption(*key.Option)
+			req.TableMeta.PrimaryKey = append(req.TableMeta.PrimaryKey, &otsprotocol.PrimaryKeySchema{Name: key.Name, Type: &keyType, Option: &keyOption})
+		} else {
+			req.TableMeta.PrimaryKey = append(req.TableMeta.PrimaryKey, &otsprotocol.PrimaryKeySchema{Name: key.Name, Type: &keyType})
+		}
+	}
+
+	req.ReservedThroughput = new(otsprotocol.ReservedThroughput)
+	req.ReservedThroughput.CapacityUnit = new(otsprotocol.CapacityUnit)
+	req.ReservedThroughput.CapacityUnit.Read = proto.Int32(int32(request.ReservedThroughput.Readcap))
+	req.ReservedThroughput.CapacityUnit.Write = proto.Int32(int32(request.ReservedThroughput.Writecap))
+
+	req.TableOptions = new(otsprotocol.TableOptions)
+	req.TableOptions.TimeToLive = proto.Int32(int32(request.TableOption.TimeToAlive))
+	req.TableOptions.MaxVersions = proto.Int32(int32(request.TableOption.MaxVersion))
+
+	if request.StreamSpec != nil {
+		var ss otsprotocol.StreamSpecification
+		if request.StreamSpec.EnableStream {
+			ss = otsprotocol.StreamSpecification{
+				EnableStream:   &request.StreamSpec.EnableStream,
+				ExpirationTime: &request.StreamSpec.ExpirationTime}
+		} else {
+			ss = otsprotocol.StreamSpecification{
+				EnableStream: &request.StreamSpec.EnableStream}
+		}
+
+		req.StreamSpec = &ss
+	}
+
+	resp := new(otsprotocol.CreateTableResponse)
+	response := &CreateTableResponse{}
+	if err := tableStoreClient.doRequestWithRetry(createTableUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (tableStoreClient *TableStoreClient) CreateIndex(request *CreateIndexRequest) (*CreateIndexResponse, error) {
+	if len(request.MainTableName) > maxTableNameLength {
+		return nil, errTableNameTooLong(request.MainTableName)
+	}
+
+	req := new(otsprotocol.CreateIndexRequest)
+	req.IndexMeta = request.IndexMeta.ConvertToPbIndexMeta()
+	req.IncludeBaseData = proto.Bool(request.IncludeBaseData)
+	req.MainTableName = proto.String(request.MainTableName)
+
+	resp := new(otsprotocol.CreateIndexResponse)
+	response := &CreateIndexResponse{}
+	if err := tableStoreClient.doRequestWithRetry(createIndexUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
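+
+// For illustration, a minimal, hypothetical sketch of creating a table with a
+// single string primary key (PrimaryKeyType_STRING is assumed from this SDK's
+// public types; TimeToAlive of -1 means rows never expire):
+//
+//	pkName := "pk0"
+//	pkType := PrimaryKeyType_STRING
+//	meta := &TableMeta{TableName: "sample_table"}
+//	meta.SchemaEntry = append(meta.SchemaEntry, &PrimaryKeySchema{Name: &pkName, Type: &pkType})
+//	req := &CreateTableRequest{
+//		TableMeta:          meta,
+//		TableOption:        &TableOption{TimeToAlive: -1, MaxVersion: 1},
+//		ReservedThroughput: &ReservedThroughput{Readcap: 0, Writecap: 0},
+//	}
+//	if _, err := client.CreateTable(req); err != nil { /* handle error */ }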
+
+func (tableStoreClient *TableStoreClient) DeleteIndex(request *DeleteIndexRequest) (*DeleteIndexResponse, error) {
+	if len(request.MainTableName) > maxTableNameLength {
+		return nil, errTableNameTooLong(request.MainTableName)
+	}
+
+	req := new(otsprotocol.DropIndexRequest)
+	req.IndexName = proto.String(request.IndexName)
+	req.MainTableName = proto.String(request.MainTableName)
+
+	resp := new(otsprotocol.DropIndexResponse)
+	response := &DeleteIndexResponse{}
+	if err := tableStoreClient.doRequestWithRetry(dropIndexUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// List all tables. On success the names of all tables are returned.
+//
+// @return The ListTableResponse carrying the table names.
+func (tableStoreClient *TableStoreClient) ListTable() (*ListTableResponse, error) {
+	resp := new(otsprotocol.ListTableResponse)
+	response := &ListTableResponse{}
+	if err := tableStoreClient.doRequestWithRetry(listTableUri, nil, resp, &response.ResponseInfo); err != nil {
+		return response, err
+	}
+
+	response.TableNames = resp.TableNames
+	return response, nil
+}
+
+// Delete a table.
+//
+// @param request The DeleteTableRequest carrying the table name.
+// @return The DeleteTableResponse, or an error.
+func (tableStoreClient *TableStoreClient) DeleteTable(request *DeleteTableRequest) (*DeleteTableResponse, error) {
+	req := new(otsprotocol.DeleteTableRequest)
+	req.TableName = proto.String(request.TableName)
+
+	response := &DeleteTableResponse{}
+	if err := tableStoreClient.doRequestWithRetry(deleteTableUri, req, nil, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+	return response, nil
+}
+
+// Query the table meta, table options and reserved throughput details.
+// @param request The DescribeTableRequest.
+// @return The DescribeTableResponse.
+func (tableStoreClient *TableStoreClient) DescribeTable(request *DescribeTableRequest) (*DescribeTableResponse, error) {
+	req := new(otsprotocol.DescribeTableRequest)
+	req.TableName = proto.String(request.TableName)
+
+	resp := new(otsprotocol.DescribeTableResponse)
+	response := new(DescribeTableResponse)
+
+	if err := tableStoreClient.doRequestWithRetry(describeTableUri, req, resp, &response.ResponseInfo); err != nil {
+		return &DescribeTableResponse{}, err
+	}
+
+	response.ReservedThroughput = &ReservedThroughput{Readcap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Read)), Writecap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Write))}
+
+	responseTableMeta := new(TableMeta)
+	responseTableMeta.TableName = *resp.TableMeta.TableName
+
+	for _, key := range resp.TableMeta.PrimaryKey {
+		keyType := PrimaryKeyType(*key.Type)
+
+		// enable it when we support the key option in describe table
+		if key.Option != nil {
+			keyOption := PrimaryKeyOption(*key.Option)
+			responseTableMeta.SchemaEntry = append(responseTableMeta.SchemaEntry, &PrimaryKeySchema{Name: key.Name, Type: &keyType, Option: &keyOption})
+		} else {
+			responseTableMeta.SchemaEntry = append(responseTableMeta.SchemaEntry, &PrimaryKeySchema{Name: key.Name, Type: &keyType})
+		}
+	}
+	response.TableMeta = responseTableMeta
+	response.TableOption = &TableOption{TimeToAlive: int(*resp.TableOptions.TimeToLive), MaxVersion: int(*resp.TableOptions.MaxVersions)}
+	if resp.StreamDetails != nil && *resp.StreamDetails.EnableStream {
+		response.StreamDetails = &StreamDetails{
+			EnableStream:   *resp.StreamDetails.EnableStream,
+			StreamId:       (*StreamId)(resp.StreamDetails.StreamId),
+			ExpirationTime: *resp.StreamDetails.ExpirationTime,
+			LastEnableTime: *resp.StreamDetails.LastEnableTime}
+	} else {
+		response.StreamDetails = &StreamDetails{
+			EnableStream: false}
+	}
+
+	for _, meta := range resp.IndexMetas {
+		response.IndexMetas = append(response.IndexMetas, ConvertPbIndexMetaToIndexMeta(meta))
+	}
+
+	return response, nil
+}
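+
+// For illustration, a hypothetical sketch of inspecting a table's schema and
+// reserved throughput via DescribeTable:
+//
+//	resp, err := client.DescribeTable(&DescribeTableRequest{TableName: "sample_table"})
+//	if err == nil {
+//		for _, pk := range resp.TableMeta.SchemaEntry {
+//			fmt.Println(*pk.Name, *pk.Type) // primary key name and type
+//		}
+//		fmt.Println(resp.ReservedThroughput.Readcap, resp.ReservedThroughput.Writecap)
+//	}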
+
+// Update a table's options and/or reserved throughput; only the fields that
+// are set on the request are sent to the server.
+// @param request The UpdateTableRequest.
+// @return The UpdateTableResponse.
+func (tableStoreClient *TableStoreClient) UpdateTable(request *UpdateTableRequest) (*UpdateTableResponse, error) {
+	req := new(otsprotocol.UpdateTableRequest)
+	req.TableName = proto.String(request.TableName)
+
+	if request.ReservedThroughput != nil {
+		req.ReservedThroughput = new(otsprotocol.ReservedThroughput)
+		req.ReservedThroughput.CapacityUnit = new(otsprotocol.CapacityUnit)
+		req.ReservedThroughput.CapacityUnit.Read = proto.Int32(int32(request.ReservedThroughput.Readcap))
+		req.ReservedThroughput.CapacityUnit.Write = proto.Int32(int32(request.ReservedThroughput.Writecap))
+	}
+
+	if request.TableOption != nil {
+		req.TableOptions = new(otsprotocol.TableOptions)
+		req.TableOptions.TimeToLive = proto.Int32(int32(request.TableOption.TimeToAlive))
+		req.TableOptions.MaxVersions = proto.Int32(int32(request.TableOption.MaxVersion))
+	}
+
+	if request.StreamSpec != nil {
+		req.StreamSpec = &otsprotocol.StreamSpecification{
+			EnableStream:   &request.StreamSpec.EnableStream,
+			ExpirationTime: &request.StreamSpec.ExpirationTime}
+	}
+
+	resp := new(otsprotocol.UpdateTableResponse)
+	response := new(UpdateTableResponse)
+
+	if err := tableStoreClient.doRequestWithRetry(updateTableUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	response.ReservedThroughput = &ReservedThroughput{
+		Readcap:  int(*(resp.ReservedThroughputDetails.CapacityUnit.Read)),
+		Writecap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Write))}
+	response.TableOption = &TableOption{
+		TimeToAlive: int(*resp.TableOptions.TimeToLive),
+		MaxVersion:  int(*resp.TableOptions.MaxVersions)}
+	if resp.StreamDetails != nil && *resp.StreamDetails.EnableStream { // guard against a nil StreamDetails, as DescribeTable does
+		response.StreamDetails = &StreamDetails{
+			EnableStream:   *resp.StreamDetails.EnableStream,
+			StreamId:       (*StreamId)(resp.StreamDetails.StreamId),
+			ExpirationTime: *resp.StreamDetails.ExpirationTime,
+			LastEnableTime: *resp.StreamDetails.LastEnableTime}
+	} else {
+		response.StreamDetails = &StreamDetails{
+			EnableStream: false}
+	}
+	return response, nil
+}
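+
+// For illustration, a hypothetical sketch of raising MaxVersion on an
+// existing table while leaving the TTL unchanged at "never expire":
+//
+//	req := &UpdateTableRequest{TableName: "sample_table"}
+//	req.TableOption = &TableOption{TimeToAlive: -1, MaxVersion: 3}
+//	if _, err := client.UpdateTable(req); err != nil { /* handle error */ }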
+
+// Put or update a row in a table. The operation is determined by CheckingType,
+// which has three options: NO (no existence check), UPDATE (the row must
+// already exist) and INSERT (the row must not exist). The transaction ID is
+// optional.
+//
+// @param request The PutRowRequest carrying the row change.
+// @return The PutRowResponse, or an error.
+func (tableStoreClient *TableStoreClient) PutRow(request *PutRowRequest) (*PutRowResponse, error) {
+	if request == nil {
+		return nil, nil
+	}
+
+	if request.PutRowChange == nil {
+		return nil, nil
+	}
+
+	req := new(otsprotocol.PutRowRequest)
+	req.TableName = proto.String(request.PutRowChange.TableName)
+	req.Row = request.PutRowChange.Serialize()
+
+	condition := new(otsprotocol.Condition)
+	condition.RowExistence = request.PutRowChange.Condition.buildCondition()
+	if request.PutRowChange.Condition.ColumnCondition != nil {
+		condition.ColumnCondition = request.PutRowChange.Condition.ColumnCondition.Serialize()
+	}
+
+	if request.PutRowChange.ReturnType == ReturnType_RT_PK {
+		content := otsprotocol.ReturnContent{ReturnType: otsprotocol.ReturnType_RT_PK.Enum()}
+		req.ReturnContent = &content
+	}
+
+	if request.PutRowChange.TransactionId != nil {
+		req.TransactionId = request.PutRowChange.TransactionId
+	}
+
+	req.Condition = condition
+
+	resp := new(otsprotocol.PutRowResponse)
+	response := &PutRowResponse{}
+	if err := tableStoreClient.doRequestWithRetry(putRowUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	response.ConsumedCapacityUnit = &ConsumedCapacityUnit{}
+	response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read
+	response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write
+
+	if request.PutRowChange.ReturnType == ReturnType_RT_PK {
+		rows, err := readRowsWithHeader(bytes.NewReader(resp.Row))
+		if err != nil {
+			return response, err
+		}
+
+		for _, pk := range rows[0].primaryKey {
+			pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value}
+			response.PrimaryKey.PrimaryKeys = append(response.PrimaryKey.PrimaryKeys, pkColumn)
+		}
+	}
+
+	return response, nil
+}
+
+// Delete a row identified by its primary key.
+// @param request The DeleteRowRequest.
+func (tableStoreClient *TableStoreClient) DeleteRow(request *DeleteRowRequest) (*DeleteRowResponse, error) {
+	req := new(otsprotocol.DeleteRowRequest)
+	req.TableName = proto.String(request.DeleteRowChange.TableName)
+	req.Condition = request.DeleteRowChange.getCondition()
+	req.PrimaryKey = request.DeleteRowChange.PrimaryKey.Build(true)
+
+	if request.DeleteRowChange.TransactionId != nil {
+		req.TransactionId = request.DeleteRowChange.TransactionId
+	}
+
+	resp := new(otsprotocol.DeleteRowResponse)
+	response := &DeleteRowResponse{}
+	if err := tableStoreClient.doRequestWithRetry(deleteRowUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	response.ConsumedCapacityUnit = &ConsumedCapacityUnit{}
+	response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read
+	response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write
+	return response, nil
+}
+
+// row API
+// Get the data of a row or some columns.
+// +// @param getrowrequest +func (tableStoreClient *TableStoreClient) GetRow(request *GetRowRequest) (*GetRowResponse, error) { + req := new(otsprotocol.GetRowRequest) + resp := new(otsprotocol.GetRowResponse) + + req.TableName = proto.String(request.SingleRowQueryCriteria.TableName) + + if (request.SingleRowQueryCriteria.getColumnsToGet() != nil) && len(request.SingleRowQueryCriteria.getColumnsToGet()) > 0 { + req.ColumnsToGet = request.SingleRowQueryCriteria.getColumnsToGet() + } + + req.PrimaryKey = request.SingleRowQueryCriteria.PrimaryKey.Build(false) + + if request.SingleRowQueryCriteria.MaxVersion != 0 { + req.MaxVersions = proto.Int32(int32(request.SingleRowQueryCriteria.MaxVersion)) + } + + if request.SingleRowQueryCriteria.TransactionId != nil { + req.TransactionId = request.SingleRowQueryCriteria.TransactionId + } + + if request.SingleRowQueryCriteria.StartColumn != nil { + req.StartColumn = request.SingleRowQueryCriteria.StartColumn + } + + if request.SingleRowQueryCriteria.EndColumn != nil { + req.EndColumn = request.SingleRowQueryCriteria.EndColumn + } + + if request.SingleRowQueryCriteria.TimeRange != nil { + if request.SingleRowQueryCriteria.TimeRange.Specific != 0 { + req.TimeRange = &otsprotocol.TimeRange{SpecificTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.Specific)} + } else { + req.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.Start), EndTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.End)} + } + } else if request.SingleRowQueryCriteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + if request.SingleRowQueryCriteria.Filter != nil { + req.Filter = request.SingleRowQueryCriteria.Filter.Serialize() + } + + response := &GetRowResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + if err := tableStoreClient.doRequestWithRetry(getRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + + if len(resp.Row) == 0 { + return response, nil + } + + rows, err := readRowsWithHeader(bytes.NewReader(resp.Row)) + if err != nil { + return nil, err + } + + for _, pk := range rows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + response.PrimaryKey.PrimaryKeys = append(response.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range rows[0].cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + response.Columns = append(response.Columns, dataColumn) + } + + return response, nil +} + +// Update row +// @param UpdateRowRequest +func (tableStoreClient *TableStoreClient) UpdateRow(request *UpdateRowRequest) (*UpdateRowResponse, error) { + req := new(otsprotocol.UpdateRowRequest) + resp := new(otsprotocol.UpdateRowResponse) + + req.TableName = proto.String(request.UpdateRowChange.TableName) + req.Condition = request.UpdateRowChange.getCondition() + req.RowChange = request.UpdateRowChange.Serialize() + if request.UpdateRowChange.TransactionId != nil { + req.TransactionId = request.UpdateRowChange.TransactionId + } + + response := &UpdateRowResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + + if request.UpdateRowChange.ReturnType == ReturnType_RT_AFTER_MODIFY { + content := otsprotocol.ReturnContent{ReturnType: otsprotocol.ReturnType_RT_AFTER_MODIFY.Enum()} + for _, column := range 
request.UpdateRowChange.ColumnNamesToReturn { + content.ReturnColumnNames = append(content.ReturnColumnNames, column) + } + req.ReturnContent = &content + } + + if err := tableStoreClient.doRequestWithRetry(updateRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + if request.UpdateRowChange.ReturnType == ReturnType_RT_AFTER_MODIFY { + plainbufferRow, err := readRowsWithHeader(bytes.NewReader(resp.Row)) + if err != nil { + return response, err + } + for _, cell := range plainbufferRow[0].cells { + fmt.Println(cell.cellName) + attribute := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + response.Columns = append(response.Columns, attribute) + } + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + return response, nil +} + +// Batch Get Row +// @param BatchGetRowRequest +func (tableStoreClient *TableStoreClient) BatchGetRow(request *BatchGetRowRequest) (*BatchGetRowResponse, error) { + req := new(otsprotocol.BatchGetRowRequest) + + var tablesInBatch []*otsprotocol.TableInBatchGetRowRequest + + for _, Criteria := range request.MultiRowQueryCriteria { + table := new(otsprotocol.TableInBatchGetRowRequest) + table.TableName = proto.String(Criteria.TableName) + table.ColumnsToGet = Criteria.ColumnsToGet + + if Criteria.StartColumn != nil { + table.StartColumn = Criteria.StartColumn + } + + if Criteria.EndColumn != nil { + table.EndColumn = Criteria.EndColumn + } + + if Criteria.Filter != nil { + table.Filter = Criteria.Filter.Serialize() + } + + if Criteria.MaxVersion != 0 { + table.MaxVersions = proto.Int32(int32(Criteria.MaxVersion)) + } + + if Criteria.TimeRange != nil { + if Criteria.TimeRange.Specific != 0 { + table.TimeRange = &otsprotocol.TimeRange{SpecificTime: proto.Int64(Criteria.TimeRange.Specific)} + } else { + table.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(Criteria.TimeRange.Start), EndTime: proto.Int64(Criteria.TimeRange.End)} + } + } else if Criteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + for _, pk := range Criteria.PrimaryKey { + pkWithBytes := pk.Build(false) + table.PrimaryKey = append(table.PrimaryKey, pkWithBytes) + } + + tablesInBatch = append(tablesInBatch, table) + } + + req.Tables = tablesInBatch + resp := new(otsprotocol.BatchGetRowResponse) + + response := &BatchGetRowResponse{TableToRowsResult: make(map[string][]RowResult)} + if err := tableStoreClient.doRequestWithRetry(batchGetRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + for _, table := range resp.Tables { + index := int32(0) + for _, row := range table.Rows { + rowResult := &RowResult{TableName: *table.TableName, IsSucceed: *row.IsOk, ConsumedCapacityUnit: &ConsumedCapacityUnit{}, Index: index} + index++ + if *row.IsOk == false { + rowResult.Error = Error{Code: *row.Error.Code, Message: *row.Error.Message} + } else { + // len == 0 means row not exist + if len(row.Row) > 0 { + rows, err := readRowsWithHeader(bytes.NewReader(row.Row)) + if err != nil { + return nil, err + } + + for _, pk := range rows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + rowResult.PrimaryKey.PrimaryKeys = append(rowResult.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range rows[0].cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: 
cell.cellTimestamp} + rowResult.Columns = append(rowResult.Columns, dataColumn) + } + } + + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + } + + response.TableToRowsResult[*table.TableName] = append(response.TableToRowsResult[*table.TableName], *rowResult) + } + + } + return response, nil +} + +// Batch Write Row +// @param BatchWriteRowRequest +func (tableStoreClient *TableStoreClient) BatchWriteRow(request *BatchWriteRowRequest) (*BatchWriteRowResponse, error) { + req := new(otsprotocol.BatchWriteRowRequest) + + var tablesInBatch []*otsprotocol.TableInBatchWriteRowRequest + + for key, value := range request.RowChangesGroupByTable { + table := new(otsprotocol.TableInBatchWriteRowRequest) + table.TableName = proto.String(key) + + for _, row := range value { + rowInBatch := &otsprotocol.RowInBatchWriteRowRequest{} + rowInBatch.Condition = row.getCondition() + rowInBatch.RowChange = row.Serialize() + rowInBatch.Type = row.getOperationType().Enum() + table.Rows = append(table.Rows, rowInBatch) + } + + tablesInBatch = append(tablesInBatch, table) + } + + req.Tables = tablesInBatch + + resp := new(otsprotocol.BatchWriteRowResponse) + response := &BatchWriteRowResponse{TableToRowsResult: make(map[string][]RowResult)} + + if err := tableStoreClient.doRequestWithRetry(batchWriteRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + for _, table := range resp.Tables { + index := int32(0) + for _, row := range table.Rows { + rowResult := &RowResult{TableName: *table.TableName, IsSucceed: *row.IsOk, ConsumedCapacityUnit: &ConsumedCapacityUnit{}, Index: index} + index++ + if *row.IsOk == false { + rowResult.Error = Error{Code: *row.Error.Code, Message: *row.Error.Message} + } else { + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + } /*else { + rows, err := readRowsWithHeader(bytes.NewReader(row.Row)) + if err != nil { + return nil, err + } + + for _, pk := range (rows[0].primaryKey) { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + rowResult.PrimaryKey.PrimaryKeys = append(rowResult.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range (rows[0].cells) { + dataColumn := &DataColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value} + rowResult.Columns = append(rowResult.Columns, dataColumn) + } + + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + }*/ + + response.TableToRowsResult[*table.TableName] = append(response.TableToRowsResult[*table.TableName], *rowResult) + } + } + return response, nil +} + +// Get Range +// @param GetRangeRequest +func (tableStoreClient *TableStoreClient) GetRange(request *GetRangeRequest) (*GetRangeResponse, error) { + req := new(otsprotocol.GetRangeRequest) + req.TableName = proto.String(request.RangeRowQueryCriteria.TableName) + req.Direction = request.RangeRowQueryCriteria.Direction.ToDirection().Enum() + + if request.RangeRowQueryCriteria.MaxVersion != 0 { + req.MaxVersions = proto.Int32(request.RangeRowQueryCriteria.MaxVersion) + } + + if request.RangeRowQueryCriteria.TransactionId != nil { + req.TransactionId = request.RangeRowQueryCriteria.TransactionId + } + + if request.RangeRowQueryCriteria.TimeRange != nil { + if request.RangeRowQueryCriteria.TimeRange.Specific != 0 { + 
req.TimeRange = &otsprotocol.TimeRange{SpecificTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.Specific)} + } else { + req.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.Start), EndTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.End)} + } + } else if request.RangeRowQueryCriteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + if request.RangeRowQueryCriteria.Limit != 0 { + req.Limit = proto.Int32(request.RangeRowQueryCriteria.Limit) + } + + if (request.RangeRowQueryCriteria.ColumnsToGet != nil) && len(request.RangeRowQueryCriteria.ColumnsToGet) > 0 { + req.ColumnsToGet = request.RangeRowQueryCriteria.ColumnsToGet + } + + if request.RangeRowQueryCriteria.Filter != nil { + req.Filter = request.RangeRowQueryCriteria.Filter.Serialize() + } + + if request.RangeRowQueryCriteria.StartColumn != nil { + req.StartColumn = request.RangeRowQueryCriteria.StartColumn + } + + if request.RangeRowQueryCriteria.EndColumn != nil { + req.EndColumn = request.RangeRowQueryCriteria.EndColumn + } + + req.InclusiveStartPrimaryKey = request.RangeRowQueryCriteria.StartPrimaryKey.Build(false) + req.ExclusiveEndPrimaryKey = request.RangeRowQueryCriteria.EndPrimaryKey.Build(false) + + resp := new(otsprotocol.GetRangeResponse) + response := &GetRangeResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + if err := tableStoreClient.doRequestWithRetry(getRangeUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + if len(resp.NextStartPrimaryKey) != 0 { + currentRows, err := readRowsWithHeader(bytes.NewReader(resp.NextStartPrimaryKey)) + if err != nil { + return nil, err + } + + response.NextStartPrimaryKey = &PrimaryKey{} + for _, pk := range currentRows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + response.NextStartPrimaryKey.PrimaryKeys = append(response.NextStartPrimaryKey.PrimaryKeys, pkColumn) + } + } + + if len(resp.Rows) == 0 { + return response, nil + } + + rows, err := readRowsWithHeader(bytes.NewReader(resp.Rows)) + if err != nil { + return response, err + } + + for _, row := range rows { + currentRow := &Row{} + currentpk := new(PrimaryKey) + for _, pk := range row.primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + currentpk.PrimaryKeys = append(currentpk.PrimaryKeys, pkColumn) + } + + currentRow.PrimaryKey = currentpk + + for _, cell := range row.cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + currentRow.Columns = append(currentRow.Columns, dataColumn) + } + + response.Rows = append(response.Rows, currentRow) + } + + return response, nil + +} + +func (client *TableStoreClient) ListStream(req *ListStreamRequest) (*ListStreamResponse, error) { + pbReq := &otsprotocol.ListStreamRequest{} + pbReq.TableName = req.TableName + + pbResp := otsprotocol.ListStreamResponse{} + resp := ListStreamResponse{} + if err := client.doRequestWithRetry(listStreamUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + streams := make([]Stream, len(pbResp.Streams)) + for i, pbStream := range pbResp.Streams { + streams[i] = Stream{ + Id: (*StreamId)(pbStream.StreamId), + TableName: pbStream.TableName, + CreationTime: *pbStream.CreationTime} + } + 
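
The GetRange implementation above returns at most one page of rows per call and reports a non-nil NextStartPrimaryKey while more data remains, so callers are expected to loop. Below is a minimal caller-side pagination sketch (not part of this diff), assuming an already configured *tablestore.TableStoreClient and a hypothetical table whose only primary-key column is named "pk":

```go
package main

import (
	"fmt"
	"log"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
)

// scanAll pages through every row between the min and max values of a
// hypothetical single primary-key column "pk". The client is assumed to be
// already configured; nothing here is part of the vendored SDK itself.
func scanAll(client *tablestore.TableStoreClient) {
	start := &tablestore.PrimaryKey{}
	start.AddPrimaryKeyColumnWithMinValue("pk")
	end := &tablestore.PrimaryKey{}
	end.AddPrimaryKeyColumnWithMaxValue("pk")

	criteria := &tablestore.RangeRowQueryCriteria{
		TableName:       "sample_table", // hypothetical
		StartPrimaryKey: start,
		EndPrimaryKey:   end,
		Direction:       tablestore.FORWARD,
		MaxVersion:      1,   // required: GetRange rejects MaxVersion == 0 without a TimeRange
		Limit:           100, // page size
	}

	for {
		resp, err := client.GetRange(&tablestore.GetRangeRequest{RangeRowQueryCriteria: criteria})
		if err != nil {
			log.Fatal(err)
		}
		for _, row := range resp.Rows {
			fmt.Println(row.PrimaryKey, row.Columns)
		}
		// GetRange leaves NextStartPrimaryKey nil once the scan reaches EndPrimaryKey.
		if resp.NextStartPrimaryKey == nil {
			return
		}
		criteria.StartPrimaryKey = resp.NextStartPrimaryKey
	}
}
```

This is the same loop shape the OSS backend's lock/state bookkeeping relies on when it enumerates TableStore rows.
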
resp.Streams = streams[:] + return &resp, nil +} + +func (client *TableStoreClient) DescribeStream(req *DescribeStreamRequest) (*DescribeStreamResponse, error) { + pbReq := &otsprotocol.DescribeStreamRequest{} + { + pbReq.StreamId = (*string)(req.StreamId) + pbReq.InclusiveStartShardId = (*string)(req.InclusiveStartShardId) + pbReq.ShardLimit = req.ShardLimit + } + pbResp := otsprotocol.DescribeStreamResponse{} + resp := DescribeStreamResponse{} + if err := client.doRequestWithRetry(describeStreamUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + resp.StreamId = (*StreamId)(pbResp.StreamId) + resp.ExpirationTime = *pbResp.ExpirationTime + resp.TableName = pbResp.TableName + resp.CreationTime = *pbResp.CreationTime + Assert(pbResp.StreamStatus != nil, "StreamStatus in DescribeStreamResponse is required.") + switch *pbResp.StreamStatus { + case otsprotocol.StreamStatus_STREAM_ENABLING: + resp.Status = SS_Enabling + case otsprotocol.StreamStatus_STREAM_ACTIVE: + resp.Status = SS_Active + } + resp.NextShardId = (*ShardId)(pbResp.NextShardId) + shards := make([]*StreamShard, len(pbResp.Shards)) + for i, pbShard := range pbResp.Shards { + shards[i] = &StreamShard{ + SelfShard: (*ShardId)(pbShard.ShardId), + FatherShard: (*ShardId)(pbShard.ParentId), + MotherShard: (*ShardId)(pbShard.ParentSiblingId)} + } + resp.Shards = shards[:] + return &resp, nil +} + +func (client *TableStoreClient) GetShardIterator(req *GetShardIteratorRequest) (*GetShardIteratorResponse, error) { + pbReq := &otsprotocol.GetShardIteratorRequest{ + StreamId: (*string)(req.StreamId), + ShardId: (*string)(req.ShardId)} + + if req.Timestamp != nil { + pbReq.Timestamp = req.Timestamp + } + + if req.Token != nil { + pbReq.Token = req.Token + } + + pbResp := otsprotocol.GetShardIteratorResponse{} + resp := GetShardIteratorResponse{} + if err := client.doRequestWithRetry(getShardIteratorUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + resp.ShardIterator = (*ShardIterator)(pbResp.ShardIterator) + resp.Token = pbResp.NextToken + return &resp, nil +} + +func (client TableStoreClient) GetStreamRecord(req *GetStreamRecordRequest) (*GetStreamRecordResponse, error) { + pbReq := &otsprotocol.GetStreamRecordRequest{ + ShardIterator: (*string)(req.ShardIterator)} + if req.Limit != nil { + pbReq.Limit = req.Limit + } + + pbResp := otsprotocol.GetStreamRecordResponse{} + resp := GetStreamRecordResponse{} + if err := client.doRequestWithRetry(getStreamRecordUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + if pbResp.NextShardIterator != nil { + resp.NextShardIterator = (*ShardIterator)(pbResp.NextShardIterator) + } + records := make([]*StreamRecord, len(pbResp.StreamRecords)) + for i, pbRecord := range pbResp.StreamRecords { + record := StreamRecord{} + records[i] = &record + + switch *pbRecord.ActionType { + case otsprotocol.ActionType_PUT_ROW: + record.Type = AT_Put + case otsprotocol.ActionType_UPDATE_ROW: + record.Type = AT_Update + case otsprotocol.ActionType_DELETE_ROW: + record.Type = AT_Delete + } + + plainRows, err := readRowsWithHeader(bytes.NewReader(pbRecord.Record)) + if err != nil { + return nil, err + } + Assert(len(plainRows) == 1, + "There must be exactly one row in a StreamRecord.") + plainRow := plainRows[0] + pkey := PrimaryKey{} + record.PrimaryKey = &pkey + pkey.PrimaryKeys = make([]*PrimaryKeyColumn, len(plainRow.primaryKey)) + for i, pk := range plainRow.primaryKey { + pkc := PrimaryKeyColumn{ + ColumnName: string(pk.cellName), + 
Value: pk.cellValue.Value} + pkey.PrimaryKeys[i] = &pkc + } + Assert(plainRow.extension != nil, + "extension in a stream record is required.") + record.Info = plainRow.extension + record.Columns = make([]*RecordColumn, len(plainRow.cells)) + for i, plainCell := range plainRow.cells { + cell := RecordColumn{} + record.Columns[i] = &cell + + name := string(plainCell.cellName) + cell.Name = &name + if plainCell.cellValue != nil { + cell.Type = RCT_Put + } else { + if plainCell.cellTimestamp > 0 { + cell.Type = RCT_DeleteOneVersion + } else { + cell.Type = RCT_DeleteAllVersions + } + } + switch cell.Type { + case RCT_Put: + cell.Value = plainCell.cellValue.Value + fallthrough + case RCT_DeleteOneVersion: + cell.Timestamp = &plainCell.cellTimestamp + case RCT_DeleteAllVersions: + break + } + } + } + resp.Records = records + return &resp, nil +} + +func (client TableStoreClient) ComputeSplitPointsBySize(req *ComputeSplitPointsBySizeRequest) (*ComputeSplitPointsBySizeResponse, error) { + pbReq := &otsprotocol.ComputeSplitPointsBySizeRequest{ + TableName: &(req.TableName), + SplitSize: &(req.SplitSize), + } + + pbResp := otsprotocol.ComputeSplitPointsBySizeResponse{} + resp := ComputeSplitPointsBySizeResponse{} + if err := client.doRequestWithRetry(computeSplitPointsBySizeRequestUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + fmt.Println(len(pbResp.SplitPoints)) + fmt.Println(len(pbResp.Locations)) + + beginPk := &PrimaryKey{} + endPk := &PrimaryKey{} + for _, pkSchema := range pbResp.Schema { + beginPk.AddPrimaryKeyColumnWithMinValue(*pkSchema.Name) + endPk.AddPrimaryKeyColumnWithMaxValue(*pkSchema.Name) + } + lastPk := beginPk + nowPk := endPk + + for _, pbRecord := range pbResp.SplitPoints { + plainRows, err := readRowsWithHeader(bytes.NewReader(pbRecord)) + if err != nil { + return nil, err + } + + nowPk = &PrimaryKey{} + for _, pk := range plainRows[0].primaryKey { + nowPk.AddPrimaryKeyColumn(string(pk.cellName), pk.cellValue.Value) + } + + if len(pbResp.Schema) > 1 { + for i := 1; i < len(pbResp.Schema); i++ { + nowPk.AddPrimaryKeyColumnWithMinValue(*pbResp.Schema[i].Name) + } + } + + newSplit := &Split{LowerBound: lastPk, UpperBound: nowPk} + resp.Splits = append(resp.Splits, newSplit) + lastPk = nowPk + + } + + newSplit := &Split{LowerBound: lastPk, UpperBound: endPk} + resp.Splits = append(resp.Splits, newSplit) + + index := 0 + for _, pbLocation := range pbResp.Locations { + count := *pbLocation.Repeat + value := *pbLocation.Location + + for i := int64(0); i < count; i++ { + resp.Splits[index].Location = value + index++ + } + } + return &resp, nil +} + +func (client *TableStoreClient) StartLocalTransaction(request *StartLocalTransactionRequest) (*StartLocalTransactionResponse, error) { + req := new(otsprotocol.StartLocalTransactionRequest) + resp := new(otsprotocol.StartLocalTransactionResponse) + + req.TableName = proto.String(request.TableName) + req.Key = request.PrimaryKey.Build(false) + + response := &StartLocalTransactionResponse{} + if err := client.doRequestWithRetry(createlocaltransactionuri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.TransactionId = resp.TransactionId + return response, nil +} + +func (client *TableStoreClient) CommitTransaction(request *CommitTransactionRequest) (*CommitTransactionResponse, error) { + req := new(otsprotocol.CommitTransactionRequest) + resp := new(otsprotocol.CommitTransactionResponse) + + req.TransactionId = request.TransactionId + + response := 
&CommitTransactionResponse{} + if err := client.doRequestWithRetry(committransactionuri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} + +func (client *TableStoreClient) AbortTransaction(request *AbortTransactionRequest) (*AbortTransactionResponse, error) { + req := new(otsprotocol.AbortTransactionRequest) + resp := new(otsprotocol.AbortTransactionResponse) + + req.TransactionId = request.TransactionId + + response := &AbortTransactionResponse{} + if err := client.doRequestWithRetry(aborttransactionuri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go new file mode 100644 index 000000000000..9a107b740a4e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go @@ -0,0 +1,52 @@ +package tablestore + +import ( + "errors" + "fmt" +) + +var ( + errMissMustHeader = func(header string) error { + return errors.New("[tablestore] miss must header: " + header) + } + errTableNameTooLong = func(name string) error { + return errors.New("[tablestore] table name: \"" + name + "\" too long") + } + + errInvalidPartitionType = errors.New("[tablestore] invalid partition key") + errMissPrimaryKey = errors.New("[tablestore] missing primary key") + errPrimaryKeyTooMuch = errors.New("[tablestore] primary key too much") + errMultiDeleteRowsTooMuch = errors.New("[tablestore] multi delete rows too much") + errCreateTableNoPrimaryKey = errors.New("[tablestore] create table no primary key") + errUnexpectIoEnd = errors.New("[tablestore] unexpect io end") + errTag = errors.New("[tablestore] unexpect tag") + errNoChecksum = errors.New("[tablestore] expect checksum") + errChecksum = errors.New("[tablestore] checksum failed") + errInvalidInput = errors.New("[tablestore] invalid input") +) + +const ( + OTS_CLIENT_UNKNOWN = "OTSClientUnknownError" + + ROW_OPERATION_CONFLICT = "OTSRowOperationConflict" + NOT_ENOUGH_CAPACITY_UNIT = "OTSNotEnoughCapacityUnit" + TABLE_NOT_READY = "OTSTableNotReady" + PARTITION_UNAVAILABLE = "OTSPartitionUnavailable" + SERVER_BUSY = "OTSServerBusy" + STORAGE_SERVER_BUSY = "OTSStorageServerBusy" + QUOTA_EXHAUSTED = "OTSQuotaExhausted" + + STORAGE_TIMEOUT = "OTSTimeout" + SERVER_UNAVAILABLE = "OTSServerUnavailable" + INTERNAL_SERVER_ERROR = "OTSInternalServerError" +) + +type OtsError struct { + Code string + Message string + RequestId string +} + +func (e *OtsError) Error() string { + return fmt.Sprintf("%s %s %s", e.Code, e.Message, e.RequestId) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go new file mode 100644 index 000000000000..7e0e3b269a98 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go @@ -0,0 +1,22 @@ +package tablestore + +type TableStoreApi interface { + CreateTable(request *CreateTableRequest) (*CreateTableResponse, error) + ListTable() (*ListTableResponse, error) + DeleteTable(request *DeleteTableRequest) (*DeleteTableResponse, error) + DescribeTable(request *DescribeTableRequest) (*DescribeTableResponse, error) + UpdateTable(request *UpdateTableRequest) (*UpdateTableResponse, error) + PutRow(request *PutRowRequest) (*PutRowResponse, error) + DeleteRow(request *DeleteRowRequest) 
(*DeleteRowResponse, error) + GetRow(request *GetRowRequest) (*GetRowResponse, error) + UpdateRow(request *UpdateRowRequest) (*UpdateRowResponse, error) + BatchGetRow(request *BatchGetRowRequest) (*BatchGetRowResponse, error) + BatchWriteRow(request *BatchWriteRowRequest) (*BatchWriteRowResponse, error) + GetRange(request *GetRangeRequest) (*GetRangeResponse, error) + + // stream related + ListStream(request *ListStreamRequest) (*ListStreamResponse, error) + DescribeStream(request *DescribeStreamRequest) (*DescribeStreamResponse, error) + GetShardIterator(request *GetShardIteratorRequest) (*GetShardIteratorResponse, error) + GetStreamRecord(request *GetStreamRecordRequest) (*GetStreamRecordResponse, error) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go new file mode 100644 index 000000000000..4f7cdc84ce79 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go @@ -0,0 +1,855 @@ +package tablestore + +import ( + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" + "math/rand" + "net/http" + "strconv" + "strings" + "time" + //"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" +) + +// @class TableStoreClient +// The TableStoreClient, which will connect OTS service for authorization, create/list/ +// delete tables/table groups, to get/put/delete a row. +// Note: TableStoreClient is thread-safe. +// TableStoreClient的功能包括连接OTS服务进行验证、创建/列出/删除表或表组、插入/获取/ +// 删除/更新行数据 +type TableStoreClient struct { + endPoint string + instanceName string + accessKeyId string + accessKeySecret string + securityToken string + + httpClient IHttpClient + config *TableStoreConfig + random *rand.Rand +} + +type ClientOption func(*TableStoreClient) + +type TableStoreHttpClient struct { + httpClient *http.Client +} + +// use this to mock http.client for testing +type IHttpClient interface { + Do(*http.Request) (*http.Response, error) + New(*http.Client) +} + +func (httpClient *TableStoreHttpClient) Do(req *http.Request) (*http.Response, error) { + return httpClient.httpClient.Do(req) +} + +func (httpClient *TableStoreHttpClient) New(client *http.Client) { + httpClient.httpClient = client +} + +type HTTPTimeout struct { + ConnectionTimeout time.Duration + RequestTimeout time.Duration +} + +type TableStoreConfig struct { + RetryTimes uint + MaxRetryTime time.Duration + HTTPTimeout HTTPTimeout + MaxIdleConnections int +} + +func NewDefaultTableStoreConfig() *TableStoreConfig { + httpTimeout := &HTTPTimeout{ + ConnectionTimeout: time.Second * 15, + RequestTimeout: time.Second * 30} + config := &TableStoreConfig{ + RetryTimes: 10, + HTTPTimeout: *httpTimeout, + MaxRetryTime: time.Second * 5, + MaxIdleConnections: 2000} + return config +} + +type CreateTableRequest struct { + TableMeta *TableMeta + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamSpec *StreamSpecification + IndexMetas []*IndexMeta +} + +type CreateIndexRequest struct { + MainTableName string + IndexMeta *IndexMeta + IncludeBaseData bool +} + +type DeleteIndexRequest struct { + MainTableName string + IndexName string +} + +type ResponseInfo struct { + RequestId string +} + +type CreateTableResponse struct { + ResponseInfo +} + +type CreateIndexResponse struct { + ResponseInfo +} + +type DeleteIndexResponse struct { + ResponseInfo +} + +type DeleteTableResponse struct { + ResponseInfo +} + +type TableMeta struct 
{ + TableName string + SchemaEntry []*PrimaryKeySchema + DefinedColumns []*DefinedColumnSchema +} + +type PrimaryKeySchema struct { + Name *string + Type *PrimaryKeyType + Option *PrimaryKeyOption +} + +type PrimaryKey struct { + PrimaryKeys []*PrimaryKeyColumn +} + +type TableOption struct { + TimeToAlive, MaxVersion int +} + +type ReservedThroughput struct { + Readcap, Writecap int +} + +type ListTableResponse struct { + TableNames []string + ResponseInfo +} + +type DeleteTableRequest struct { + TableName string +} + +type DescribeTableRequest struct { + TableName string +} + +type DescribeTableResponse struct { + TableMeta *TableMeta + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamDetails *StreamDetails + IndexMetas []*IndexMeta + ResponseInfo +} + +type UpdateTableRequest struct { + TableName string + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamSpec *StreamSpecification +} + +type UpdateTableResponse struct { + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamDetails *StreamDetails + ResponseInfo +} + +type ConsumedCapacityUnit struct { + Read int32 + Write int32 +} + +type PutRowResponse struct { + ConsumedCapacityUnit *ConsumedCapacityUnit + PrimaryKey PrimaryKey + ResponseInfo +} + +type DeleteRowResponse struct { + ConsumedCapacityUnit *ConsumedCapacityUnit + ResponseInfo +} + +type UpdateRowResponse struct { + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + ResponseInfo +} + +type PrimaryKeyType int32 + +const ( + PrimaryKeyType_INTEGER PrimaryKeyType = 1 + PrimaryKeyType_STRING PrimaryKeyType = 2 + PrimaryKeyType_BINARY PrimaryKeyType = 3 +) + +const ( + DefaultRetryInterval = 10 + MaxRetryInterval = 320 +) + +type PrimaryKeyOption int32 + +const ( + NONE PrimaryKeyOption = 0 + AUTO_INCREMENT PrimaryKeyOption = 1 + MIN PrimaryKeyOption = 2 + MAX PrimaryKeyOption = 3 +) + +type PrimaryKeyColumn struct { + ColumnName string + Value interface{} + PrimaryKeyOption PrimaryKeyOption +} + +func (this *PrimaryKeyColumn) String() string { + xs := make([]string, 0) + xs = append(xs, fmt.Sprintf("\"Name\": \"%s\"", this.ColumnName)) + switch this.PrimaryKeyOption { + case NONE: + xs = append(xs, fmt.Sprintf("\"Value\": \"%s\"", this.Value)) + case MIN: + xs = append(xs, "\"Value\": -inf") + case MAX: + xs = append(xs, "\"Value\": +inf") + case AUTO_INCREMENT: + xs = append(xs, "\"Value\": auto-incr") + } + return fmt.Sprintf("{%s}", strings.Join(xs, ", ")) +} + +type AttributeColumn struct { + ColumnName string + Value interface{} + Timestamp int64 +} + +type TimeRange struct { + Start int64 + End int64 + Specific int64 +} + +type ColumnToUpdate struct { + ColumnName string + Type byte + Timestamp int64 + HasType bool + HasTimestamp bool + IgnoreValue bool + Value interface{} +} + +type RowExistenceExpectation int + +const ( + RowExistenceExpectation_IGNORE RowExistenceExpectation = 0 + RowExistenceExpectation_EXPECT_EXIST RowExistenceExpectation = 1 + RowExistenceExpectation_EXPECT_NOT_EXIST RowExistenceExpectation = 2 +) + +type ComparatorType int32 + +const ( + CT_EQUAL ComparatorType = 1 + CT_NOT_EQUAL ComparatorType = 2 + CT_GREATER_THAN ComparatorType = 3 + CT_GREATER_EQUAL ComparatorType = 4 + CT_LESS_THAN ComparatorType = 5 + CT_LESS_EQUAL ComparatorType = 6 +) + +type LogicalOperator int32 + +const ( + LO_NOT LogicalOperator = 1 + LO_AND LogicalOperator = 2 + LO_OR LogicalOperator = 3 +) + +type FilterType int32 + +const ( + FT_SINGLE_COLUMN_VALUE FilterType = 1 + 
FT_COMPOSITE_COLUMN_VALUE FilterType = 2 + FT_COLUMN_PAGINATION FilterType = 3 +) + +type ColumnFilter interface { + Serialize() []byte + ToFilter() *otsprotocol.Filter +} + +type VariantType int32 + +const ( + Variant_INTEGER VariantType = 0; + Variant_DOUBLE VariantType = 1; + //VT_BOOLEAN = 2; + Variant_STRING VariantType = 3; +) + +type ValueTransferRule struct { + Regex string + Cast_type VariantType +} + +type SingleColumnCondition struct { + Comparator *ComparatorType + ColumnName *string + ColumnValue interface{} //[]byte + FilterIfMissing bool + LatestVersionOnly bool + TransferRule *ValueTransferRule +} + +type ReturnType int32 + +const ( + ReturnType_RT_NONE ReturnType = 0 + ReturnType_RT_PK ReturnType = 1 + ReturnType_RT_AFTER_MODIFY ReturnType = 2 +) + +type PaginationFilter struct { + Offset int32 + Limit int32 +} + +type CompositeColumnValueFilter struct { + Operator LogicalOperator + Filters []ColumnFilter +} + +func (ccvfilter *CompositeColumnValueFilter) Serialize() []byte { + result, _ := proto.Marshal(ccvfilter.ToFilter()) + return result +} + +func (ccvfilter *CompositeColumnValueFilter) ToFilter() *otsprotocol.Filter { + compositefilter := NewCompositeFilter(ccvfilter.Filters, ccvfilter.Operator) + compositeFilterToBytes, _ := proto.Marshal(compositefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_COMPOSITE_COLUMN_VALUE.Enum() + filter.Filter = compositeFilterToBytes + return filter +} + +func (ccvfilter *CompositeColumnValueFilter) AddFilter(filter ColumnFilter) { + ccvfilter.Filters = append(ccvfilter.Filters, filter) +} + +func (condition *SingleColumnCondition) ToFilter() *otsprotocol.Filter { + singlefilter := NewSingleColumnValueFilter(condition) + singleFilterToBytes, _ := proto.Marshal(singlefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_SINGLE_COLUMN_VALUE.Enum() + filter.Filter = singleFilterToBytes + return filter +} + +func (condition *SingleColumnCondition) Serialize() []byte { + result, _ := proto.Marshal(condition.ToFilter()) + return result +} + +func (pageFilter *PaginationFilter) ToFilter() *otsprotocol.Filter { + compositefilter := NewPaginationFilter(pageFilter) + compositeFilterToBytes, _ := proto.Marshal(compositefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_COLUMN_PAGINATION.Enum() + filter.Filter = compositeFilterToBytes + return filter +} + +func (pageFilter *PaginationFilter) Serialize() []byte { + result, _ := proto.Marshal(pageFilter.ToFilter()) + return result +} + +func NewTableOptionWithMaxVersion(maxVersion int) *TableOption { + tableOption := new(TableOption) + tableOption.TimeToAlive = -1 + tableOption.MaxVersion = maxVersion + return tableOption +} + +func NewTableOption(timeToAlive int, maxVersion int) *TableOption { + tableOption := new(TableOption) + tableOption.TimeToAlive = timeToAlive + tableOption.MaxVersion = maxVersion + return tableOption +} + +type RowCondition struct { + RowExistenceExpectation RowExistenceExpectation + ColumnCondition ColumnFilter +} + +type PutRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Columns []AttributeColumn + Condition *RowCondition + ReturnType ReturnType + TransactionId *string +} + +type PutRowRequest struct { + PutRowChange *PutRowChange +} + +type DeleteRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Condition *RowCondition + TransactionId *string +} + +type DeleteRowRequest struct { + DeleteRowChange *DeleteRowChange +} + +type 
SingleRowQueryCriteria struct { + ColumnsToGet []string + TableName string + PrimaryKey *PrimaryKey + MaxVersion int32 + TimeRange *TimeRange + Filter ColumnFilter + StartColumn *string + EndColumn *string + TransactionId *string +} + +type UpdateRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Columns []ColumnToUpdate + Condition *RowCondition + TransactionId *string + ReturnType ReturnType + ColumnNamesToReturn []string +} + +type UpdateRowRequest struct { + UpdateRowChange *UpdateRowChange +} + +func (rowQueryCriteria *SingleRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *SingleRowQueryCriteria) SetStartColumn(columnName string) { + rowQueryCriteria.StartColumn = &columnName +} + +func (rowQueryCriteria *SingleRowQueryCriteria) SetEndtColumn(columnName string) { + rowQueryCriteria.EndColumn = &columnName +} + +func (rowQueryCriteria *SingleRowQueryCriteria) getColumnsToGet() []string { + return rowQueryCriteria.ColumnsToGet +} + +func (rowQueryCriteria *MultiRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *RangeRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *MultiRowQueryCriteria) AddRow(pk *PrimaryKey) { + rowQueryCriteria.PrimaryKey = append(rowQueryCriteria.PrimaryKey, pk) +} + +type GetRowRequest struct { + SingleRowQueryCriteria *SingleRowQueryCriteria +} + +type MultiRowQueryCriteria struct { + PrimaryKey []*PrimaryKey + ColumnsToGet []string + TableName string + MaxVersion int + TimeRange *TimeRange + Filter ColumnFilter + StartColumn *string + EndColumn *string +} + +type BatchGetRowRequest struct { + MultiRowQueryCriteria []*MultiRowQueryCriteria +} + +type ColumnMap struct { + Columns map[string][]*AttributeColumn + columnsKey []string +} + +type GetRowResponse struct { + PrimaryKey PrimaryKey + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + columnMap *ColumnMap + ResponseInfo +} + +type Error struct { + Code string + Message string +} + +type RowResult struct { + TableName string + IsSucceed bool + Error Error + PrimaryKey PrimaryKey + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + Index int32 +} + +type RowChange interface { + Serialize() []byte + getOperationType() otsprotocol.OperationType + getCondition() *otsprotocol.Condition + GetTableName() string +} + +type BatchGetRowResponse struct { + TableToRowsResult map[string][]RowResult + ResponseInfo +} + +type BatchWriteRowRequest struct { + RowChangesGroupByTable map[string][]RowChange +} + +type BatchWriteRowResponse struct { + TableToRowsResult map[string][]RowResult + ResponseInfo +} + +type Direction int32 + +const ( + FORWARD Direction = 0 + BACKWARD Direction = 1 +) + +type RangeRowQueryCriteria struct { + TableName string + StartPrimaryKey *PrimaryKey + EndPrimaryKey *PrimaryKey + ColumnsToGet []string + MaxVersion int32 + TimeRange *TimeRange + Filter ColumnFilter + Direction Direction + Limit int32 + StartColumn *string + EndColumn *string + TransactionId *string +} + +type GetRangeRequest struct { + RangeRowQueryCriteria *RangeRowQueryCriteria +} + +type Row struct { + PrimaryKey *PrimaryKey + Columns []*AttributeColumn +} + +type GetRangeResponse struct { + Rows []*Row + ConsumedCapacityUnit 
*ConsumedCapacityUnit + NextStartPrimaryKey *PrimaryKey + ResponseInfo +} + +type ListStreamRequest struct { + TableName *string +} + +type Stream struct { + Id *StreamId + TableName *string + CreationTime int64 +} + +type ListStreamResponse struct { + Streams []Stream + ResponseInfo +} + +type StreamSpecification struct { + EnableStream bool + ExpirationTime int32 // must be positive. in hours +} + +type StreamDetails struct { + EnableStream bool + StreamId *StreamId // nil when stream is disabled. + ExpirationTime int32 // in hours + LastEnableTime int64 // the last time stream is enabled, in usec +} + +type DescribeStreamRequest struct { + StreamId *StreamId // required + InclusiveStartShardId *ShardId // optional + ShardLimit *int32 // optional +} + +type DescribeStreamResponse struct { + StreamId *StreamId // required + ExpirationTime int32 // in hours + TableName *string // required + CreationTime int64 // in usec + Status StreamStatus // required + Shards []*StreamShard + NextShardId *ShardId // optional. nil means "no more shards" + ResponseInfo +} + +type GetShardIteratorRequest struct { + StreamId *StreamId // required + ShardId *ShardId // required + Timestamp *int64 + Token *string +} + +type GetShardIteratorResponse struct { + ShardIterator *ShardIterator // required + Token *string + ResponseInfo +} + +type GetStreamRecordRequest struct { + ShardIterator *ShardIterator // required + Limit *int32 // optional. max records which will reside in response +} + +type GetStreamRecordResponse struct { + Records []*StreamRecord + NextShardIterator *ShardIterator // optional. an indicator to be used to read more records in this shard + ResponseInfo +} + +type ComputeSplitPointsBySizeRequest struct { + TableName string + SplitSize int64 +} + +type ComputeSplitPointsBySizeResponse struct { + SchemaEntry []*PrimaryKeySchema + Splits []*Split + ResponseInfo +} + +type Split struct { + LowerBound *PrimaryKey + UpperBound *PrimaryKey + Location string +} + +type StreamId string +type ShardId string +type ShardIterator string +type StreamStatus int + +const ( + SS_Enabling StreamStatus = iota + SS_Active +) + +/* + * Shards are possibly splitted into two or merged from two. + * After splitting, both newly generated shards have the same FatherShard. + * After merging, the newly generated shard have both FatherShard and MotherShard. 
+ */ +type StreamShard struct { + SelfShard *ShardId // required + FatherShard *ShardId // optional + MotherShard *ShardId // optional +} + +type StreamRecord struct { + Type ActionType + Info *RecordSequenceInfo // required + PrimaryKey *PrimaryKey // required + Columns []*RecordColumn +} + +func (this *StreamRecord) String() string { + return fmt.Sprintf( + "{\"Type\":%s, \"PrimaryKey\":%s, \"Info\":%s, \"Columns\":%s}", + this.Type, + *this.PrimaryKey, + this.Info, + this.Columns) +} + +type ActionType int + +const ( + AT_Put ActionType = iota + AT_Update + AT_Delete +) + +func (this ActionType) String() string { + switch this { + case AT_Put: + return "\"PutRow\"" + case AT_Update: + return "\"UpdateRow\"" + case AT_Delete: + return "\"DeleteRow\"" + default: + panic(fmt.Sprintf("unknown action type: %d", int(this))) + } +} + +type RecordSequenceInfo struct { + Epoch int32 + Timestamp int64 + RowIndex int32 +} + +func (this *RecordSequenceInfo) String() string { + return fmt.Sprintf( + "{\"Epoch\":%d, \"Timestamp\": %d, \"RowIndex\": %d}", + this.Epoch, + this.Timestamp, + this.RowIndex) +} + +type RecordColumn struct { + Type RecordColumnType + Name *string // required + Value interface{} // optional. present when Type is RCT_Put + Timestamp *int64 // optional, in msec. present when Type is RCT_Put or RCT_DeleteOneVersion +} + +func (this *RecordColumn) String() string { + xs := make([]string, 0) + xs = append(xs, fmt.Sprintf("\"Name\":%s", strconv.Quote(*this.Name))) + switch this.Type { + case RCT_DeleteAllVersions: + xs = append(xs, "\"Type\":\"DeleteAllVersions\"") + case RCT_DeleteOneVersion: + xs = append(xs, "\"Type\":\"DeleteOneVersion\"") + xs = append(xs, fmt.Sprintf("\"Timestamp\":%d", *this.Timestamp)) + case RCT_Put: + xs = append(xs, "\"Type\":\"Put\"") + xs = append(xs, fmt.Sprintf("\"Timestamp\":%d", *this.Timestamp)) + xs = append(xs, fmt.Sprintf("\"Value\":%s", this.Value)) + } + return fmt.Sprintf("{%s}", strings.Join(xs, ", ")) +} + +type RecordColumnType int + +const ( + RCT_Put RecordColumnType = iota + RCT_DeleteOneVersion + RCT_DeleteAllVersions +) + +type IndexMeta struct { + IndexName string + Primarykey []string + DefinedColumns []string + IndexType IndexType +} + +type DefinedColumnSchema struct { + Name string + ColumnType DefinedColumnType +} + +type IndexType int32 + +const ( + IT_GLOBAL_INDEX IndexType = 1 + IT_LOCAL_INDEX IndexType = 2 +) + +type DefinedColumnType int32 + +const ( + /** + * 64位整数。 + */ + DefinedColumn_INTEGER DefinedColumnType = 1 + + /** + * 浮点数。 + */ + DefinedColumn_DOUBLE DefinedColumnType = 2 + + /** + * 布尔值。 + */ + DefinedColumn_BOOLEAN DefinedColumnType = 3 + + /** + * 字符串。 + */ + DefinedColumn_STRING DefinedColumnType = 4 + + /** + * BINARY。 + */ + DefinedColumn_BINARY DefinedColumnType = 5 +) + +type StartLocalTransactionRequest struct { + PrimaryKey *PrimaryKey + TableName string +} + +type StartLocalTransactionResponse struct { + TransactionId *string + ResponseInfo +} + +type CommitTransactionRequest struct { + TransactionId *string +} + +type CommitTransactionResponse struct { + ResponseInfo +} + +type AbortTransactionRequest struct { + TransactionId *string +} + +type AbortTransactionResponse struct { + ResponseInfo +} \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go new file mode 100644 index 000000000000..bff7d021f500 --- /dev/null +++ 
b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go @@ -0,0 +1,124 @@ +package tablestore + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "hash" + "sort" + "strings" +) + +const ( + xOtsDate = "x-ots-date" + xOtsApiversion = "x-ots-apiversion" + xOtsAccesskeyid = "x-ots-accesskeyid" + xOtsContentmd5 = "x-ots-contentmd5" + xOtsHeaderStsToken = "x-ots-ststoken" + xOtsSignature = "x-ots-signature" + xOtsRequestCompressType = "x-ots-request-compress-type" + xOtsRequestCompressSize = "x-ots-request-compress-size" + xOtsResponseCompressTye = "x-ots-response-compress-type" +) + +type otsHeader struct { + name string + value string + must bool +} + +type otsHeaders struct { + headers []*otsHeader + hmacSha1 hash.Hash +} + +func createOtsHeaders(accessKey string) *otsHeaders { + h := new(otsHeaders) + + h.headers = []*otsHeader{ + &otsHeader{name: xOtsDate, must: true}, + &otsHeader{name: xOtsApiversion, must: true}, + &otsHeader{name: xOtsAccesskeyid, must: true}, + &otsHeader{name: xOtsContentmd5, must: true}, + &otsHeader{name: xOtsInstanceName, must: true}, + &otsHeader{name: xOtsSignature, must: true}, + &otsHeader{name: xOtsRequestCompressSize, must: false}, + &otsHeader{name: xOtsResponseCompressTye, must: false}, + &otsHeader{name: xOtsRequestCompressType, must: false}, + &otsHeader{name: xOtsHeaderStsToken, must: false}, + } + + sort.Sort(h) + + h.hmacSha1 = hmac.New(sha1.New, []byte(accessKey)) + return h +} + +func (h *otsHeaders) Len() int { + return len(h.headers) +} + +func (h *otsHeaders) Swap(i, j int) { + h.headers[i], h.headers[j] = h.headers[j], h.headers[i] +} + +func (h *otsHeaders) Less(i, j int) bool { + if h.headers[i].name == xOtsSignature { + return false + } + + if h.headers[j].name == xOtsSignature { + return true + } + + return h.headers[i].name < h.headers[j].name +} + +func (h *otsHeaders) search(name string) *otsHeader { + index := sort.Search(len(h.headers)-1, func(i int) bool { + return h.headers[i].name >= name + }) + + if index >= len(h.headers) { + return nil + } + + return h.headers[index] +} + +func (h *otsHeaders) set(name, value string) { + header := h.search(name) + if header == nil { + return + } + + header.value = value +} + +func (h *otsHeaders) signature(uri, method, accessKey string) (string, error) { + for _, header := range h.headers[:len(h.headers)-1] { + if header.must && header.value == "" { + return "", errMissMustHeader(header.name) + } + } + + // StringToSign = CanonicalURI + '\n' + HTTPRequestMethod + '\n' + CanonicalQueryString + '\n' + CanonicalHeaders + '\n' + // TODO CanonicalQueryString 为空 + stringToSign := uri + "\n" + method + "\n" + "\n" + + // 最后一个header 为 xOtsSignature + for _, header := range h.headers[:len(h.headers)-1] { + if header.value != "" { + stringToSign = stringToSign + header.name + ":" + strings.TrimSpace(header.value) + "\n" + } + } + + h.hmacSha1.Reset() + h.hmacSha1.Write([]byte(stringToSign)) + + // fmt.Println("stringToSign:" + stringToSign) + sign := base64.StdEncoding.EncodeToString(h.hmacSha1.Sum(nil)) + h.set(xOtsSignature, sign) + // fmt.Println("sign:" + sign) + return sign, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh new file mode 100644 index 000000000000..18cb5079e8be --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh @@ -0,0 +1 @@ +protoc 
--go_out=. search.proto ots_filter.proto table_store.proto diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go new file mode 100644 index 000000000000..4172901ccb6a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-go. +// source: ots_filter.proto +// DO NOT EDIT! + +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type VariantType int32 + +const ( + VariantType_VT_INTEGER VariantType = 0 + VariantType_VT_DOUBLE VariantType = 1 + // VT_BOOLEAN = 2; + VariantType_VT_STRING VariantType = 3 + VariantType_VT_NULL VariantType = 6 + VariantType_VT_BLOB VariantType = 7 +) + +var VariantType_name = map[int32]string{ + 0: "VT_INTEGER", + 1: "VT_DOUBLE", + 3: "VT_STRING", + 6: "VT_NULL", + 7: "VT_BLOB", +} +var VariantType_value = map[string]int32{ + "VT_INTEGER": 0, + "VT_DOUBLE": 1, + "VT_STRING": 3, + "VT_NULL": 6, + "VT_BLOB": 7, +} + +func (x VariantType) Enum() *VariantType { + p := new(VariantType) + *p = x + return p +} +func (x VariantType) String() string { + return proto.EnumName(VariantType_name, int32(x)) +} +func (x *VariantType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(VariantType_value, data, "VariantType") + if err != nil { + return err + } + *x = VariantType(value) + return nil +} +func (VariantType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +type FilterType int32 + +const ( + FilterType_FT_SINGLE_COLUMN_VALUE FilterType = 1 + FilterType_FT_COMPOSITE_COLUMN_VALUE FilterType = 2 + FilterType_FT_COLUMN_PAGINATION FilterType = 3 +) + +var FilterType_name = map[int32]string{ + 1: "FT_SINGLE_COLUMN_VALUE", + 2: "FT_COMPOSITE_COLUMN_VALUE", + 3: "FT_COLUMN_PAGINATION", +} +var FilterType_value = map[string]int32{ + "FT_SINGLE_COLUMN_VALUE": 1, + "FT_COMPOSITE_COLUMN_VALUE": 2, + "FT_COLUMN_PAGINATION": 3, +} + +func (x FilterType) Enum() *FilterType { + p := new(FilterType) + *p = x + return p +} +func (x FilterType) String() string { + return proto.EnumName(FilterType_name, int32(x)) +} +func (x *FilterType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FilterType_value, data, "FilterType") + if err != nil { + return err + } + *x = FilterType(value) + return nil +} +func (FilterType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type ComparatorType int32 + +const ( + ComparatorType_CT_EQUAL ComparatorType = 1 + ComparatorType_CT_NOT_EQUAL ComparatorType = 2 + ComparatorType_CT_GREATER_THAN ComparatorType = 3 + ComparatorType_CT_GREATER_EQUAL ComparatorType = 4 + ComparatorType_CT_LESS_THAN ComparatorType = 5 + ComparatorType_CT_LESS_EQUAL ComparatorType = 6 +) + +var ComparatorType_name = map[int32]string{ + 1: "CT_EQUAL", + 2: "CT_NOT_EQUAL", + 3: "CT_GREATER_THAN", + 4: "CT_GREATER_EQUAL", + 5: "CT_LESS_THAN", + 6: "CT_LESS_EQUAL", +} +var ComparatorType_value = map[string]int32{ + "CT_EQUAL": 1, + "CT_NOT_EQUAL": 2, + "CT_GREATER_THAN": 3, + "CT_GREATER_EQUAL": 4, + "CT_LESS_THAN": 5, + "CT_LESS_EQUAL": 6, +} + +func (x ComparatorType) Enum() *ComparatorType { + p := new(ComparatorType) + *p = x + return p +} 
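
The enums generated in this file back the filter model defined in model.go earlier in this diff: SingleColumnCondition and CompositeColumnValueFilter serialize into the otsprotocol.Filter message via the ToFilter implementations shown above. A minimal sketch of composing such a filter (not part of this diff; table and column names are hypothetical):

```go
package main

import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"

// activeHighScoreCriteria builds read criteria whose server-side filter only
// passes rows where status == "active" AND score > 90. Table and column
// names are illustrative; nothing here is part of the vendored SDK itself.
func activeHighScoreCriteria() *tablestore.SingleRowQueryCriteria {
	eq := tablestore.CT_EQUAL
	statusCol := "status"
	active := &tablestore.SingleColumnCondition{
		Comparator:        &eq,
		ColumnName:        &statusCol,
		ColumnValue:       "active",
		LatestVersionOnly: true,
	}

	gt := tablestore.CT_GREATER_THAN
	scoreCol := "score"
	highScore := &tablestore.SingleColumnCondition{
		Comparator:        &gt,
		ColumnName:        &scoreCol,
		ColumnValue:       int64(90),
		LatestVersionOnly: true,
	}

	// LO_AND corresponds to LogicalOperator_LO_AND in this generated file;
	// every sub-filter must match for the row to be returned.
	combined := &tablestore.CompositeColumnValueFilter{Operator: tablestore.LO_AND}
	combined.AddFilter(active)
	combined.AddFilter(highScore)

	// Caller must still set PrimaryKey before passing this to GetRow.
	return &tablestore.SingleRowQueryCriteria{
		TableName:  "sample_table", // hypothetical
		MaxVersion: 1,
		Filter:     combined,
	}
}
```

The same ColumnFilter value can be attached to the Filter field of RangeRowQueryCriteria or MultiRowQueryCriteria, since all three criteria types accept any implementation of the ColumnFilter interface.
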
+func (x ComparatorType) String() string { + return proto.EnumName(ComparatorType_name, int32(x)) +} +func (x *ComparatorType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ComparatorType_value, data, "ComparatorType") + if err != nil { + return err + } + *x = ComparatorType(value) + return nil +} +func (ComparatorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +type LogicalOperator int32 + +const ( + LogicalOperator_LO_NOT LogicalOperator = 1 + LogicalOperator_LO_AND LogicalOperator = 2 + LogicalOperator_LO_OR LogicalOperator = 3 +) + +var LogicalOperator_name = map[int32]string{ + 1: "LO_NOT", + 2: "LO_AND", + 3: "LO_OR", +} +var LogicalOperator_value = map[string]int32{ + "LO_NOT": 1, + "LO_AND": 2, + "LO_OR": 3, +} + +func (x LogicalOperator) Enum() *LogicalOperator { + p := new(LogicalOperator) + *p = x + return p +} +func (x LogicalOperator) String() string { + return proto.EnumName(LogicalOperator_name, int32(x)) +} +func (x *LogicalOperator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogicalOperator_value, data, "LogicalOperator") + if err != nil { + return err + } + *x = LogicalOperator(value) + return nil +} +func (LogicalOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +type ValueTransferRule struct { + Regex *string `protobuf:"bytes,1,req,name=regex" json:"regex,omitempty"` + CastType *VariantType `protobuf:"varint,2,opt,name=cast_type,enum=otsprotocol.VariantType" json:"cast_type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ValueTransferRule) Reset() { *m = ValueTransferRule{} } +func (m *ValueTransferRule) String() string { return proto.CompactTextString(m) } +func (*ValueTransferRule) ProtoMessage() {} +func (*ValueTransferRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *ValueTransferRule) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *ValueTransferRule) GetCastType() VariantType { + if m != nil && m.CastType != nil { + return *m.CastType + } + return VariantType_VT_INTEGER +} + +type SingleColumnValueFilter struct { + Comparator *ComparatorType `protobuf:"varint,1,req,name=comparator,enum=otsprotocol.ComparatorType" json:"comparator,omitempty"` + ColumnName *string `protobuf:"bytes,2,req,name=column_name" json:"column_name,omitempty"` + ColumnValue []byte `protobuf:"bytes,3,req,name=column_value" json:"column_value,omitempty"` + FilterIfMissing *bool `protobuf:"varint,4,req,name=filter_if_missing" json:"filter_if_missing,omitempty"` + LatestVersionOnly *bool `protobuf:"varint,5,req,name=latest_version_only" json:"latest_version_only,omitempty"` + ValueTransRule *ValueTransferRule `protobuf:"bytes,6,opt,name=value_trans_rule" json:"value_trans_rule,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SingleColumnValueFilter) Reset() { *m = SingleColumnValueFilter{} } +func (m *SingleColumnValueFilter) String() string { return proto.CompactTextString(m) } +func (*SingleColumnValueFilter) ProtoMessage() {} +func (*SingleColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *SingleColumnValueFilter) GetComparator() ComparatorType { + if m != nil && m.Comparator != nil { + return *m.Comparator + } + return ComparatorType_CT_EQUAL +} + +func (m *SingleColumnValueFilter) GetColumnName() string { + if m != nil && m.ColumnName != nil { + return *m.ColumnName + } + return "" +} + +func (m 
*SingleColumnValueFilter) GetColumnValue() []byte { + if m != nil { + return m.ColumnValue + } + return nil +} + +func (m *SingleColumnValueFilter) GetFilterIfMissing() bool { + if m != nil && m.FilterIfMissing != nil { + return *m.FilterIfMissing + } + return false +} + +func (m *SingleColumnValueFilter) GetLatestVersionOnly() bool { + if m != nil && m.LatestVersionOnly != nil { + return *m.LatestVersionOnly + } + return false +} + +func (m *SingleColumnValueFilter) GetValueTransRule() *ValueTransferRule { + if m != nil { + return m.ValueTransRule + } + return nil +} + +type CompositeColumnValueFilter struct { + Combinator *LogicalOperator `protobuf:"varint,1,req,name=combinator,enum=otsprotocol.LogicalOperator" json:"combinator,omitempty"` + SubFilters []*Filter `protobuf:"bytes,2,rep,name=sub_filters" json:"sub_filters,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeColumnValueFilter) Reset() { *m = CompositeColumnValueFilter{} } +func (m *CompositeColumnValueFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeColumnValueFilter) ProtoMessage() {} +func (*CompositeColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *CompositeColumnValueFilter) GetCombinator() LogicalOperator { + if m != nil && m.Combinator != nil { + return *m.Combinator + } + return LogicalOperator_LO_NOT +} + +func (m *CompositeColumnValueFilter) GetSubFilters() []*Filter { + if m != nil { + return m.SubFilters + } + return nil +} + +type ColumnPaginationFilter struct { + Offset *int32 `protobuf:"varint,1,req,name=offset" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnPaginationFilter) Reset() { *m = ColumnPaginationFilter{} } +func (m *ColumnPaginationFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnPaginationFilter) ProtoMessage() {} +func (*ColumnPaginationFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *ColumnPaginationFilter) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *ColumnPaginationFilter) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type Filter struct { + Type *FilterType `protobuf:"varint,1,req,name=type,enum=otsprotocol.FilterType" json:"type,omitempty"` + Filter []byte `protobuf:"bytes,2,req,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Filter) GetType() FilterType { + if m != nil && m.Type != nil { + return *m.Type + } + return FilterType_FT_SINGLE_COLUMN_VALUE +} + +func (m *Filter) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func init() { + proto.RegisterType((*ValueTransferRule)(nil), "otsprotocol.ValueTransferRule") + proto.RegisterType((*SingleColumnValueFilter)(nil), "otsprotocol.SingleColumnValueFilter") + proto.RegisterType((*CompositeColumnValueFilter)(nil), "otsprotocol.CompositeColumnValueFilter") + proto.RegisterType((*ColumnPaginationFilter)(nil), "otsprotocol.ColumnPaginationFilter") + proto.RegisterType((*Filter)(nil), "otsprotocol.Filter") + proto.RegisterEnum("otsprotocol.VariantType", VariantType_name, 
VariantType_value) + proto.RegisterEnum("otsprotocol.FilterType", FilterType_name, FilterType_value) + proto.RegisterEnum("otsprotocol.ComparatorType", ComparatorType_name, ComparatorType_value) + proto.RegisterEnum("otsprotocol.LogicalOperator", LogicalOperator_name, LogicalOperator_value) +} + +func init() { proto.RegisterFile("ots_filter.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x92, 0x51, 0x6b, 0xdb, 0x30, + 0x14, 0x85, 0x67, 0xa7, 0x71, 0x9b, 0xeb, 0x34, 0x55, 0x95, 0xd2, 0xba, 0xed, 0x36, 0x42, 0x60, + 0x60, 0x32, 0xe8, 0x46, 0x18, 0x6c, 0x6f, 0xc3, 0x75, 0xdd, 0x2c, 0xe0, 0xda, 0x5d, 0xa2, 0xf8, + 0x55, 0xb8, 0x41, 0x09, 0x02, 0xc7, 0x0a, 0x96, 0x52, 0xda, 0xb7, 0xfd, 0xdb, 0xfd, 0x8d, 0x61, + 0xd9, 0x1d, 0x69, 0xe8, 0x9b, 0x75, 0xef, 0xd5, 0x77, 0xee, 0xd1, 0x31, 0x20, 0xa1, 0x24, 0x5d, + 0xf0, 0x4c, 0xb1, 0xe2, 0x6a, 0x5d, 0x08, 0x25, 0xb0, 0x2d, 0x94, 0xd4, 0x5f, 0x73, 0x91, 0xf5, + 0x63, 0x38, 0x4e, 0xd2, 0x6c, 0xc3, 0x48, 0x91, 0xe6, 0x72, 0xc1, 0x8a, 0xc9, 0x26, 0x63, 0xf8, + 0x10, 0x9a, 0x05, 0x5b, 0xb2, 0x27, 0xc7, 0xe8, 0x99, 0x6e, 0x0b, 0x7f, 0x86, 0xd6, 0x3c, 0x95, + 0x8a, 0xaa, 0xe7, 0x35, 0x73, 0xcc, 0x9e, 0xe1, 0x76, 0x86, 0xce, 0xd5, 0x16, 0xe4, 0x2a, 0x49, + 0x0b, 0x9e, 0xe6, 0x8a, 0x3c, 0xaf, 0x59, 0xff, 0xaf, 0x01, 0x67, 0x53, 0x9e, 0x2f, 0x33, 0xe6, + 0x8b, 0x6c, 0xb3, 0xca, 0x35, 0xfd, 0x56, 0xeb, 0xe3, 0x2f, 0x00, 0x73, 0xb1, 0x5a, 0xa7, 0x45, + 0xaa, 0x44, 0xa1, 0xe1, 0x9d, 0xe1, 0xe5, 0x2b, 0x92, 0xff, 0xbf, 0x5d, 0xc2, 0x70, 0x17, 0xec, + 0xb9, 0xa6, 0xd0, 0x3c, 0x5d, 0x95, 0xda, 0xe5, 0x3a, 0x27, 0xd0, 0xae, 0x8b, 0x8f, 0x25, 0xdb, + 0x69, 0xf4, 0x4c, 0xb7, 0x8d, 0xcf, 0xe1, 0xb8, 0x72, 0x49, 0xf9, 0x82, 0xae, 0xb8, 0x94, 0x3c, + 0x5f, 0x3a, 0x7b, 0x3d, 0xd3, 0x3d, 0xc0, 0x97, 0xd0, 0xcd, 0x52, 0xc5, 0xa4, 0xa2, 0x8f, 0xac, + 0x90, 0x5c, 0xe4, 0x54, 0xe4, 0xd9, 0xb3, 0xd3, 0xd4, 0xcd, 0x1f, 0x80, 0x34, 0x86, 0xaa, 0xf2, + 0x05, 0x68, 0xb1, 0xc9, 0x98, 0x63, 0xf5, 0x0c, 0xd7, 0x1e, 0x7e, 0xdc, 0xf1, 0xb8, 0xf3, 0x4a, + 0xfd, 0x27, 0xb8, 0x28, 0xd7, 0x15, 0x92, 0xab, 0x37, 0xbc, 0x7e, 0xd5, 0x5e, 0x1f, 0x78, 0xbe, + 0xe5, 0xf5, 0xfd, 0x2b, 0x62, 0x28, 0x96, 0x7c, 0x9e, 0x66, 0xf1, 0x9a, 0x69, 0xc3, 0xd8, 0x05, + 0x5b, 0x6e, 0x1e, 0xea, 0xac, 0xa4, 0x63, 0xf6, 0x1a, 0xae, 0x3d, 0xec, 0xbe, 0xba, 0x52, 0xb1, + 0xfb, 0xdf, 0xe1, 0xb4, 0x12, 0xbc, 0x4f, 0x97, 0xa5, 0x00, 0x17, 0x79, 0xad, 0xda, 0x01, 0x4b, + 0x2c, 0x16, 0x92, 0x29, 0xad, 0xd8, 0x2c, 0x93, 0xcc, 0xf8, 0x8a, 0x2b, 0xfd, 0x74, 0xcd, 0xfe, + 0x4f, 0xb0, 0xea, 0xc1, 0x4f, 0xb0, 0xa7, 0xe3, 0xac, 0x16, 0x3b, 0x7b, 0x43, 0x45, 0x07, 0xd0, + 0x01, 0xab, 0xda, 0x47, 0x03, 0xda, 0x83, 0x19, 0xd8, 0x5b, 0x61, 0xe3, 0x0e, 0x40, 0x42, 0xe8, + 0x38, 0x22, 0xc1, 0x28, 0x98, 0xa0, 0x77, 0xf8, 0x10, 0x5a, 0x09, 0xa1, 0x37, 0xf1, 0xec, 0x3a, + 0x0c, 0x90, 0x51, 0x1f, 0xa7, 0x64, 0x32, 0x8e, 0x46, 0xa8, 0x81, 0x6d, 0xd8, 0x4f, 0x08, 0x8d, + 0x66, 0x61, 0x88, 0xac, 0xfa, 0x70, 0x1d, 0xc6, 0xd7, 0x68, 0x7f, 0x90, 0x02, 0x6c, 0x89, 0x5e, + 0xc0, 0xe9, 0x2d, 0xa1, 0xd3, 0x71, 0x34, 0x0a, 0x03, 0xea, 0xc7, 0xe1, 0xec, 0x2e, 0xa2, 0x89, + 0x17, 0xce, 0x4a, 0xe4, 0x07, 0x38, 0xbf, 0x25, 0xd4, 0x8f, 0xef, 0xee, 0xe3, 0xe9, 0x98, 0xec, + 0xb4, 0x4d, 0xec, 0xc0, 0x89, 0x6e, 0xeb, 0xe2, 0xbd, 0x37, 0x1a, 0x47, 0x1e, 0x19, 0xc7, 0x11, + 0x6a, 0x0c, 0xfe, 0x18, 0xd0, 0xd9, 0xf9, 0xbb, 0xda, 0x70, 0xe0, 0x13, 0x1a, 0xfc, 0x9e, 0x79, + 0x21, 0x32, 0x30, 0x82, 0xb6, 0x4f, 0x68, 0x14, 0xbf, 0x54, 
0x4c, 0xdc, 0x85, 0x23, 0x9f, 0xd0, + 0xd1, 0x24, 0xf0, 0x48, 0x30, 0xa1, 0xe4, 0x97, 0x17, 0xa1, 0x06, 0x3e, 0x01, 0xb4, 0x55, 0xac, + 0x46, 0xf7, 0xea, 0xcb, 0x61, 0x30, 0x9d, 0x56, 0x73, 0x4d, 0x7c, 0x0c, 0x87, 0x2f, 0x95, 0x6a, + 0xc8, 0x1a, 0x7c, 0x83, 0xa3, 0xdd, 0xcc, 0x01, 0xac, 0x30, 0x2e, 0x45, 0x91, 0x51, 0x7f, 0x7b, + 0xd1, 0x0d, 0x32, 0x71, 0x0b, 0x9a, 0x61, 0x4c, 0xe3, 0x09, 0x6a, 0xfc, 0x0b, 0x00, 0x00, 0xff, + 0xff, 0xb3, 0x10, 0x19, 0xa7, 0xc1, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto new file mode 100644 index 000000000000..172d6b47a1b9 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto @@ -0,0 +1,61 @@ +syntax = "proto2"; +package otsprotocol; + +enum VariantType { + VT_INTEGER = 0; + VT_DOUBLE = 1; + //VT_BOOLEAN = 2; + VT_STRING = 3; + VT_NULL = 6; + VT_BLOB = 7; +} + +message ValueTransferRule { + required string regex = 1; + optional VariantType cast_type = 2; +} + +enum FilterType { + FT_SINGLE_COLUMN_VALUE = 1; + FT_COMPOSITE_COLUMN_VALUE = 2; + FT_COLUMN_PAGINATION = 3; +} + +enum ComparatorType { + CT_EQUAL = 1; + CT_NOT_EQUAL = 2; + CT_GREATER_THAN = 3; + CT_GREATER_EQUAL = 4; + CT_LESS_THAN = 5; + CT_LESS_EQUAL = 6; +} + +message SingleColumnValueFilter { + required ComparatorType comparator = 1; + required string column_name = 2; + required bytes column_value = 3; // Serialized SQLVariant + required bool filter_if_missing = 4; + required bool latest_version_only = 5; + optional ValueTransferRule value_trans_rule = 6; +} + +enum LogicalOperator { + LO_NOT = 1; + LO_AND = 2; + LO_OR = 3; +} + +message CompositeColumnValueFilter { + required LogicalOperator combinator = 1; + repeated Filter sub_filters = 2; +} + +message ColumnPaginationFilter { + required int32 offset = 1; + required int32 limit = 2; +} + +message Filter { + required FilterType type = 1; + required bytes filter = 2; // Serialized string of filter of the type +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go new file mode 100644 index 000000000000..2f327b1e0356 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go @@ -0,0 +1,1997 @@ +// Code generated by protoc-gen-go. +// source: search.proto +// DO NOT EDIT! + +/* +Package otsprotocol is a generated protocol buffer package. 
+ +It is generated from these files: + search.proto + ots_filter.proto + table_store.proto + +It has these top-level messages: + MatchQuery + MatchPhraseQuery + MatchAllQuery + TermQuery + TermsQuery + RangeQuery + PrefixQuery + WildcardQuery + BoolQuery + ConstScoreQuery + FieldValueFactor + FunctionScoreQuery + NestedQuery + GeoBoundingBoxQuery + GeoDistanceQuery + GeoPolygonQuery + Query + Collapse + NestedFilter + ScoreSort + FieldSort + GeoDistanceSort + PrimaryKeySort + Sorter + Sort + SearchQuery + ColumnsToGet + SearchRequest + SearchResponse + FieldSchema + IndexSchema + IndexSetting + CreateSearchIndexRequest + CreateSearchIndexResponse + IndexInfo + ListSearchIndexRequest + ListSearchIndexResponse + DeleteSearchIndexRequest + DeleteSearchIndexResponse + SyncStat + DescribeSearchIndexRequest + DescribeSearchIndexResponse + ValueTransferRule + SingleColumnValueFilter + CompositeColumnValueFilter + ColumnPaginationFilter + Filter + Error + PrimaryKeySchema + PartitionRange + TableOptions + TableMeta + Condition + CapacityUnit + ReservedThroughputDetails + ReservedThroughput + ConsumedCapacity + StreamSpecification + StreamDetails + CreateTableRequest + CreateTableResponse + UpdateTableRequest + UpdateTableResponse + DescribeTableRequest + DescribeTableResponse + ListTableRequest + ListTableResponse + DeleteTableRequest + DeleteTableResponse + LoadTableRequest + LoadTableResponse + UnloadTableRequest + UnloadTableResponse + TimeRange + ReturnContent + GetRowRequest + GetRowResponse + UpdateRowRequest + UpdateRowResponse + PutRowRequest + PutRowResponse + DeleteRowRequest + DeleteRowResponse + TableInBatchGetRowRequest + BatchGetRowRequest + RowInBatchGetRowResponse + TableInBatchGetRowResponse + BatchGetRowResponse + RowInBatchWriteRowRequest + TableInBatchWriteRowRequest + BatchWriteRowRequest + RowInBatchWriteRowResponse + TableInBatchWriteRowResponse + BatchWriteRowResponse + GetRangeRequest + GetRangeResponse + ListStreamRequest + Stream + ListStreamResponse + StreamShard + DescribeStreamRequest + DescribeStreamResponse + GetShardIteratorRequest + GetShardIteratorResponse + GetStreamRecordRequest + GetStreamRecordResponse + ComputeSplitPointsBySizeRequest + ComputeSplitPointsBySizeResponse + DefinedColumnSchema + IndexMeta + CreateIndexRequest + CreateIndexResponse + DropIndexRequest + DropIndexResponse + StartLocalTransactionRequest + StartLocalTransactionResponse + CommitTransactionRequest + CommitTransactionResponse + AbortTransactionRequest + AbortTransactionResponse +*/ +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type QueryType int32 + +const ( + QueryType_MATCH_QUERY QueryType = 1 + QueryType_MATCH_PHRASE_QUERY QueryType = 2 + QueryType_TERM_QUERY QueryType = 3 + QueryType_RANGE_QUERY QueryType = 4 + QueryType_PREFIX_QUERY QueryType = 5 + QueryType_BOOL_QUERY QueryType = 6 + QueryType_CONST_SCORE_QUERY QueryType = 7 + QueryType_FUNCTION_SCORE_QUERY QueryType = 8 + QueryType_NESTED_QUERY QueryType = 9 + QueryType_WILDCARD_QUERY QueryType = 10 + QueryType_MATCH_ALL_QUERY QueryType = 11 + QueryType_GEO_BOUNDING_BOX_QUERY QueryType = 12 + QueryType_GEO_DISTANCE_QUERY QueryType = 13 + QueryType_GEO_POLYGON_QUERY QueryType = 14 + QueryType_TERMS_QUERY QueryType = 15 +) + +var QueryType_name = map[int32]string{ + 1: "MATCH_QUERY", + 2: "MATCH_PHRASE_QUERY", + 3: "TERM_QUERY", + 4: "RANGE_QUERY", + 5: "PREFIX_QUERY", + 6: "BOOL_QUERY", + 7: "CONST_SCORE_QUERY", + 8: "FUNCTION_SCORE_QUERY", + 9: "NESTED_QUERY", + 10: "WILDCARD_QUERY", + 11: "MATCH_ALL_QUERY", + 12: "GEO_BOUNDING_BOX_QUERY", + 13: "GEO_DISTANCE_QUERY", + 14: "GEO_POLYGON_QUERY", + 15: "TERMS_QUERY", +} +var QueryType_value = map[string]int32{ + "MATCH_QUERY": 1, + "MATCH_PHRASE_QUERY": 2, + "TERM_QUERY": 3, + "RANGE_QUERY": 4, + "PREFIX_QUERY": 5, + "BOOL_QUERY": 6, + "CONST_SCORE_QUERY": 7, + "FUNCTION_SCORE_QUERY": 8, + "NESTED_QUERY": 9, + "WILDCARD_QUERY": 10, + "MATCH_ALL_QUERY": 11, + "GEO_BOUNDING_BOX_QUERY": 12, + "GEO_DISTANCE_QUERY": 13, + "GEO_POLYGON_QUERY": 14, + "TERMS_QUERY": 15, +} + +func (x QueryType) Enum() *QueryType { + p := new(QueryType) + *p = x + return p +} +func (x QueryType) String() string { + return proto.EnumName(QueryType_name, int32(x)) +} +func (x *QueryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryType_value, data, "QueryType") + if err != nil { + return err + } + *x = QueryType(value) + return nil +} +func (QueryType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type QueryOperator int32 + +const ( + QueryOperator_OR QueryOperator = 1 + QueryOperator_AND QueryOperator = 2 +) + +var QueryOperator_name = map[int32]string{ + 1: "OR", + 2: "AND", +} +var QueryOperator_value = map[string]int32{ + "OR": 1, + "AND": 2, +} + +func (x QueryOperator) Enum() *QueryOperator { + p := new(QueryOperator) + *p = x + return p +} +func (x QueryOperator) String() string { + return proto.EnumName(QueryOperator_name, int32(x)) +} +func (x *QueryOperator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryOperator_value, data, "QueryOperator") + if err != nil { + return err + } + *x = QueryOperator(value) + return nil +} +func (QueryOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type ScoreMode int32 + +const ( + ScoreMode_SCORE_MODE_NONE ScoreMode = 1 + ScoreMode_SCORE_MODE_AVG ScoreMode = 2 + ScoreMode_SCORE_MODE_MAX ScoreMode = 3 + ScoreMode_SCORE_MODE_TOTAL ScoreMode = 4 + ScoreMode_SCORE_MODE_MIN ScoreMode = 5 +) + +var ScoreMode_name = map[int32]string{ + 1: "SCORE_MODE_NONE", + 2: "SCORE_MODE_AVG", + 3: "SCORE_MODE_MAX", + 4: "SCORE_MODE_TOTAL", + 5: "SCORE_MODE_MIN", +} +var ScoreMode_value = map[string]int32{ + "SCORE_MODE_NONE": 1, + "SCORE_MODE_AVG": 2, + "SCORE_MODE_MAX": 3, + "SCORE_MODE_TOTAL": 4, + "SCORE_MODE_MIN": 5, +} + +func (x ScoreMode) Enum() *ScoreMode { + p := new(ScoreMode) + *p = x + return p +} +func (x ScoreMode) String() string { + return proto.EnumName(ScoreMode_name, int32(x)) +} +func (x 
*ScoreMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ScoreMode_value, data, "ScoreMode") + if err != nil { + return err + } + *x = ScoreMode(value) + return nil +} +func (ScoreMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type SortOrder int32 + +const ( + SortOrder_SORT_ORDER_ASC SortOrder = 0 + SortOrder_SORT_ORDER_DESC SortOrder = 1 +) + +var SortOrder_name = map[int32]string{ + 0: "SORT_ORDER_ASC", + 1: "SORT_ORDER_DESC", +} +var SortOrder_value = map[string]int32{ + "SORT_ORDER_ASC": 0, + "SORT_ORDER_DESC": 1, +} + +func (x SortOrder) Enum() *SortOrder { + p := new(SortOrder) + *p = x + return p +} +func (x SortOrder) String() string { + return proto.EnumName(SortOrder_name, int32(x)) +} +func (x *SortOrder) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SortOrder_value, data, "SortOrder") + if err != nil { + return err + } + *x = SortOrder(value) + return nil +} +func (SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type SortMode int32 + +const ( + SortMode_SORT_MODE_MIN SortMode = 0 + SortMode_SORT_MODE_MAX SortMode = 1 + SortMode_SORT_MODE_AVG SortMode = 2 +) + +var SortMode_name = map[int32]string{ + 0: "SORT_MODE_MIN", + 1: "SORT_MODE_MAX", + 2: "SORT_MODE_AVG", +} +var SortMode_value = map[string]int32{ + "SORT_MODE_MIN": 0, + "SORT_MODE_MAX": 1, + "SORT_MODE_AVG": 2, +} + +func (x SortMode) Enum() *SortMode { + p := new(SortMode) + *p = x + return p +} +func (x SortMode) String() string { + return proto.EnumName(SortMode_name, int32(x)) +} +func (x *SortMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SortMode_value, data, "SortMode") + if err != nil { + return err + } + *x = SortMode(value) + return nil +} +func (SortMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type GeoDistanceType int32 + +const ( + GeoDistanceType_GEO_DISTANCE_ARC GeoDistanceType = 0 + GeoDistanceType_GEO_DISTANCE_PLANE GeoDistanceType = 1 +) + +var GeoDistanceType_name = map[int32]string{ + 0: "GEO_DISTANCE_ARC", + 1: "GEO_DISTANCE_PLANE", +} +var GeoDistanceType_value = map[string]int32{ + "GEO_DISTANCE_ARC": 0, + "GEO_DISTANCE_PLANE": 1, +} + +func (x GeoDistanceType) Enum() *GeoDistanceType { + p := new(GeoDistanceType) + *p = x + return p +} +func (x GeoDistanceType) String() string { + return proto.EnumName(GeoDistanceType_name, int32(x)) +} +func (x *GeoDistanceType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GeoDistanceType_value, data, "GeoDistanceType") + if err != nil { + return err + } + *x = GeoDistanceType(value) + return nil +} +func (GeoDistanceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type ColumnReturnType int32 + +const ( + ColumnReturnType_RETURN_ALL ColumnReturnType = 1 + ColumnReturnType_RETURN_SPECIFIED ColumnReturnType = 2 + ColumnReturnType_RETURN_NONE ColumnReturnType = 3 +) + +var ColumnReturnType_name = map[int32]string{ + 1: "RETURN_ALL", + 2: "RETURN_SPECIFIED", + 3: "RETURN_NONE", +} +var ColumnReturnType_value = map[string]int32{ + "RETURN_ALL": 1, + "RETURN_SPECIFIED": 2, + "RETURN_NONE": 3, +} + +func (x ColumnReturnType) Enum() *ColumnReturnType { + p := new(ColumnReturnType) + *p = x + return p +} +func (x ColumnReturnType) String() string { + return proto.EnumName(ColumnReturnType_name, int32(x)) +} +func (x *ColumnReturnType) UnmarshalJSON(data []byte) error { + value, err := 
proto.UnmarshalJSONEnum(ColumnReturnType_value, data, "ColumnReturnType") + if err != nil { + return err + } + *x = ColumnReturnType(value) + return nil +} +func (ColumnReturnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type IndexOptions int32 + +const ( + IndexOptions_DOCS IndexOptions = 1 + IndexOptions_FREQS IndexOptions = 2 + IndexOptions_POSITIONS IndexOptions = 3 + IndexOptions_OFFSETS IndexOptions = 4 +) + +var IndexOptions_name = map[int32]string{ + 1: "DOCS", + 2: "FREQS", + 3: "POSITIONS", + 4: "OFFSETS", +} +var IndexOptions_value = map[string]int32{ + "DOCS": 1, + "FREQS": 2, + "POSITIONS": 3, + "OFFSETS": 4, +} + +func (x IndexOptions) Enum() *IndexOptions { + p := new(IndexOptions) + *p = x + return p +} +func (x IndexOptions) String() string { + return proto.EnumName(IndexOptions_name, int32(x)) +} +func (x *IndexOptions) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexOptions_value, data, "IndexOptions") + if err != nil { + return err + } + *x = IndexOptions(value) + return nil +} +func (IndexOptions) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type FieldType int32 + +const ( + FieldType_LONG FieldType = 1 + FieldType_DOUBLE FieldType = 2 + FieldType_BOOLEAN FieldType = 3 + FieldType_KEYWORD FieldType = 4 + FieldType_TEXT FieldType = 5 + FieldType_NESTED FieldType = 6 + FieldType_GEO_POINT FieldType = 7 +) + +var FieldType_name = map[int32]string{ + 1: "LONG", + 2: "DOUBLE", + 3: "BOOLEAN", + 4: "KEYWORD", + 5: "TEXT", + 6: "NESTED", + 7: "GEO_POINT", +} +var FieldType_value = map[string]int32{ + "LONG": 1, + "DOUBLE": 2, + "BOOLEAN": 3, + "KEYWORD": 4, + "TEXT": 5, + "NESTED": 6, + "GEO_POINT": 7, +} + +func (x FieldType) Enum() *FieldType { + p := new(FieldType) + *p = x + return p +} +func (x FieldType) String() string { + return proto.EnumName(FieldType_name, int32(x)) +} +func (x *FieldType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldType_value, data, "FieldType") + if err != nil { + return err + } + *x = FieldType(value) + return nil +} +func (FieldType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type SyncPhase int32 + +const ( + SyncPhase_FULL SyncPhase = 1 + SyncPhase_INCR SyncPhase = 2 +) + +var SyncPhase_name = map[int32]string{ + 1: "FULL", + 2: "INCR", +} +var SyncPhase_value = map[string]int32{ + "FULL": 1, + "INCR": 2, +} + +func (x SyncPhase) Enum() *SyncPhase { + p := new(SyncPhase) + *p = x + return p +} +func (x SyncPhase) String() string { + return proto.EnumName(SyncPhase_name, int32(x)) +} +func (x *SyncPhase) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SyncPhase_value, data, "SyncPhase") + if err != nil { + return err + } + *x = SyncPhase(value) + return nil +} +func (SyncPhase) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type MatchQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Text *string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` + MinimumShouldMatch *int32 `protobuf:"varint,3,opt,name=minimum_should_match" json:"minimum_should_match,omitempty"` + Operator *QueryOperator `protobuf:"varint,4,opt,name=operator,enum=otsprotocol.QueryOperator" json:"operator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchQuery) Reset() { *m = MatchQuery{} } +func (m *MatchQuery) String() string { return proto.CompactTextString(m) } +func (*MatchQuery) 
ProtoMessage() {} +func (*MatchQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *MatchQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *MatchQuery) GetText() string { + if m != nil && m.Text != nil { + return *m.Text + } + return "" +} + +func (m *MatchQuery) GetMinimumShouldMatch() int32 { + if m != nil && m.MinimumShouldMatch != nil { + return *m.MinimumShouldMatch + } + return 0 +} + +func (m *MatchQuery) GetOperator() QueryOperator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return QueryOperator_OR +} + +type MatchPhraseQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Text *string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchPhraseQuery) Reset() { *m = MatchPhraseQuery{} } +func (m *MatchPhraseQuery) String() string { return proto.CompactTextString(m) } +func (*MatchPhraseQuery) ProtoMessage() {} +func (*MatchPhraseQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *MatchPhraseQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *MatchPhraseQuery) GetText() string { + if m != nil && m.Text != nil { + return *m.Text + } + return "" +} + +type MatchAllQuery struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchAllQuery) Reset() { *m = MatchAllQuery{} } +func (m *MatchAllQuery) String() string { return proto.CompactTextString(m) } +func (*MatchAllQuery) ProtoMessage() {} +func (*MatchAllQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type TermQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Term []byte `protobuf:"bytes,2,opt,name=term" json:"term,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TermQuery) Reset() { *m = TermQuery{} } +func (m *TermQuery) String() string { return proto.CompactTextString(m) } +func (*TermQuery) ProtoMessage() {} +func (*TermQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *TermQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *TermQuery) GetTerm() []byte { + if m != nil { + return m.Term + } + return nil +} + +type TermsQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Terms [][]byte `protobuf:"bytes,2,rep,name=terms" json:"terms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TermsQuery) Reset() { *m = TermsQuery{} } +func (m *TermsQuery) String() string { return proto.CompactTextString(m) } +func (*TermsQuery) ProtoMessage() {} +func (*TermsQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *TermsQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *TermsQuery) GetTerms() [][]byte { + if m != nil { + return m.Terms + } + return nil +} + +type RangeQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + RangeFrom []byte `protobuf:"bytes,2,opt,name=range_from" json:"range_from,omitempty"` + RangeTo []byte `protobuf:"bytes,3,opt,name=range_to" json:"range_to,omitempty"` + IncludeLower *bool `protobuf:"varint,4,opt,name=include_lower" json:"include_lower,omitempty"` + 
IncludeUpper *bool `protobuf:"varint,5,opt,name=include_upper" json:"include_upper,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RangeQuery) Reset() { *m = RangeQuery{} } +func (m *RangeQuery) String() string { return proto.CompactTextString(m) } +func (*RangeQuery) ProtoMessage() {} +func (*RangeQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *RangeQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *RangeQuery) GetRangeFrom() []byte { + if m != nil { + return m.RangeFrom + } + return nil +} + +func (m *RangeQuery) GetRangeTo() []byte { + if m != nil { + return m.RangeTo + } + return nil +} + +func (m *RangeQuery) GetIncludeLower() bool { + if m != nil && m.IncludeLower != nil { + return *m.IncludeLower + } + return false +} + +func (m *RangeQuery) GetIncludeUpper() bool { + if m != nil && m.IncludeUpper != nil { + return *m.IncludeUpper + } + return false +} + +type PrefixQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Prefix *string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrefixQuery) Reset() { *m = PrefixQuery{} } +func (m *PrefixQuery) String() string { return proto.CompactTextString(m) } +func (*PrefixQuery) ProtoMessage() {} +func (*PrefixQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PrefixQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *PrefixQuery) GetPrefix() string { + if m != nil && m.Prefix != nil { + return *m.Prefix + } + return "" +} + +type WildcardQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WildcardQuery) Reset() { *m = WildcardQuery{} } +func (m *WildcardQuery) String() string { return proto.CompactTextString(m) } +func (*WildcardQuery) ProtoMessage() {} +func (*WildcardQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *WildcardQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *WildcardQuery) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type BoolQuery struct { + MustQueries []*Query `protobuf:"bytes,1,rep,name=must_queries" json:"must_queries,omitempty"` + MustNotQueries []*Query `protobuf:"bytes,2,rep,name=must_not_queries" json:"must_not_queries,omitempty"` + FilterQueries []*Query `protobuf:"bytes,3,rep,name=filter_queries" json:"filter_queries,omitempty"` + ShouldQueries []*Query `protobuf:"bytes,4,rep,name=should_queries" json:"should_queries,omitempty"` + MinimumShouldMatch *int32 `protobuf:"varint,5,opt,name=minimum_should_match" json:"minimum_should_match,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolQuery) Reset() { *m = BoolQuery{} } +func (m *BoolQuery) String() string { return proto.CompactTextString(m) } +func (*BoolQuery) ProtoMessage() {} +func (*BoolQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *BoolQuery) GetMustQueries() []*Query { + if m != nil { + return m.MustQueries + } + return nil +} + +func (m *BoolQuery) GetMustNotQueries() []*Query { + if m != nil { + return m.MustNotQueries + } + return nil 
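+ // Annotation (not protoc output): generated getters such as this one guard
+ // against a nil receiver and fall back to the zero value, which is why
+ // chained access like bq.GetMustQueries() stays safe even when bq is a nil
+ // *BoolQuery, with no intermediate nil checks required.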
+} + +func (m *BoolQuery) GetFilterQueries() []*Query { + if m != nil { + return m.FilterQueries + } + return nil +} + +func (m *BoolQuery) GetShouldQueries() []*Query { + if m != nil { + return m.ShouldQueries + } + return nil +} + +func (m *BoolQuery) GetMinimumShouldMatch() int32 { + if m != nil && m.MinimumShouldMatch != nil { + return *m.MinimumShouldMatch + } + return 0 +} + +type ConstScoreQuery struct { + Filter *Query `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConstScoreQuery) Reset() { *m = ConstScoreQuery{} } +func (m *ConstScoreQuery) String() string { return proto.CompactTextString(m) } +func (*ConstScoreQuery) ProtoMessage() {} +func (*ConstScoreQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ConstScoreQuery) GetFilter() *Query { + if m != nil { + return m.Filter + } + return nil +} + +type FieldValueFactor struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValueFactor) Reset() { *m = FieldValueFactor{} } +func (m *FieldValueFactor) String() string { return proto.CompactTextString(m) } +func (*FieldValueFactor) ProtoMessage() {} +func (*FieldValueFactor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *FieldValueFactor) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +type FunctionScoreQuery struct { + Query *Query `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + FieldValueFactor *FieldValueFactor `protobuf:"bytes,2,opt,name=field_value_factor" json:"field_value_factor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FunctionScoreQuery) Reset() { *m = FunctionScoreQuery{} } +func (m *FunctionScoreQuery) String() string { return proto.CompactTextString(m) } +func (*FunctionScoreQuery) ProtoMessage() {} +func (*FunctionScoreQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FunctionScoreQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *FunctionScoreQuery) GetFieldValueFactor() *FieldValueFactor { + if m != nil { + return m.FieldValueFactor + } + return nil +} + +type NestedQuery struct { + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Query *Query `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + ScoreMode *ScoreMode `protobuf:"varint,3,opt,name=score_mode,enum=otsprotocol.ScoreMode" json:"score_mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NestedQuery) Reset() { *m = NestedQuery{} } +func (m *NestedQuery) String() string { return proto.CompactTextString(m) } +func (*NestedQuery) ProtoMessage() {} +func (*NestedQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *NestedQuery) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *NestedQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *NestedQuery) GetScoreMode() ScoreMode { + if m != nil && m.ScoreMode != nil { + return *m.ScoreMode + } + return ScoreMode_SCORE_MODE_NONE +} + +type GeoBoundingBoxQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + TopLeft *string `protobuf:"bytes,2,opt,name=top_left" json:"top_left,omitempty"` + BottomRight *string `protobuf:"bytes,3,opt,name=bottom_right" 
json:"bottom_right,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoBoundingBoxQuery) Reset() { *m = GeoBoundingBoxQuery{} } +func (m *GeoBoundingBoxQuery) String() string { return proto.CompactTextString(m) } +func (*GeoBoundingBoxQuery) ProtoMessage() {} +func (*GeoBoundingBoxQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *GeoBoundingBoxQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoBoundingBoxQuery) GetTopLeft() string { + if m != nil && m.TopLeft != nil { + return *m.TopLeft + } + return "" +} + +func (m *GeoBoundingBoxQuery) GetBottomRight() string { + if m != nil && m.BottomRight != nil { + return *m.BottomRight + } + return "" +} + +type GeoDistanceQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + CenterPoint *string `protobuf:"bytes,2,opt,name=center_point" json:"center_point,omitempty"` + Distance *float64 `protobuf:"fixed64,3,opt,name=distance" json:"distance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoDistanceQuery) Reset() { *m = GeoDistanceQuery{} } +func (m *GeoDistanceQuery) String() string { return proto.CompactTextString(m) } +func (*GeoDistanceQuery) ProtoMessage() {} +func (*GeoDistanceQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *GeoDistanceQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoDistanceQuery) GetCenterPoint() string { + if m != nil && m.CenterPoint != nil { + return *m.CenterPoint + } + return "" +} + +func (m *GeoDistanceQuery) GetDistance() float64 { + if m != nil && m.Distance != nil { + return *m.Distance + } + return 0 +} + +type GeoPolygonQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Points []string `protobuf:"bytes,2,rep,name=points" json:"points,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoPolygonQuery) Reset() { *m = GeoPolygonQuery{} } +func (m *GeoPolygonQuery) String() string { return proto.CompactTextString(m) } +func (*GeoPolygonQuery) ProtoMessage() {} +func (*GeoPolygonQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *GeoPolygonQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoPolygonQuery) GetPoints() []string { + if m != nil { + return m.Points + } + return nil +} + +type Query struct { + Type *QueryType `protobuf:"varint,1,opt,name=type,enum=otsprotocol.QueryType" json:"type,omitempty"` + Query []byte `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Query) GetType() QueryType { + if m != nil && m.Type != nil { + return *m.Type + } + return QueryType_MATCH_QUERY +} + +func (m *Query) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +type Collapse struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Collapse) Reset() { *m = Collapse{} } +func (m *Collapse) String() string { return proto.CompactTextString(m) } +func 
(*Collapse) ProtoMessage() {} +func (*Collapse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Collapse) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +type NestedFilter struct { + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Filter *Query `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NestedFilter) Reset() { *m = NestedFilter{} } +func (m *NestedFilter) String() string { return proto.CompactTextString(m) } +func (*NestedFilter) ProtoMessage() {} +func (*NestedFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *NestedFilter) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *NestedFilter) GetFilter() *Query { + if m != nil { + return m.Filter + } + return nil +} + +type ScoreSort struct { + Order *SortOrder `protobuf:"varint,1,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScoreSort) Reset() { *m = ScoreSort{} } +func (m *ScoreSort) String() string { return proto.CompactTextString(m) } +func (*ScoreSort) ProtoMessage() {} +func (*ScoreSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *ScoreSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +type FieldSort struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Order *SortOrder `protobuf:"varint,2,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + Mode *SortMode `protobuf:"varint,3,opt,name=mode,enum=otsprotocol.SortMode" json:"mode,omitempty"` + NestedFilter *NestedFilter `protobuf:"bytes,4,opt,name=nested_filter" json:"nested_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSort) Reset() { *m = FieldSort{} } +func (m *FieldSort) String() string { return proto.CompactTextString(m) } +func (*FieldSort) ProtoMessage() {} +func (*FieldSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *FieldSort) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *FieldSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +func (m *FieldSort) GetMode() SortMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return SortMode_SORT_MODE_MIN +} + +func (m *FieldSort) GetNestedFilter() *NestedFilter { + if m != nil { + return m.NestedFilter + } + return nil +} + +type GeoDistanceSort struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Points []string `protobuf:"bytes,2,rep,name=points" json:"points,omitempty"` + Order *SortOrder `protobuf:"varint,3,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + Mode *SortMode `protobuf:"varint,4,opt,name=mode,enum=otsprotocol.SortMode" json:"mode,omitempty"` + DistanceType *GeoDistanceType `protobuf:"varint,5,opt,name=distance_type,enum=otsprotocol.GeoDistanceType" json:"distance_type,omitempty"` + NestedFilter *NestedFilter `protobuf:"bytes,6,opt,name=nested_filter" json:"nested_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoDistanceSort) Reset() { *m = GeoDistanceSort{} } +func (m *GeoDistanceSort) String() string { return 
proto.CompactTextString(m) } +func (*GeoDistanceSort) ProtoMessage() {} +func (*GeoDistanceSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *GeoDistanceSort) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoDistanceSort) GetPoints() []string { + if m != nil { + return m.Points + } + return nil +} + +func (m *GeoDistanceSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +func (m *GeoDistanceSort) GetMode() SortMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return SortMode_SORT_MODE_MIN +} + +func (m *GeoDistanceSort) GetDistanceType() GeoDistanceType { + if m != nil && m.DistanceType != nil { + return *m.DistanceType + } + return GeoDistanceType_GEO_DISTANCE_ARC +} + +func (m *GeoDistanceSort) GetNestedFilter() *NestedFilter { + if m != nil { + return m.NestedFilter + } + return nil +} + +type PrimaryKeySort struct { + Order *SortOrder `protobuf:"varint,1,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrimaryKeySort) Reset() { *m = PrimaryKeySort{} } +func (m *PrimaryKeySort) String() string { return proto.CompactTextString(m) } +func (*PrimaryKeySort) ProtoMessage() {} +func (*PrimaryKeySort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *PrimaryKeySort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +type Sorter struct { + FieldSort *FieldSort `protobuf:"bytes,1,opt,name=field_sort" json:"field_sort,omitempty"` + GeoDistanceSort *GeoDistanceSort `protobuf:"bytes,2,opt,name=geo_distance_sort" json:"geo_distance_sort,omitempty"` + ScoreSort *ScoreSort `protobuf:"bytes,3,opt,name=score_sort" json:"score_sort,omitempty"` + PkSort *PrimaryKeySort `protobuf:"bytes,4,opt,name=pk_sort" json:"pk_sort,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Sorter) Reset() { *m = Sorter{} } +func (m *Sorter) String() string { return proto.CompactTextString(m) } +func (*Sorter) ProtoMessage() {} +func (*Sorter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *Sorter) GetFieldSort() *FieldSort { + if m != nil { + return m.FieldSort + } + return nil +} + +func (m *Sorter) GetGeoDistanceSort() *GeoDistanceSort { + if m != nil { + return m.GeoDistanceSort + } + return nil +} + +func (m *Sorter) GetScoreSort() *ScoreSort { + if m != nil { + return m.ScoreSort + } + return nil +} + +func (m *Sorter) GetPkSort() *PrimaryKeySort { + if m != nil { + return m.PkSort + } + return nil +} + +type Sort struct { + Sorter []*Sorter `protobuf:"bytes,1,rep,name=sorter" json:"sorter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Sort) Reset() { *m = Sort{} } +func (m *Sort) String() string { return proto.CompactTextString(m) } +func (*Sort) ProtoMessage() {} +func (*Sort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *Sort) GetSorter() []*Sorter { + if m != nil { + return m.Sorter + } + return nil +} + +type SearchQuery struct { + Offset *int32 `protobuf:"varint,1,opt,name=offset" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + Query *Query `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"` + Collapse *Collapse `protobuf:"bytes,5,opt,name=collapse" json:"collapse,omitempty"` + Sort *Sort 
`protobuf:"bytes,6,opt,name=sort" json:"sort,omitempty"` + GetTotalCount *bool `protobuf:"varint,8,opt,name=getTotalCount" json:"getTotalCount,omitempty"` + Token []byte `protobuf:"bytes,9,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchQuery) Reset() { *m = SearchQuery{} } +func (m *SearchQuery) String() string { return proto.CompactTextString(m) } +func (*SearchQuery) ProtoMessage() {} +func (*SearchQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *SearchQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *SearchQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *SearchQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *SearchQuery) GetCollapse() *Collapse { + if m != nil { + return m.Collapse + } + return nil +} + +func (m *SearchQuery) GetSort() *Sort { + if m != nil { + return m.Sort + } + return nil +} + +func (m *SearchQuery) GetGetTotalCount() bool { + if m != nil && m.GetTotalCount != nil { + return *m.GetTotalCount + } + return false +} + +func (m *SearchQuery) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +type ColumnsToGet struct { + ReturnType *ColumnReturnType `protobuf:"varint,1,opt,name=return_type,enum=otsprotocol.ColumnReturnType" json:"return_type,omitempty"` + ColumnNames []string `protobuf:"bytes,2,rep,name=column_names" json:"column_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnsToGet) Reset() { *m = ColumnsToGet{} } +func (m *ColumnsToGet) String() string { return proto.CompactTextString(m) } +func (*ColumnsToGet) ProtoMessage() {} +func (*ColumnsToGet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *ColumnsToGet) GetReturnType() ColumnReturnType { + if m != nil && m.ReturnType != nil { + return *m.ReturnType + } + return ColumnReturnType_RETURN_ALL +} + +func (m *ColumnsToGet) GetColumnNames() []string { + if m != nil { + return m.ColumnNames + } + return nil +} + +type SearchRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + ColumnsToGet *ColumnsToGet `protobuf:"bytes,3,opt,name=columns_to_get" json:"columns_to_get,omitempty"` + SearchQuery []byte `protobuf:"bytes,4,opt,name=search_query" json:"search_query,omitempty"` + RoutingValues [][]byte `protobuf:"bytes,5,rep,name=routing_values" json:"routing_values,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *SearchRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *SearchRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *SearchRequest) GetColumnsToGet() *ColumnsToGet { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *SearchRequest) GetSearchQuery() []byte { + if m != nil { + return m.SearchQuery + } + return nil +} + +func (m *SearchRequest) GetRoutingValues() [][]byte { + if m != nil { + return 
m.RoutingValues + } + return nil +} + +type SearchResponse struct { + TotalHits *int64 `protobuf:"varint,1,opt,name=total_hits" json:"total_hits,omitempty"` + Rows [][]byte `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + IsAllSucceeded *bool `protobuf:"varint,3,opt,name=is_all_succeeded" json:"is_all_succeeded,omitempty"` + NextToken []byte `protobuf:"bytes,6,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *SearchResponse) GetTotalHits() int64 { + if m != nil && m.TotalHits != nil { + return *m.TotalHits + } + return 0 +} + +func (m *SearchResponse) GetRows() [][]byte { + if m != nil { + return m.Rows + } + return nil +} + +func (m *SearchResponse) GetIsAllSucceeded() bool { + if m != nil && m.IsAllSucceeded != nil { + return *m.IsAllSucceeded + } + return false +} + +func (m *SearchResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type FieldSchema struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + FieldType *FieldType `protobuf:"varint,2,opt,name=field_type,enum=otsprotocol.FieldType" json:"field_type,omitempty"` + IndexOptions *IndexOptions `protobuf:"varint,3,opt,name=index_options,enum=otsprotocol.IndexOptions" json:"index_options,omitempty"` + Analyzer *string `protobuf:"bytes,4,opt,name=analyzer" json:"analyzer,omitempty"` + Index *bool `protobuf:"varint,5,opt,name=index" json:"index,omitempty"` + DocValues *bool `protobuf:"varint,6,opt,name=doc_values" json:"doc_values,omitempty"` + Store *bool `protobuf:"varint,7,opt,name=store" json:"store,omitempty"` + FieldSchemas []*FieldSchema `protobuf:"bytes,8,rep,name=field_schemas" json:"field_schemas,omitempty"` + IsArray *bool `protobuf:"varint,9,opt,name=is_array" json:"is_array,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSchema) Reset() { *m = FieldSchema{} } +func (m *FieldSchema) String() string { return proto.CompactTextString(m) } +func (*FieldSchema) ProtoMessage() {} +func (*FieldSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *FieldSchema) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *FieldSchema) GetFieldType() FieldType { + if m != nil && m.FieldType != nil { + return *m.FieldType + } + return FieldType_LONG +} + +func (m *FieldSchema) GetIndexOptions() IndexOptions { + if m != nil && m.IndexOptions != nil { + return *m.IndexOptions + } + return IndexOptions_DOCS +} + +func (m *FieldSchema) GetAnalyzer() string { + if m != nil && m.Analyzer != nil { + return *m.Analyzer + } + return "" +} + +func (m *FieldSchema) GetIndex() bool { + if m != nil && m.Index != nil { + return *m.Index + } + return false +} + +func (m *FieldSchema) GetDocValues() bool { + if m != nil && m.DocValues != nil { + return *m.DocValues + } + return false +} + +func (m *FieldSchema) GetStore() bool { + if m != nil && m.Store != nil { + return *m.Store + } + return false +} + +func (m *FieldSchema) GetFieldSchemas() []*FieldSchema { + if m != nil { + return m.FieldSchemas + } + return nil +} + +func (m *FieldSchema) GetIsArray() bool { + if m != nil && m.IsArray != nil { + return *m.IsArray + } + return 
false +} + +type IndexSchema struct { + FieldSchemas []*FieldSchema `protobuf:"bytes,1,rep,name=field_schemas" json:"field_schemas,omitempty"` + IndexSetting *IndexSetting `protobuf:"bytes,2,opt,name=index_setting" json:"index_setting,omitempty"` + IndexSort *Sort `protobuf:"bytes,3,opt,name=index_sort" json:"index_sort,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSchema) Reset() { *m = IndexSchema{} } +func (m *IndexSchema) String() string { return proto.CompactTextString(m) } +func (*IndexSchema) ProtoMessage() {} +func (*IndexSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *IndexSchema) GetFieldSchemas() []*FieldSchema { + if m != nil { + return m.FieldSchemas + } + return nil +} + +func (m *IndexSchema) GetIndexSetting() *IndexSetting { + if m != nil { + return m.IndexSetting + } + return nil +} + +func (m *IndexSchema) GetIndexSort() *Sort { + if m != nil { + return m.IndexSort + } + return nil +} + +type IndexSetting struct { + NumberOfShards *int32 `protobuf:"varint,1,opt,name=number_of_shards" json:"number_of_shards,omitempty"` + RoutingFields []string `protobuf:"bytes,2,rep,name=routing_fields" json:"routing_fields,omitempty"` + RoutingPartitionSize *int32 `protobuf:"varint,3,opt,name=routing_partition_size" json:"routing_partition_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSetting) Reset() { *m = IndexSetting{} } +func (m *IndexSetting) String() string { return proto.CompactTextString(m) } +func (*IndexSetting) ProtoMessage() {} +func (*IndexSetting) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *IndexSetting) GetNumberOfShards() int32 { + if m != nil && m.NumberOfShards != nil { + return *m.NumberOfShards + } + return 0 +} + +func (m *IndexSetting) GetRoutingFields() []string { + if m != nil { + return m.RoutingFields + } + return nil +} + +func (m *IndexSetting) GetRoutingPartitionSize() int32 { + if m != nil && m.RoutingPartitionSize != nil { + return *m.RoutingPartitionSize + } + return 0 +} + +type CreateSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,req,name=index_name" json:"index_name,omitempty"` + Schema *IndexSchema `protobuf:"bytes,3,opt,name=schema" json:"schema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSearchIndexRequest) Reset() { *m = CreateSearchIndexRequest{} } +func (m *CreateSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSearchIndexRequest) ProtoMessage() {} +func (*CreateSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *CreateSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *CreateSearchIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CreateSearchIndexRequest) GetSchema() *IndexSchema { + if m != nil { + return m.Schema + } + return nil +} + +type CreateSearchIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSearchIndexResponse) Reset() { *m = CreateSearchIndexResponse{} } +func (m *CreateSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSearchIndexResponse) ProtoMessage() {} +func (*CreateSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +type 
IndexInfo struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexInfo) Reset() { *m = IndexInfo{} } +func (m *IndexInfo) String() string { return proto.CompactTextString(m) } +func (*IndexInfo) ProtoMessage() {} +func (*IndexInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *IndexInfo) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *IndexInfo) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type ListSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListSearchIndexRequest) Reset() { *m = ListSearchIndexRequest{} } +func (m *ListSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*ListSearchIndexRequest) ProtoMessage() {} +func (*ListSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *ListSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type ListSearchIndexResponse struct { + Indices []*IndexInfo `protobuf:"bytes,1,rep,name=indices" json:"indices,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListSearchIndexResponse) Reset() { *m = ListSearchIndexResponse{} } +func (m *ListSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*ListSearchIndexResponse) ProtoMessage() {} +func (*ListSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *ListSearchIndexResponse) GetIndices() []*IndexInfo { + if m != nil { + return m.Indices + } + return nil +} + +type DeleteSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSearchIndexRequest) Reset() { *m = DeleteSearchIndexRequest{} } +func (m *DeleteSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSearchIndexRequest) ProtoMessage() {} +func (*DeleteSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +func (m *DeleteSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DeleteSearchIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DeleteSearchIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSearchIndexResponse) Reset() { *m = DeleteSearchIndexResponse{} } +func (m *DeleteSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSearchIndexResponse) ProtoMessage() {} +func (*DeleteSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +type SyncStat struct { + SyncPhase *SyncPhase `protobuf:"varint,1,opt,name=sync_phase,enum=otsprotocol.SyncPhase" json:"sync_phase,omitempty"` + CurrentSyncTimestamp *int64 `protobuf:"varint,2,opt,name=current_sync_timestamp" json:"current_sync_timestamp,omitempty"` + XXX_unrecognized []byte 
`json:"-"` +} + +func (m *SyncStat) Reset() { *m = SyncStat{} } +func (m *SyncStat) String() string { return proto.CompactTextString(m) } +func (*SyncStat) ProtoMessage() {} +func (*SyncStat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +func (m *SyncStat) GetSyncPhase() SyncPhase { + if m != nil && m.SyncPhase != nil { + return *m.SyncPhase + } + return SyncPhase_FULL +} + +func (m *SyncStat) GetCurrentSyncTimestamp() int64 { + if m != nil && m.CurrentSyncTimestamp != nil { + return *m.CurrentSyncTimestamp + } + return 0 +} + +type DescribeSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeSearchIndexRequest) Reset() { *m = DescribeSearchIndexRequest{} } +func (m *DescribeSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeSearchIndexRequest) ProtoMessage() {} +func (*DescribeSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *DescribeSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DescribeSearchIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DescribeSearchIndexResponse struct { + Schema *IndexSchema `protobuf:"bytes,1,opt,name=schema" json:"schema,omitempty"` + SyncStat *SyncStat `protobuf:"bytes,2,opt,name=sync_stat" json:"sync_stat,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeSearchIndexResponse) Reset() { *m = DescribeSearchIndexResponse{} } +func (m *DescribeSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeSearchIndexResponse) ProtoMessage() {} +func (*DescribeSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *DescribeSearchIndexResponse) GetSchema() *IndexSchema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *DescribeSearchIndexResponse) GetSyncStat() *SyncStat { + if m != nil { + return m.SyncStat + } + return nil +} + +func init() { + proto.RegisterType((*MatchQuery)(nil), "otsprotocol.MatchQuery") + proto.RegisterType((*MatchPhraseQuery)(nil), "otsprotocol.MatchPhraseQuery") + proto.RegisterType((*MatchAllQuery)(nil), "otsprotocol.MatchAllQuery") + proto.RegisterType((*TermQuery)(nil), "otsprotocol.TermQuery") + proto.RegisterType((*TermsQuery)(nil), "otsprotocol.TermsQuery") + proto.RegisterType((*RangeQuery)(nil), "otsprotocol.RangeQuery") + proto.RegisterType((*PrefixQuery)(nil), "otsprotocol.PrefixQuery") + proto.RegisterType((*WildcardQuery)(nil), "otsprotocol.WildcardQuery") + proto.RegisterType((*BoolQuery)(nil), "otsprotocol.BoolQuery") + proto.RegisterType((*ConstScoreQuery)(nil), "otsprotocol.ConstScoreQuery") + proto.RegisterType((*FieldValueFactor)(nil), "otsprotocol.FieldValueFactor") + proto.RegisterType((*FunctionScoreQuery)(nil), "otsprotocol.FunctionScoreQuery") + proto.RegisterType((*NestedQuery)(nil), "otsprotocol.NestedQuery") + proto.RegisterType((*GeoBoundingBoxQuery)(nil), "otsprotocol.GeoBoundingBoxQuery") + proto.RegisterType((*GeoDistanceQuery)(nil), "otsprotocol.GeoDistanceQuery") + proto.RegisterType((*GeoPolygonQuery)(nil), "otsprotocol.GeoPolygonQuery") + proto.RegisterType((*Query)(nil), "otsprotocol.Query") + 
proto.RegisterType((*Collapse)(nil), "otsprotocol.Collapse") + proto.RegisterType((*NestedFilter)(nil), "otsprotocol.NestedFilter") + proto.RegisterType((*ScoreSort)(nil), "otsprotocol.ScoreSort") + proto.RegisterType((*FieldSort)(nil), "otsprotocol.FieldSort") + proto.RegisterType((*GeoDistanceSort)(nil), "otsprotocol.GeoDistanceSort") + proto.RegisterType((*PrimaryKeySort)(nil), "otsprotocol.PrimaryKeySort") + proto.RegisterType((*Sorter)(nil), "otsprotocol.Sorter") + proto.RegisterType((*Sort)(nil), "otsprotocol.Sort") + proto.RegisterType((*SearchQuery)(nil), "otsprotocol.SearchQuery") + proto.RegisterType((*ColumnsToGet)(nil), "otsprotocol.ColumnsToGet") + proto.RegisterType((*SearchRequest)(nil), "otsprotocol.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "otsprotocol.SearchResponse") + proto.RegisterType((*FieldSchema)(nil), "otsprotocol.FieldSchema") + proto.RegisterType((*IndexSchema)(nil), "otsprotocol.IndexSchema") + proto.RegisterType((*IndexSetting)(nil), "otsprotocol.IndexSetting") + proto.RegisterType((*CreateSearchIndexRequest)(nil), "otsprotocol.CreateSearchIndexRequest") + proto.RegisterType((*CreateSearchIndexResponse)(nil), "otsprotocol.CreateSearchIndexResponse") + proto.RegisterType((*IndexInfo)(nil), "otsprotocol.IndexInfo") + proto.RegisterType((*ListSearchIndexRequest)(nil), "otsprotocol.ListSearchIndexRequest") + proto.RegisterType((*ListSearchIndexResponse)(nil), "otsprotocol.ListSearchIndexResponse") + proto.RegisterType((*DeleteSearchIndexRequest)(nil), "otsprotocol.DeleteSearchIndexRequest") + proto.RegisterType((*DeleteSearchIndexResponse)(nil), "otsprotocol.DeleteSearchIndexResponse") + proto.RegisterType((*SyncStat)(nil), "otsprotocol.SyncStat") + proto.RegisterType((*DescribeSearchIndexRequest)(nil), "otsprotocol.DescribeSearchIndexRequest") + proto.RegisterType((*DescribeSearchIndexResponse)(nil), "otsprotocol.DescribeSearchIndexResponse") + proto.RegisterEnum("otsprotocol.QueryType", QueryType_name, QueryType_value) + proto.RegisterEnum("otsprotocol.QueryOperator", QueryOperator_name, QueryOperator_value) + proto.RegisterEnum("otsprotocol.ScoreMode", ScoreMode_name, ScoreMode_value) + proto.RegisterEnum("otsprotocol.SortOrder", SortOrder_name, SortOrder_value) + proto.RegisterEnum("otsprotocol.SortMode", SortMode_name, SortMode_value) + proto.RegisterEnum("otsprotocol.GeoDistanceType", GeoDistanceType_name, GeoDistanceType_value) + proto.RegisterEnum("otsprotocol.ColumnReturnType", ColumnReturnType_name, ColumnReturnType_value) + proto.RegisterEnum("otsprotocol.IndexOptions", IndexOptions_name, IndexOptions_value) + proto.RegisterEnum("otsprotocol.FieldType", FieldType_name, FieldType_value) + proto.RegisterEnum("otsprotocol.SyncPhase", SyncPhase_name, SyncPhase_value) +} + +func init() { proto.RegisterFile("search.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1930 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x58, 0xe9, 0x72, 0xdb, 0xd6, + 0x15, 0x0e, 0xb8, 0x89, 0x3c, 0x5c, 0x04, 0x5f, 0xcb, 0x2a, 0x6d, 0x67, 0x51, 0x91, 0xa6, 0xd1, + 0xb0, 0xae, 0xd3, 0x28, 0xe9, 0x64, 0x3a, 0xd3, 0x99, 0x94, 0x22, 0x41, 0x86, 0x13, 0x09, 0xa0, + 0x01, 0xca, 0x96, 0x7f, 0xc1, 0x30, 0x78, 0x29, 0xa2, 0x01, 0x71, 0xe9, 0x8b, 0xcb, 0xc6, 0xf4, + 0x03, 0xf4, 0x15, 0xda, 0x3f, 0x7d, 0x16, 0xff, 0xec, 0x33, 0xf4, 0x0d, 0xfa, 0x18, 0x9d, 0xbb, + 0x80, 0x3b, 0x65, 0x75, 0xf2, 0xcf, 0x38, 0xfc, 0xce, 0xf6, 0x9d, 0xe5, 0x1e, 0x0b, 0x2a, 0x09, + 0xf6, 0x69, 0x30, 0x7e, 0x3a, 0xa5, 
0x84, 0x11, 0x54, 0x26, 0x2c, 0x11, 0xff, 0x0a, 0x48, 0x64, + 0xbc, 0x03, 0xb8, 0xf4, 0x59, 0x30, 0x7e, 0x36, 0xc3, 0x74, 0x8e, 0x10, 0xc0, 0x28, 0xc4, 0xd1, + 0xd0, 0x8b, 0xfd, 0x09, 0xae, 0x6b, 0x27, 0xda, 0x69, 0x09, 0x55, 0x20, 0xc7, 0xf0, 0x5b, 0x56, + 0xcf, 0x88, 0xaf, 0x8f, 0xe1, 0x68, 0x12, 0xc6, 0xe1, 0x64, 0x36, 0xf1, 0x92, 0x31, 0x99, 0x45, + 0x43, 0x6f, 0xc2, 0xd5, 0xeb, 0xd9, 0x13, 0xed, 0x34, 0x8f, 0x9e, 0x40, 0x91, 0x4c, 0x31, 0xf5, + 0x19, 0xa1, 0xf5, 0xdc, 0x89, 0x76, 0x5a, 0x3b, 0x7b, 0xf4, 0x74, 0xc5, 0xdb, 0x53, 0xe1, 0xc5, + 0x56, 0x08, 0xe3, 0x5b, 0xd0, 0x85, 0xef, 0xfe, 0x98, 0xfa, 0x09, 0xbe, 0x63, 0x04, 0xc6, 0x21, + 0x54, 0x85, 0x56, 0x33, 0x8a, 0x84, 0x8a, 0xf1, 0x7b, 0x28, 0x0d, 0x30, 0x9d, 0x7c, 0x40, 0x9f, + 0x4e, 0x84, 0x7e, 0xc5, 0xf8, 0x0a, 0x80, 0xc3, 0x93, 0xfd, 0xf8, 0x2a, 0xe4, 0x39, 0x3e, 0xa9, + 0x67, 0x4e, 0xb2, 0xa7, 0x15, 0x83, 0x01, 0x38, 0x7e, 0x7c, 0x73, 0x4b, 0x80, 0x08, 0x80, 0x72, + 0x84, 0x37, 0xa2, 0x44, 0xb9, 0x41, 0x3a, 0x14, 0xa5, 0x8c, 0x11, 0x41, 0x4e, 0x05, 0x3d, 0x80, + 0x6a, 0x18, 0x07, 0xd1, 0x6c, 0x88, 0xbd, 0x88, 0xfc, 0x8c, 0x25, 0x43, 0xc5, 0x55, 0xf1, 0x6c, + 0x3a, 0xc5, 0xb4, 0x9e, 0xe7, 0x62, 0xe3, 0x6b, 0x28, 0xf7, 0x29, 0x1e, 0x85, 0x6f, 0xf7, 0xbb, + 0xad, 0x41, 0x61, 0x2a, 0x20, 0x8a, 0x99, 0x33, 0xa8, 0xbe, 0x08, 0xa3, 0x61, 0xe0, 0xd3, 0xe1, + 0xad, 0xc9, 0xfd, 0xcd, 0x8f, 0x66, 0x58, 0xe9, 0xfc, 0x47, 0x83, 0xd2, 0x39, 0x21, 0x92, 0x4a, + 0x74, 0x0a, 0x95, 0xc9, 0x2c, 0x61, 0xde, 0x9b, 0x19, 0xa6, 0x21, 0x4e, 0xea, 0xda, 0x49, 0xf6, + 0xb4, 0x7c, 0x86, 0xb6, 0x6b, 0x88, 0x9e, 0x80, 0x2e, 0x90, 0x31, 0x59, 0xa2, 0x33, 0x7b, 0xd1, + 0x0d, 0xa8, 0x8d, 0xc2, 0x88, 0x61, 0xba, 0xc0, 0x66, 0x6f, 0xc3, 0xaa, 0xce, 0x4a, 0xb1, 0xb9, + 0xbd, 0xd8, 0x7d, 0xdd, 0xc8, 0x29, 0xcc, 0x1b, 0x7f, 0x84, 0xc3, 0x16, 0x89, 0x13, 0xe6, 0x06, + 0x84, 0xaa, 0xea, 0x19, 0x50, 0x90, 0x81, 0x08, 0x36, 0x76, 0x1a, 0x35, 0x7e, 0x0b, 0x7a, 0x87, + 0xb3, 0xf6, 0x9c, 0xd3, 0xd4, 0xf1, 0x03, 0x46, 0xe8, 0x2e, 0x26, 0x0d, 0x0a, 0xa8, 0x33, 0x8b, + 0x03, 0x16, 0x92, 0x78, 0xc5, 0xc3, 0xaf, 0x21, 0xcf, 0xe3, 0x9e, 0xef, 0x77, 0x80, 0xfe, 0x04, + 0x48, 0x1a, 0x13, 0x85, 0xf0, 0x46, 0xc2, 0x85, 0xa8, 0x47, 0xf9, 0xec, 0x93, 0x35, 0xfc, 0x66, + 0x1c, 0xc6, 0x5f, 0xa1, 0x6c, 0xe1, 0x84, 0x61, 0x55, 0xe0, 0x0a, 0xe4, 0xa6, 0x3e, 0x1b, 0xab, + 0xd2, 0x2e, 0x5c, 0x67, 0xf6, 0xba, 0x6e, 0x00, 0x24, 0x3c, 0x56, 0x6f, 0x42, 0x86, 0x58, 0xf4, + 0x65, 0xed, 0xec, 0x78, 0x0d, 0x27, 0x52, 0xb9, 0x24, 0x43, 0x6c, 0x3c, 0x83, 0xfb, 0x5d, 0x4c, + 0xce, 0xc9, 0x2c, 0x1e, 0x86, 0xf1, 0xcd, 0x39, 0xb9, 0xa5, 0x13, 0x75, 0x28, 0x32, 0x32, 0xf5, + 0x22, 0x3c, 0x4a, 0xf7, 0xc4, 0x11, 0x54, 0x5e, 0x13, 0xc6, 0xc8, 0xc4, 0xa3, 0xe1, 0xcd, 0x98, + 0x09, 0x57, 0x25, 0xc3, 0x02, 0xbd, 0x8b, 0x49, 0x3b, 0x4c, 0x98, 0x1f, 0x07, 0xb7, 0x0c, 0xd4, + 0x11, 0x54, 0x02, 0x1c, 0xf3, 0x7e, 0x99, 0x92, 0x30, 0x4e, 0x6d, 0xea, 0x50, 0x1c, 0x2a, 0x55, + 0x61, 0x4f, 0xe3, 0x15, 0xee, 0x62, 0xd2, 0x27, 0xd1, 0xfc, 0x86, 0xc4, 0xb7, 0x0f, 0x0a, 0xb7, + 0x23, 0x5b, 0xb4, 0x64, 0xfc, 0x19, 0xf2, 0x12, 0xfc, 0x1b, 0xc8, 0xb1, 0xf9, 0x54, 0xc2, 0x36, + 0x89, 0x10, 0x88, 0xc1, 0x7c, 0x8a, 0xf9, 0xc8, 0x2c, 0x79, 0xad, 0x18, 0x9f, 0x42, 0xb1, 0x45, + 0xa2, 0xc8, 0x9f, 0x26, 0x78, 0x67, 0x5f, 0xfc, 0x05, 0x2a, 0xb2, 0x46, 0x1d, 0xd1, 0x69, 0x1b, + 0x45, 0x5a, 0x76, 0xe0, 0xde, 0x2a, 0x19, 0x67, 0x50, 0x12, 0x65, 0x70, 0x09, 0x65, 0xe8, 0x0b, + 0xc8, 0x13, 0x3a, 0x54, 0x1d, 0xbb, 0x55, 0x2d, 0x42, 0x99, 0xcd, 0x7f, 0x35, 0xfe, 0xa5, 0x41, + 0x49, 0xb4, 0x8b, 0x50, 0xda, 0xc5, 0xc2, 0xc2, 0x50, 0xe6, 
0x36, 0x43, 0xe8, 0x73, 0xc8, 0xad, + 0x34, 0xc7, 0x83, 0x2d, 0x14, 0xef, 0x0d, 0xf4, 0x07, 0xa8, 0xc6, 0x22, 0x47, 0x4f, 0x25, 0x93, + 0x13, 0xc9, 0x3c, 0x5c, 0x43, 0xaf, 0xb2, 0x60, 0xfc, 0x57, 0x13, 0xb5, 0x4a, 0x6b, 0xbf, 0x37, + 0xca, 0x8d, 0x5a, 0x2d, 0xa3, 0xce, 0xde, 0x29, 0xea, 0xdc, 0x6d, 0x51, 0x7f, 0x03, 0xd5, 0xb4, + 0x81, 0x3c, 0x51, 0xf7, 0xbc, 0x40, 0x7f, 0xbc, 0x86, 0x5e, 0x09, 0x52, 0x54, 0x7f, 0x2b, 0xd5, + 0xc2, 0x87, 0x52, 0xfd, 0x0e, 0x6a, 0x7d, 0x1a, 0x4e, 0x7c, 0x3a, 0xff, 0x11, 0xcf, 0xff, 0x9f, + 0x1a, 0xfe, 0x5b, 0x83, 0x02, 0xff, 0xc2, 0x94, 0x0f, 0xaa, 0xa4, 0x26, 0x21, 0x94, 0xa9, 0x5d, + 0x72, 0xbc, 0xbd, 0x1b, 0x84, 0xf5, 0xef, 0xe0, 0xde, 0x0d, 0x26, 0xde, 0x22, 0x35, 0xa1, 0x22, + 0xbb, 0x6b, 0x6f, 0x6a, 0x42, 0x71, 0xb1, 0x0d, 0x84, 0x46, 0x76, 0x87, 0x93, 0x65, 0x1b, 0x3e, + 0x81, 0x83, 0xe9, 0x4f, 0x12, 0x28, 0x6b, 0xfd, 0x78, 0x0d, 0xb8, 0x9e, 0xb0, 0xf1, 0x3b, 0xc8, + 0x09, 0xad, 0xcf, 0xa1, 0x90, 0x88, 0x84, 0xd4, 0x53, 0x72, 0x7f, 0x2b, 0x73, 0x4c, 0x8d, 0xf7, + 0x1a, 0x94, 0x5d, 0x71, 0xa1, 0xc8, 0xa9, 0xac, 0x41, 0x81, 0x8c, 0x46, 0x09, 0x96, 0x79, 0xe7, + 0xf9, 0xfc, 0x45, 0xe1, 0x24, 0x94, 0x39, 0xe5, 0x97, 0x6b, 0x2e, 0xb7, 0x77, 0xcd, 0x7d, 0x09, + 0xc5, 0x40, 0x8d, 0xa8, 0xa8, 0x71, 0x79, 0xa3, 0x23, 0x16, 0xf3, 0xfb, 0x19, 0xe4, 0x44, 0x4a, + 0xb2, 0xa6, 0xf7, 0xb6, 0xa2, 0xe3, 0xaf, 0xf3, 0x0d, 0x66, 0x03, 0xc2, 0xfc, 0xa8, 0x45, 0x66, + 0x31, 0xab, 0x17, 0xc5, 0xa3, 0xcd, 0x4f, 0x04, 0xf2, 0x13, 0x8e, 0xeb, 0x25, 0xb1, 0x12, 0xae, + 0xa1, 0xd2, 0x22, 0xd1, 0x6c, 0x12, 0x27, 0x03, 0xd2, 0xc5, 0x0c, 0x9d, 0x41, 0x99, 0x62, 0x36, + 0xa3, 0xb1, 0xb7, 0xb2, 0x5e, 0x3e, 0xd9, 0x0c, 0x61, 0x36, 0x89, 0x1d, 0x81, 0x12, 0x7d, 0xc6, + 0x77, 0x9e, 0x90, 0x89, 0x69, 0x48, 0x57, 0xd5, 0x3f, 0x35, 0xa8, 0x4a, 0x6e, 0x1c, 0xfc, 0x66, + 0x86, 0x13, 0x31, 0x34, 0xcc, 0x7f, 0x1d, 0xe1, 0x8d, 0x03, 0x24, 0x8c, 0x87, 0xf8, 0xad, 0x94, + 0xc9, 0x6d, 0xf9, 0x35, 0xd4, 0xa4, 0xbd, 0xc4, 0x63, 0xc4, 0xbb, 0xc1, 0x69, 0x81, 0x1f, 0xee, + 0x08, 0x43, 0x85, 0x7d, 0x94, 0x5e, 0x8a, 0xde, 0x92, 0xe0, 0x0a, 0x3a, 0x86, 0x1a, 0x25, 0x33, + 0x16, 0xc6, 0x37, 0xf2, 0xc1, 0x4a, 0xea, 0x79, 0x71, 0x17, 0xbd, 0x82, 0x5a, 0x1a, 0x59, 0x32, + 0x25, 0xb1, 0xdc, 0x86, 0x8c, 0x33, 0xe5, 0x8d, 0x43, 0x96, 0x88, 0xd0, 0xb2, 0x7c, 0xfb, 0x51, + 0xf2, 0xb3, 0xba, 0xa5, 0x50, 0x1d, 0xf4, 0x30, 0xf1, 0xfc, 0x28, 0xf2, 0x92, 0x59, 0x10, 0x60, + 0x3c, 0xc4, 0x43, 0x11, 0x56, 0x91, 0xeb, 0xc6, 0xf8, 0x2d, 0xf3, 0x24, 0xad, 0x05, 0x41, 0xeb, + 0xdf, 0x33, 0x50, 0x96, 0x6d, 0x1e, 0x8c, 0xf1, 0xc4, 0xdf, 0xb9, 0x2f, 0x16, 0x83, 0x22, 0x98, + 0xde, 0xb5, 0xda, 0x84, 0x85, 0x74, 0x94, 0x25, 0x4d, 0x64, 0xca, 0x5f, 0xed, 0x44, 0xed, 0x94, + 0x75, 0x46, 0x7a, 0x1c, 0x61, 0x4b, 0x00, 0x7f, 0x72, 0xfc, 0xd8, 0x8f, 0xe6, 0xef, 0xd4, 0x8a, + 0x13, 0xf7, 0x93, 0xb0, 0x21, 0xcf, 0x34, 0x1e, 0xd2, 0x90, 0x04, 0x29, 0x31, 0x85, 0xb4, 0x39, + 0x12, 0x46, 0x28, 0xae, 0x1f, 0x88, 0xcf, 0xaf, 0xa0, 0xaa, 0x46, 0x59, 0x64, 0x91, 0xd4, 0x8b, + 0x62, 0x14, 0xea, 0x3b, 0xa6, 0x59, 0xa6, 0xa9, 0x43, 0x91, 0x93, 0x44, 0xa9, 0x3f, 0x17, 0xfd, + 0x55, 0x34, 0xfe, 0xa1, 0x41, 0x59, 0xc4, 0xa5, 0x10, 0x5b, 0x26, 0xb5, 0x0f, 0x98, 0x5c, 0x64, + 0x9e, 0x60, 0xc6, 0x2b, 0xa9, 0xd6, 0xc3, 0x8e, 0xcc, 0x5d, 0x09, 0x40, 0x5f, 0xa4, 0x2d, 0xb5, + 0xb2, 0x1b, 0xb6, 0xe7, 0xc3, 0x78, 0x05, 0x95, 0x35, 0xb5, 0x3a, 0xe8, 0xf1, 0x6c, 0xf2, 0x1a, + 0x53, 0x8f, 0x8c, 0xbc, 0x64, 0xec, 0xd3, 0x61, 0xa2, 0xa6, 0x78, 0xa5, 0x8d, 0x44, 0xec, 0xe9, + 0x82, 0xff, 0x14, 0x8e, 0x53, 0xf9, 0xd4, 0xa7, 0x2c, 0xe4, 0xc4, 0x7b, 0x49, 0xf8, 
0x4e, 0xbe, + 0x40, 0x79, 0x23, 0x82, 0x7a, 0x8b, 0x62, 0x9f, 0x61, 0xd9, 0x6c, 0xc2, 0xdb, 0xbe, 0x59, 0xc8, + 0xec, 0x98, 0x05, 0x2e, 0x3b, 0x85, 0x82, 0x64, 0x4a, 0x25, 0x52, 0xdf, 0x91, 0xb7, 0xf8, 0xdd, + 0x78, 0x0c, 0x0f, 0x77, 0x78, 0x93, 0xfd, 0x6d, 0x7c, 0x03, 0x25, 0x21, 0xe8, 0xc5, 0x23, 0x72, + 0xd7, 0x39, 0x34, 0x9e, 0xc0, 0xf1, 0x45, 0x98, 0xb0, 0x3b, 0x44, 0xcf, 0xd1, 0xe7, 0xf0, 0xab, + 0x2d, 0xb4, 0x9a, 0xae, 0x2f, 0xe1, 0x20, 0x8c, 0x87, 0x61, 0xb0, 0xb8, 0xcb, 0x8f, 0xb7, 0xb3, + 0xe0, 0x91, 0x19, 0xe7, 0x50, 0x6f, 0xe3, 0x08, 0xdf, 0x89, 0xb1, 0x7d, 0x51, 0x3f, 0x86, 0x87, + 0x3b, 0x6c, 0x28, 0x1e, 0x9e, 0x43, 0xd1, 0x9d, 0xc7, 0x81, 0xcb, 0x7c, 0xf9, 0x86, 0xcc, 0xe3, + 0xc0, 0x9b, 0x8e, 0xfd, 0x64, 0xf7, 0x21, 0xc5, 0xa1, 0x7d, 0xfe, 0x2b, 0x2f, 0x75, 0x30, 0xa3, + 0x14, 0xc7, 0xcc, 0x13, 0x3a, 0x2c, 0x9c, 0xe0, 0x84, 0xf9, 0x93, 0xa9, 0x70, 0x9a, 0x35, 0xda, + 0xf0, 0xa8, 0x8d, 0x93, 0x80, 0x86, 0xaf, 0x7f, 0x49, 0xe8, 0x6f, 0xe0, 0xf1, 0x4e, 0x2b, 0x8a, + 0xc6, 0x65, 0x2f, 0x68, 0xb7, 0xf7, 0x02, 0x3a, 0x85, 0x92, 0x08, 0x33, 0x61, 0x7e, 0xfa, 0x9e, + 0x3e, 0xd8, 0xca, 0x8c, 0x93, 0xd0, 0x78, 0x9f, 0x81, 0xd2, 0xf2, 0x5e, 0x3c, 0x84, 0xf2, 0x65, + 0x73, 0xd0, 0xfa, 0xc1, 0x7b, 0x76, 0x65, 0x3a, 0x2f, 0x75, 0x0d, 0x1d, 0x03, 0x92, 0x82, 0xfe, + 0x0f, 0x4e, 0xd3, 0x35, 0x95, 0x3c, 0x83, 0x6a, 0x00, 0x03, 0xd3, 0xb9, 0x54, 0xdf, 0x59, 0xae, + 0xe8, 0x34, 0xad, 0x6e, 0x0a, 0xc8, 0x21, 0x1d, 0x2a, 0x7d, 0xc7, 0xec, 0xf4, 0xae, 0x95, 0x24, + 0xcf, 0x55, 0xce, 0x6d, 0xfb, 0x42, 0x7d, 0x17, 0xd0, 0x03, 0xb8, 0xd7, 0xb2, 0x2d, 0x77, 0xe0, + 0xb9, 0x2d, 0xdb, 0x49, 0x15, 0x0f, 0x50, 0x1d, 0x8e, 0x3a, 0x57, 0x56, 0x6b, 0xd0, 0xb3, 0xad, + 0xb5, 0x5f, 0x8a, 0xdc, 0xa4, 0x65, 0xba, 0x03, 0xb3, 0xad, 0x24, 0x9c, 0xc3, 0xda, 0x8b, 0xde, + 0x45, 0xbb, 0xd5, 0x74, 0x52, 0x19, 0xa0, 0xfb, 0x70, 0x28, 0x23, 0x6e, 0x5e, 0xa4, 0xbe, 0xca, + 0xe8, 0x11, 0x1c, 0x77, 0x4d, 0xdb, 0x3b, 0xb7, 0xaf, 0xac, 0x76, 0xcf, 0xea, 0x7a, 0xe7, 0x76, + 0x1a, 0x17, 0x7f, 0x24, 0x10, 0xff, 0xad, 0xdd, 0x73, 0x07, 0x4d, 0xab, 0x95, 0xba, 0xab, 0xf2, + 0xf8, 0xb8, 0xbc, 0x6f, 0x5f, 0xbc, 0xec, 0xda, 0x96, 0x12, 0xd7, 0x78, 0xa6, 0x3c, 0x73, 0x57, + 0x09, 0x0e, 0x1b, 0x27, 0x50, 0x5d, 0xfb, 0xe3, 0x00, 0x2a, 0x40, 0xc6, 0x76, 0x74, 0x0d, 0x1d, + 0x40, 0xb6, 0x69, 0xb5, 0xf5, 0x4c, 0x83, 0xaa, 0xa3, 0x58, 0x5c, 0x72, 0xf7, 0xe1, 0x50, 0xa6, + 0x75, 0x69, 0xb7, 0x4d, 0xcf, 0xb2, 0x2d, 0x53, 0xd7, 0x78, 0x22, 0x2b, 0xc2, 0xe6, 0xf3, 0xae, + 0x9e, 0xd9, 0x90, 0x5d, 0x36, 0xaf, 0xf5, 0x2c, 0x3a, 0x02, 0x7d, 0x45, 0x36, 0xb0, 0x07, 0xcd, + 0x0b, 0x3d, 0xb7, 0x89, 0xec, 0x59, 0x7a, 0xbe, 0xf1, 0x2d, 0x94, 0x96, 0x27, 0x26, 0x07, 0xd8, + 0xce, 0xc0, 0xb3, 0x9d, 0xb6, 0xe9, 0x78, 0x4d, 0xb7, 0xa5, 0x7f, 0x24, 0xe2, 0x58, 0xca, 0xda, + 0xa6, 0xdb, 0xd2, 0xb5, 0x46, 0x0b, 0x8a, 0x8b, 0x93, 0xf3, 0x1e, 0x54, 0x05, 0x60, 0x61, 0xf4, + 0xa3, 0x0d, 0x51, 0xf3, 0x5a, 0xd7, 0xd6, 0x45, 0x22, 0xf0, 0xc6, 0xf7, 0x6b, 0xe7, 0xb2, 0xba, + 0x10, 0xf4, 0x35, 0x8e, 0x9b, 0x0e, 0x0f, 0x61, 0x93, 0xf9, 0xfe, 0x45, 0x93, 0xb3, 0xd1, 0xe8, + 0x81, 0xbe, 0x75, 0x63, 0xd4, 0x00, 0x1c, 0x73, 0x70, 0xe5, 0x58, 0xbc, 0xae, 0xba, 0xc6, 0x2d, + 0xaa, 0x6f, 0xb7, 0x6f, 0xb6, 0x7a, 0x9d, 0x9e, 0xd9, 0xd6, 0x33, 0xa2, 0x0d, 0xa5, 0x54, 0x10, + 0x9b, 0x6d, 0x7c, 0xaf, 0x96, 0x7c, 0xfa, 0x2a, 0x16, 0x21, 0xd7, 0xb6, 0x5b, 0xae, 0xae, 0xa1, + 0x12, 0xe4, 0x3b, 0x8e, 0xf9, 0xcc, 0xd5, 0x33, 0xa8, 0x0a, 0xa5, 0xbe, 0xed, 0xf6, 0x78, 0xcb, + 0xb9, 0x7a, 0x16, 0x95, 0xe1, 0xc0, 0xee, 0x74, 0x5c, 0x73, 0xe0, 0xea, 0xb9, 0xc6, 0x2b, 0xf5, + 0x7f, 0x13, 
0x11, 0x44, 0x11, 0x72, 0x17, 0xb6, 0xd5, 0xd5, 0x35, 0x04, 0x50, 0x68, 0xdb, 0x57, + 0xe7, 0x17, 0xa6, 0x9e, 0xe1, 0x78, 0xde, 0xd8, 0x66, 0xd3, 0x92, 0xca, 0x3f, 0x9a, 0x2f, 0x5f, + 0xd8, 0x4e, 0x5b, 0xcf, 0x71, 0xfc, 0xc0, 0xbc, 0x1e, 0xe8, 0x79, 0x8e, 0x97, 0xbd, 0xab, 0x17, + 0xb8, 0x3b, 0xd9, 0x58, 0x3d, 0x6b, 0xa0, 0x1f, 0x34, 0x3e, 0x83, 0xd2, 0x72, 0xcf, 0x14, 0x21, + 0xd7, 0xb9, 0x12, 0x09, 0x16, 0x21, 0xd7, 0xb3, 0x5a, 0x8e, 0x9e, 0xf9, 0x5f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xe6, 0x50, 0x1e, 0x93, 0x04, 0x13, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto new file mode 100644 index 000000000000..0a6613203885 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto @@ -0,0 +1,323 @@ +syntax = "proto2"; +package otsprotocol; + +enum QueryType { + MATCH_QUERY = 1; + MATCH_PHRASE_QUERY = 2; + TERM_QUERY = 3; + RANGE_QUERY = 4; + PREFIX_QUERY = 5; + BOOL_QUERY = 6; + CONST_SCORE_QUERY = 7; + FUNCTION_SCORE_QUERY = 8; + NESTED_QUERY = 9; + WILDCARD_QUERY = 10; + MATCH_ALL_QUERY = 11; + GEO_BOUNDING_BOX_QUERY = 12; + GEO_DISTANCE_QUERY = 13; + GEO_POLYGON_QUERY = 14; + TERMS_QUERY = 15; +} + +enum QueryOperator { + OR = 1; + AND = 2; +} + +message MatchQuery { + optional string field_name = 1; + optional string text = 2; + optional int32 minimum_should_match = 3; + optional QueryOperator operator = 4; +} + +message MatchPhraseQuery { + optional string field_name = 1; + optional string text = 2; +} + +message MatchAllQuery { +} + +message TermQuery { + optional string field_name = 1; + optional bytes term = 2; +} + +message TermsQuery { + optional string field_name = 1; + repeated bytes terms = 2; +} + +message RangeQuery { + optional string field_name = 1; + optional bytes range_from = 2; // variant value + optional bytes range_to = 3; // variant value + optional bool include_lower = 4; + optional bool include_upper = 5; +} + +message PrefixQuery { + optional string field_name = 1; + optional string prefix = 2; +} + +message WildcardQuery { + optional string field_name = 1; + optional string value = 2; +} + +message BoolQuery { + repeated Query must_queries = 1; + repeated Query must_not_queries = 2; + repeated Query filter_queries = 3; + repeated Query should_queries = 4; + optional int32 minimum_should_match = 5; +} + +message ConstScoreQuery { + optional Query filter = 1; +} + +message FieldValueFactor { + optional string field_name = 1; +} + +message FunctionScoreQuery { + optional Query query = 1; + optional FieldValueFactor field_value_factor = 2; +} + +enum ScoreMode { + SCORE_MODE_NONE = 1; + SCORE_MODE_AVG = 2; + SCORE_MODE_MAX = 3; + SCORE_MODE_TOTAL = 4; + SCORE_MODE_MIN = 5; +} + +message NestedQuery { + optional string path = 1; + optional Query query = 2; + optional ScoreMode score_mode = 3; +} + +message GeoBoundingBoxQuery { + optional string field_name = 1; + optional string top_left = 2; + optional string bottom_right = 3; +} + +message GeoDistanceQuery { + optional string field_name = 1; + optional string center_point = 2; + optional double distance = 3; +} + +message GeoPolygonQuery { + optional string field_name = 1; + repeated string points = 2; +} + +message Query { + optional QueryType type = 1; + optional bytes query = 2; +} + +message Collapse { + optional string field_name = 1; +} + +message NestedFilter { + optional string path = 1; + optional Query 
filter = 2; +} + +enum SortOrder { + SORT_ORDER_ASC = 0; + SORT_ORDER_DESC = 1; +} + +enum SortMode { + SORT_MODE_MIN = 0; + SORT_MODE_MAX = 1; + SORT_MODE_AVG = 2; +} + +message ScoreSort { + optional SortOrder order = 1; +} + +message FieldSort { + optional string field_name = 1; + optional SortOrder order = 2; + optional SortMode mode = 3; + optional NestedFilter nested_filter = 4; +} + +enum GeoDistanceType { + GEO_DISTANCE_ARC = 0; + GEO_DISTANCE_PLANE = 1; +} + +message GeoDistanceSort { + optional string field_name = 1; + repeated string points = 2; + optional SortOrder order = 3; + optional SortMode mode = 4; + optional GeoDistanceType distance_type = 5; + optional NestedFilter nested_filter = 6; +} + +message PrimaryKeySort { + optional SortOrder order = 1; +} + +message Sorter { + optional FieldSort field_sort = 1; + optional GeoDistanceSort geo_distance_sort = 2; + optional ScoreSort score_sort = 3; + optional PrimaryKeySort pk_sort = 4; +} + +message Sort { + repeated Sorter sorter = 1; +} + +message SearchQuery { + optional int32 offset = 1; + optional int32 limit = 2; + optional Query query = 4; + optional Collapse collapse = 5; + optional Sort sort = 6; + optional bool getTotalCount = 8; + optional bytes token = 9; +} + +enum ColumnReturnType { + RETURN_ALL = 1; + RETURN_SPECIFIED = 2; + RETURN_NONE = 3; +} + +message ColumnsToGet { + optional ColumnReturnType return_type = 1; + repeated string column_names = 2; +} + +message SearchRequest { + optional string table_name = 1; + optional string index_name = 2; + optional ColumnsToGet columns_to_get = 3; + optional bytes search_query = 4; + repeated bytes routing_values = 5; +} + +/** + * Response部分: + **/ + +message SearchResponse { + optional int64 total_hits = 1; + repeated bytes rows = 2; + optional bool is_all_succeeded = 3; + optional bytes next_token = 6; +} + +/* Create Search Index */ + +enum IndexOptions { + DOCS = 1; + FREQS = 2; + POSITIONS = 3; + OFFSETS = 4; +} + +enum FieldType { + LONG = 1; + DOUBLE = 2; + BOOLEAN = 3; + KEYWORD = 4; + TEXT = 5; + NESTED = 6; + GEO_POINT = 7; +} + +message FieldSchema { + optional string field_name = 1; + optional FieldType field_type = 2; + optional IndexOptions index_options = 3; + optional string analyzer = 4; + optional bool index = 5; + optional bool doc_values = 6; + optional bool store = 7; + repeated FieldSchema field_schemas = 8; // only for nested type + optional bool is_array = 9; +} + +message IndexSchema { + repeated FieldSchema field_schemas = 1; + optional IndexSetting index_setting = 2; + optional Sort index_sort = 3; +} + +message IndexSetting { + optional int32 number_of_shards = 1; + repeated string routing_fields = 2; + optional int32 routing_partition_size = 3; +} + +message CreateSearchIndexRequest { + required string table_name = 1; + required string index_name = 2; + optional IndexSchema schema = 3; +} + +message CreateSearchIndexResponse { +} + +/* List Search Index */ + +message IndexInfo { + optional string table_name = 1; + optional string index_name = 2; +} + +message ListSearchIndexRequest { + optional string table_name = 1; +} +message ListSearchIndexResponse { + repeated IndexInfo indices = 1; +} + +/* Delete Search Index */ + +message DeleteSearchIndexRequest { + optional string table_name = 1; + optional string index_name = 2; +} + +message DeleteSearchIndexResponse { +} + +/* Describe Search Index */ + +enum SyncPhase { + FULL = 1; + INCR = 2; +} + +message SyncStat { + optional SyncPhase sync_phase = 1; + optional int64 current_sync_timestamp 
= 2; // 同步进度,参考TunnelService。 +} + +message DescribeSearchIndexRequest { + optional string table_name = 1; + optional string index_name = 2; +} + +message DescribeSearchIndexResponse { + optional IndexSchema schema = 1; + optional SyncStat sync_stat = 2; +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go new file mode 100644 index 000000000000..de5d507d13b6 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go @@ -0,0 +1,3141 @@ +// Code generated by protoc-gen-go. +// source: table_store.proto +// DO NOT EDIT! + +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type PrimaryKeyType int32 + +const ( + PrimaryKeyType_INTEGER PrimaryKeyType = 1 + PrimaryKeyType_STRING PrimaryKeyType = 2 + PrimaryKeyType_BINARY PrimaryKeyType = 3 +) + +var PrimaryKeyType_name = map[int32]string{ + 1: "INTEGER", + 2: "STRING", + 3: "BINARY", +} +var PrimaryKeyType_value = map[string]int32{ + "INTEGER": 1, + "STRING": 2, + "BINARY": 3, +} + +func (x PrimaryKeyType) Enum() *PrimaryKeyType { + p := new(PrimaryKeyType) + *p = x + return p +} +func (x PrimaryKeyType) String() string { + return proto.EnumName(PrimaryKeyType_name, int32(x)) +} +func (x *PrimaryKeyType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PrimaryKeyType_value, data, "PrimaryKeyType") + if err != nil { + return err + } + *x = PrimaryKeyType(value) + return nil +} +func (PrimaryKeyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type PrimaryKeyOption int32 + +const ( + PrimaryKeyOption_AUTO_INCREMENT PrimaryKeyOption = 1 +) + +var PrimaryKeyOption_name = map[int32]string{ + 1: "AUTO_INCREMENT", +} +var PrimaryKeyOption_value = map[string]int32{ + "AUTO_INCREMENT": 1, +} + +func (x PrimaryKeyOption) Enum() *PrimaryKeyOption { + p := new(PrimaryKeyOption) + *p = x + return p +} +func (x PrimaryKeyOption) String() string { + return proto.EnumName(PrimaryKeyOption_name, int32(x)) +} +func (x *PrimaryKeyOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PrimaryKeyOption_value, data, "PrimaryKeyOption") + if err != nil { + return err + } + *x = PrimaryKeyOption(value) + return nil +} +func (PrimaryKeyOption) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +type BloomFilterType int32 + +const ( + BloomFilterType_NONE BloomFilterType = 1 + BloomFilterType_CELL BloomFilterType = 2 + BloomFilterType_ROW BloomFilterType = 3 +) + +var BloomFilterType_name = map[int32]string{ + 1: "NONE", + 2: "CELL", + 3: "ROW", +} +var BloomFilterType_value = map[string]int32{ + "NONE": 1, + "CELL": 2, + "ROW": 3, +} + +func (x BloomFilterType) Enum() *BloomFilterType { + p := new(BloomFilterType) + *p = x + return p +} +func (x BloomFilterType) String() string { + return proto.EnumName(BloomFilterType_name, int32(x)) +} +func (x *BloomFilterType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BloomFilterType_value, data, "BloomFilterType") + if err != nil { + return err + } + *x = BloomFilterType(value) + return nil +} +func (BloomFilterType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +// * 
+// Table status changes correspond only to user operations; internal events such as
+// machine failover do not change the table status. Three considerations went into this:
+// 1. In typical scenarios, users only check the table status after modifying the table.
+// 2. There is a delay between an internal failover causing access errors and the moment a
+//    status change could be observed, so the table's unavailable state cannot be matched
+//    exactly to the status the user sees.
+// 3. After an internal failover the change is really a partition status change, not a
+//    whole-table one; the matching table status would be PARTIAL_FAILOVER, and since that
+//    partial granularity cannot be expressed here, it would only confuse users further.
+type TableStatus int32
+
+const (
+	TableStatus_ACTIVE    TableStatus = 1
+	TableStatus_INACTIVE  TableStatus = 2
+	TableStatus_LOADING   TableStatus = 3
+	TableStatus_UNLOADING TableStatus = 4
+	TableStatus_UPDATING  TableStatus = 5
+)
+
+var TableStatus_name = map[int32]string{
+	1: "ACTIVE",
+	2: "INACTIVE",
+	3: "LOADING",
+	4: "UNLOADING",
+	5: "UPDATING",
+}
+var TableStatus_value = map[string]int32{
+	"ACTIVE":    1,
+	"INACTIVE":  2,
+	"LOADING":   3,
+	"UNLOADING": 4,
+	"UPDATING":  5,
+}
+
+func (x TableStatus) Enum() *TableStatus {
+	p := new(TableStatus)
+	*p = x
+	return p
+}
+func (x TableStatus) String() string {
+	return proto.EnumName(TableStatus_name, int32(x))
+}
+func (x *TableStatus) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(TableStatus_value, data, "TableStatus")
+	if err != nil {
+		return err
+	}
+	*x = TableStatus(value)
+	return nil
+}
+func (TableStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
+
+type RowExistenceExpectation int32
+
+const (
+	RowExistenceExpectation_IGNORE           RowExistenceExpectation = 0
+	RowExistenceExpectation_EXPECT_EXIST     RowExistenceExpectation = 1
+	RowExistenceExpectation_EXPECT_NOT_EXIST RowExistenceExpectation = 2
+)
+
+var RowExistenceExpectation_name = map[int32]string{
+	0: "IGNORE",
+	1: "EXPECT_EXIST",
+	2: "EXPECT_NOT_EXIST",
+}
+var RowExistenceExpectation_value = map[string]int32{
+	"IGNORE":           0,
+	"EXPECT_EXIST":     1,
+	"EXPECT_NOT_EXIST": 2,
+}
+
+func (x RowExistenceExpectation) Enum() *RowExistenceExpectation {
+	p := new(RowExistenceExpectation)
+	*p = x
+	return p
+}
+func (x RowExistenceExpectation) String() string {
+	return proto.EnumName(RowExistenceExpectation_name, int32(x))
+}
+func (x *RowExistenceExpectation) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(RowExistenceExpectation_value, data, "RowExistenceExpectation")
+	if err != nil {
+		return err
+	}
+	*x = RowExistenceExpectation(value)
+	return nil
+}
+func (RowExistenceExpectation) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
+
+type ReturnType int32
+
+const (
+	ReturnType_RT_NONE         ReturnType = 0
+	ReturnType_RT_PK           ReturnType = 1
+	ReturnType_RT_AFTER_MODIFY ReturnType = 2
+)
+
+var ReturnType_name = map[int32]string{
+	0: "RT_NONE",
+	1: "RT_PK",
+	2: "RT_AFTER_MODIFY",
+}
+var ReturnType_value = map[string]int32{
+	"RT_NONE":         0,
+	"RT_PK":           1,
+	"RT_AFTER_MODIFY": 2,
+}
+
+func (x ReturnType) Enum() *ReturnType {
+	p := new(ReturnType)
+	*p = x
+	return p
+}
+func (x ReturnType) String() string {
+	return proto.EnumName(ReturnType_name, int32(x))
+}
+func (x *ReturnType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ReturnType_value, data, "ReturnType")
+	if err != nil {
+		return err
+	}
+	*x = ReturnType(value)
+	return nil
+}
+func (ReturnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
+
+type OperationType int32
+
+const (
+	OperationType_PUT    OperationType = 1
+	OperationType_UPDATE OperationType = 2
+	OperationType_DELETE OperationType = 3
+)
+
+var OperationType_name = map[int32]string{
+	1: "PUT",
+	2: "UPDATE",
+	3: "DELETE",
+}
+var OperationType_value = map[string]int32{
+	"PUT":    1,
+	"UPDATE": 2,
+	"DELETE": 3,
+}
+
+func (x 
OperationType) Enum() *OperationType { + p := new(OperationType) + *p = x + return p +} +func (x OperationType) String() string { + return proto.EnumName(OperationType_name, int32(x)) +} +func (x *OperationType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(OperationType_value, data, "OperationType") + if err != nil { + return err + } + *x = OperationType(value) + return nil +} +func (OperationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +// ############################################# GetRange ############################################# +type Direction int32 + +const ( + Direction_FORWARD Direction = 0 + Direction_BACKWARD Direction = 1 +) + +var Direction_name = map[int32]string{ + 0: "FORWARD", + 1: "BACKWARD", +} +var Direction_value = map[string]int32{ + "FORWARD": 0, + "BACKWARD": 1, +} + +func (x Direction) Enum() *Direction { + p := new(Direction) + *p = x + return p +} +func (x Direction) String() string { + return proto.EnumName(Direction_name, int32(x)) +} +func (x *Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Direction_value, data, "Direction") + if err != nil { + return err + } + *x = Direction(value) + return nil +} +func (Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +type StreamStatus int32 + +const ( + StreamStatus_STREAM_ENABLING StreamStatus = 1 + StreamStatus_STREAM_ACTIVE StreamStatus = 2 +) + +var StreamStatus_name = map[int32]string{ + 1: "STREAM_ENABLING", + 2: "STREAM_ACTIVE", +} +var StreamStatus_value = map[string]int32{ + "STREAM_ENABLING": 1, + "STREAM_ACTIVE": 2, +} + +func (x StreamStatus) Enum() *StreamStatus { + p := new(StreamStatus) + *p = x + return p +} +func (x StreamStatus) String() string { + return proto.EnumName(StreamStatus_name, int32(x)) +} +func (x *StreamStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(StreamStatus_value, data, "StreamStatus") + if err != nil { + return err + } + *x = StreamStatus(value) + return nil +} +func (StreamStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +type ActionType int32 + +const ( + ActionType_PUT_ROW ActionType = 1 + ActionType_UPDATE_ROW ActionType = 2 + ActionType_DELETE_ROW ActionType = 3 +) + +var ActionType_name = map[int32]string{ + 1: "PUT_ROW", + 2: "UPDATE_ROW", + 3: "DELETE_ROW", +} +var ActionType_value = map[string]int32{ + "PUT_ROW": 1, + "UPDATE_ROW": 2, + "DELETE_ROW": 3, +} + +func (x ActionType) Enum() *ActionType { + p := new(ActionType) + *p = x + return p +} +func (x ActionType) String() string { + return proto.EnumName(ActionType_name, int32(x)) +} +func (x *ActionType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ActionType_value, data, "ActionType") + if err != nil { + return err + } + *x = ActionType(value) + return nil +} +func (ActionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +type DefinedColumnType int32 + +const ( + DefinedColumnType_DCT_INTEGER DefinedColumnType = 1 + DefinedColumnType_DCT_DOUBLE DefinedColumnType = 2 + DefinedColumnType_DCT_BOOLEAN DefinedColumnType = 3 + DefinedColumnType_DCT_STRING DefinedColumnType = 4 + // field 5 is reserved for date type, not supported yet + // field 6 is reserved for decimal type, not supported yet + DefinedColumnType_DCT_BLOB DefinedColumnType = 7 +) + +var DefinedColumnType_name = map[int32]string{ + 1: "DCT_INTEGER", + 2: "DCT_DOUBLE", + 3: "DCT_BOOLEAN", + 4: 
"DCT_STRING", + 7: "DCT_BLOB", +} +var DefinedColumnType_value = map[string]int32{ + "DCT_INTEGER": 1, + "DCT_DOUBLE": 2, + "DCT_BOOLEAN": 3, + "DCT_STRING": 4, + "DCT_BLOB": 7, +} + +func (x DefinedColumnType) Enum() *DefinedColumnType { + p := new(DefinedColumnType) + *p = x + return p +} +func (x DefinedColumnType) String() string { + return proto.EnumName(DefinedColumnType_name, int32(x)) +} +func (x *DefinedColumnType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefinedColumnType_value, data, "DefinedColumnType") + if err != nil { + return err + } + *x = DefinedColumnType(value) + return nil +} +func (DefinedColumnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +type IndexUpdateMode int32 + +const ( + IndexUpdateMode_IUM_ASYNC_INDEX IndexUpdateMode = 0 + IndexUpdateMode_IUM_SYNC_INDEX IndexUpdateMode = 1 +) + +var IndexUpdateMode_name = map[int32]string{ + 0: "IUM_ASYNC_INDEX", + 1: "IUM_SYNC_INDEX", +} +var IndexUpdateMode_value = map[string]int32{ + "IUM_ASYNC_INDEX": 0, + "IUM_SYNC_INDEX": 1, +} + +func (x IndexUpdateMode) Enum() *IndexUpdateMode { + p := new(IndexUpdateMode) + *p = x + return p +} +func (x IndexUpdateMode) String() string { + return proto.EnumName(IndexUpdateMode_name, int32(x)) +} +func (x *IndexUpdateMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexUpdateMode_value, data, "IndexUpdateMode") + if err != nil { + return err + } + *x = IndexUpdateMode(value) + return nil +} +func (IndexUpdateMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +type IndexType int32 + +const ( + IndexType_IT_GLOBAL_INDEX IndexType = 0 + IndexType_IT_LOCAL_INDEX IndexType = 1 +) + +var IndexType_name = map[int32]string{ + 0: "IT_GLOBAL_INDEX", + 1: "IT_LOCAL_INDEX", +} +var IndexType_value = map[string]int32{ + "IT_GLOBAL_INDEX": 0, + "IT_LOCAL_INDEX": 1, +} + +func (x IndexType) Enum() *IndexType { + p := new(IndexType) + *p = x + return p +} +func (x IndexType) String() string { + return proto.EnumName(IndexType_name, int32(x)) +} +func (x *IndexType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexType_value, data, "IndexType") + if err != nil { + return err + } + *x = IndexType(value) + return nil +} +func (IndexType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{12} } + +type Error struct { + Code *string `protobuf:"bytes,1,req,name=code" json:"code,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Error) GetCode() string { + if m != nil && m.Code != nil { + return *m.Code + } + return "" +} + +func (m *Error) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type PrimaryKeySchema struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type *PrimaryKeyType `protobuf:"varint,2,req,name=type,enum=otsprotocol.PrimaryKeyType" json:"type,omitempty"` + Option *PrimaryKeyOption `protobuf:"varint,3,opt,name=option,enum=otsprotocol.PrimaryKeyOption" json:"option,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrimaryKeySchema) Reset() { *m = PrimaryKeySchema{} } +func (m *PrimaryKeySchema) String() string { 
return proto.CompactTextString(m) } +func (*PrimaryKeySchema) ProtoMessage() {} +func (*PrimaryKeySchema) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *PrimaryKeySchema) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *PrimaryKeySchema) GetType() PrimaryKeyType { + if m != nil && m.Type != nil { + return *m.Type + } + return PrimaryKeyType_INTEGER +} + +func (m *PrimaryKeySchema) GetOption() PrimaryKeyOption { + if m != nil && m.Option != nil { + return *m.Option + } + return PrimaryKeyOption_AUTO_INCREMENT +} + +type PartitionRange struct { + Begin []byte `protobuf:"bytes,1,req,name=begin" json:"begin,omitempty"` + End []byte `protobuf:"bytes,2,req,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PartitionRange) Reset() { *m = PartitionRange{} } +func (m *PartitionRange) String() string { return proto.CompactTextString(m) } +func (*PartitionRange) ProtoMessage() {} +func (*PartitionRange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *PartitionRange) GetBegin() []byte { + if m != nil { + return m.Begin + } + return nil +} + +func (m *PartitionRange) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type TableOptions struct { + TimeToLive *int32 `protobuf:"varint,1,opt,name=time_to_live" json:"time_to_live,omitempty"` + MaxVersions *int32 `protobuf:"varint,2,opt,name=max_versions" json:"max_versions,omitempty"` + BloomFilterType *BloomFilterType `protobuf:"varint,3,opt,name=bloom_filter_type,enum=otsprotocol.BloomFilterType" json:"bloom_filter_type,omitempty"` + BlockSize *int32 `protobuf:"varint,4,opt,name=block_size" json:"block_size,omitempty"` + DeviationCellVersionInSec *int64 `protobuf:"varint,5,opt,name=deviation_cell_version_in_sec" json:"deviation_cell_version_in_sec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (m *TableOptions) String() string { return proto.CompactTextString(m) } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *TableOptions) GetTimeToLive() int32 { + if m != nil && m.TimeToLive != nil { + return *m.TimeToLive + } + return 0 +} + +func (m *TableOptions) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *TableOptions) GetBloomFilterType() BloomFilterType { + if m != nil && m.BloomFilterType != nil { + return *m.BloomFilterType + } + return BloomFilterType_NONE +} + +func (m *TableOptions) GetBlockSize() int32 { + if m != nil && m.BlockSize != nil { + return *m.BlockSize + } + return 0 +} + +func (m *TableOptions) GetDeviationCellVersionInSec() int64 { + if m != nil && m.DeviationCellVersionInSec != nil { + return *m.DeviationCellVersionInSec + } + return 0 +} + +type TableMeta struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey []*PrimaryKeySchema `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + DefinedColumn []*DefinedColumnSchema `protobuf:"bytes,3,rep,name=defined_column" json:"defined_column,omitempty"` + IndexMeta []*IndexMeta `protobuf:"bytes,4,rep,name=index_meta" json:"index_meta,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableMeta) Reset() { *m = TableMeta{} } +func (m *TableMeta) String() string { return proto.CompactTextString(m) } +func (*TableMeta) 
ProtoMessage() {} +func (*TableMeta) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *TableMeta) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableMeta) GetPrimaryKey() []*PrimaryKeySchema { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *TableMeta) GetDefinedColumn() []*DefinedColumnSchema { + if m != nil { + return m.DefinedColumn + } + return nil +} + +func (m *TableMeta) GetIndexMeta() []*IndexMeta { + if m != nil { + return m.IndexMeta + } + return nil +} + +type Condition struct { + RowExistence *RowExistenceExpectation `protobuf:"varint,1,req,name=row_existence,enum=otsprotocol.RowExistenceExpectation" json:"row_existence,omitempty"` + ColumnCondition []byte `protobuf:"bytes,2,opt,name=column_condition" json:"column_condition,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Condition) Reset() { *m = Condition{} } +func (m *Condition) String() string { return proto.CompactTextString(m) } +func (*Condition) ProtoMessage() {} +func (*Condition) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *Condition) GetRowExistence() RowExistenceExpectation { + if m != nil && m.RowExistence != nil { + return *m.RowExistence + } + return RowExistenceExpectation_IGNORE +} + +func (m *Condition) GetColumnCondition() []byte { + if m != nil { + return m.ColumnCondition + } + return nil +} + +type CapacityUnit struct { + Read *int32 `protobuf:"varint,1,opt,name=read" json:"read,omitempty"` + Write *int32 `protobuf:"varint,2,opt,name=write" json:"write,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CapacityUnit) Reset() { *m = CapacityUnit{} } +func (m *CapacityUnit) String() string { return proto.CompactTextString(m) } +func (*CapacityUnit) ProtoMessage() {} +func (*CapacityUnit) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *CapacityUnit) GetRead() int32 { + if m != nil && m.Read != nil { + return *m.Read + } + return 0 +} + +func (m *CapacityUnit) GetWrite() int32 { + if m != nil && m.Write != nil { + return *m.Write + } + return 0 +} + +type ReservedThroughputDetails struct { + CapacityUnit *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"` + LastIncreaseTime *int64 `protobuf:"varint,2,req,name=last_increase_time" json:"last_increase_time,omitempty"` + LastDecreaseTime *int64 `protobuf:"varint,3,opt,name=last_decrease_time" json:"last_decrease_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReservedThroughputDetails) Reset() { *m = ReservedThroughputDetails{} } +func (m *ReservedThroughputDetails) String() string { return proto.CompactTextString(m) } +func (*ReservedThroughputDetails) ProtoMessage() {} +func (*ReservedThroughputDetails) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *ReservedThroughputDetails) GetCapacityUnit() *CapacityUnit { + if m != nil { + return m.CapacityUnit + } + return nil +} + +func (m *ReservedThroughputDetails) GetLastIncreaseTime() int64 { + if m != nil && m.LastIncreaseTime != nil { + return *m.LastIncreaseTime + } + return 0 +} + +func (m *ReservedThroughputDetails) GetLastDecreaseTime() int64 { + if m != nil && m.LastDecreaseTime != nil { + return *m.LastDecreaseTime + } + return 0 +} + +type ReservedThroughput struct { + CapacityUnit *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m 
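Since every field in these proto2 messages is a pointer (optional fields must distinguish unset from zero), the generated Get* accessors seen throughout this file are the safe way to read them: they nil-check both the receiver and the field and fall back to the declared default. A small illustrative sketch, not part of the vendored code:

    opts := &otsprotocol.TableOptions{MaxVersions: proto.Int32(1)}
    opts.GetMaxVersions()  // 1
    opts.GetTimeToLive()   // 0: unset optional field, no nil-check needed at the call site
    var missing *otsprotocol.TableOptions
    missing.GetBlockSize() // 0: safe even on a nil receiver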
*ReservedThroughput) Reset() { *m = ReservedThroughput{} }
+func (m *ReservedThroughput) String() string { return proto.CompactTextString(m) }
+func (*ReservedThroughput) ProtoMessage() {}
+func (*ReservedThroughput) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} }
+
+func (m *ReservedThroughput) GetCapacityUnit() *CapacityUnit {
+	if m != nil {
+		return m.CapacityUnit
+	}
+	return nil
+}
+
+type ConsumedCapacity struct {
+	CapacityUnit     *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *ConsumedCapacity) Reset() { *m = ConsumedCapacity{} }
+func (m *ConsumedCapacity) String() string { return proto.CompactTextString(m) }
+func (*ConsumedCapacity) ProtoMessage() {}
+func (*ConsumedCapacity) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} }
+
+func (m *ConsumedCapacity) GetCapacityUnit() *CapacityUnit {
+	if m != nil {
+		return m.CapacityUnit
+	}
+	return nil
+}
+
+type StreamSpecification struct {
+	EnableStream     *bool  `protobuf:"varint,1,req,name=enable_stream" json:"enable_stream,omitempty"`
+	ExpirationTime   *int32 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StreamSpecification) Reset() { *m = StreamSpecification{} }
+func (m *StreamSpecification) String() string { return proto.CompactTextString(m) }
+func (*StreamSpecification) ProtoMessage() {}
+func (*StreamSpecification) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} }
+
+func (m *StreamSpecification) GetEnableStream() bool {
+	if m != nil && m.EnableStream != nil {
+		return *m.EnableStream
+	}
+	return false
+}
+
+func (m *StreamSpecification) GetExpirationTime() int32 {
+	if m != nil && m.ExpirationTime != nil {
+		return *m.ExpirationTime
+	}
+	return 0
+}
+
+type StreamDetails struct {
+	EnableStream     *bool   `protobuf:"varint,1,req,name=enable_stream" json:"enable_stream,omitempty"`
+	StreamId         *string `protobuf:"bytes,2,opt,name=stream_id" json:"stream_id,omitempty"`
+	ExpirationTime   *int32  `protobuf:"varint,3,opt,name=expiration_time" json:"expiration_time,omitempty"`
+	LastEnableTime   *int64  `protobuf:"varint,4,opt,name=last_enable_time" json:"last_enable_time,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StreamDetails) Reset() { *m = StreamDetails{} }
+func (m *StreamDetails) String() string { return proto.CompactTextString(m) }
+func (*StreamDetails) ProtoMessage() {}
+func (*StreamDetails) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} }
+
+func (m *StreamDetails) GetEnableStream() bool {
+	if m != nil && m.EnableStream != nil {
+		return *m.EnableStream
+	}
+	return false
+}
+
+func (m *StreamDetails) GetStreamId() string {
+	if m != nil && m.StreamId != nil {
+		return *m.StreamId
+	}
+	return ""
+}
+
+func (m *StreamDetails) GetExpirationTime() int32 {
+	if m != nil && m.ExpirationTime != nil {
+		return *m.ExpirationTime
+	}
+	return 0
+}
+
+func (m *StreamDetails) GetLastEnableTime() int64 {
+	if m != nil && m.LastEnableTime != nil {
+		return *m.LastEnableTime
+	}
+	return 0
+}
+
+// *
+// table_meta stores the immutable schema attributes of the table; the mutable
+// ReservedThroughput and TableOptions are kept separate and serve as the parameters
+// of UpdateTable.
+// Once GlobalIndex and LocalIndex are added, the structure becomes:
+// message CreateTableRequest {
+//     required TableMeta table_meta = 1;
+//     required ReservedThroughput reserved_throughput = 2;
+//     required TableOptions table_options = 3;
+//     repeated LocalIndex local_indexes = 4;   // a LocalIndex no longer carries its own ReservedThroughput and TableOptions; it shares the main table's configuration.
+//     repeated GlobalIndex global_indexes = 5; // a GlobalIndex carries its own ReservedThroughput and TableOptions
+// }
+type CreateTableRequest struct {
+	TableMeta          *TableMeta           `protobuf:"bytes,1,req,name=table_meta" json:"table_meta,omitempty"`
+	ReservedThroughput *ReservedThroughput  `protobuf:"bytes,2,req,name=reserved_throughput" json:"reserved_throughput,omitempty"`
+	TableOptions       *TableOptions        `protobuf:"bytes,3,opt,name=table_options" json:"table_options,omitempty"`
+	Partitions         []*PartitionRange    `protobuf:"bytes,4,rep,name=partitions" json:"partitions,omitempty"`
+	StreamSpec         *StreamSpecification `protobuf:"bytes,5,opt,name=stream_spec" json:"stream_spec,omitempty"`
+	IndexMetas         []*IndexMeta         `protobuf:"bytes,7,rep,name=index_metas" json:"index_metas,omitempty"`
+	XXX_unrecognized   []byte               `json:"-"`
+}
+
+func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} }
+func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateTableRequest) ProtoMessage() {}
+func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{12} }
+
+func (m *CreateTableRequest) GetTableMeta() *TableMeta {
+	if m != nil {
+		return m.TableMeta
+	}
+	return nil
+}
+
+func (m *CreateTableRequest) GetReservedThroughput() *ReservedThroughput {
+	if m != nil {
+		return m.ReservedThroughput
+	}
+	return nil
+}
+
+func (m *CreateTableRequest) GetTableOptions() *TableOptions {
+	if m != nil {
+		return m.TableOptions
+	}
+	return nil
+}
+
+func (m *CreateTableRequest) GetPartitions() []*PartitionRange {
+	if m != nil {
+		return m.Partitions
+	}
+	return nil
+}
+
+func (m *CreateTableRequest) GetStreamSpec() *StreamSpecification {
+	if m != nil {
+		return m.StreamSpec
+	}
+	return nil
+}
+
+func (m *CreateTableRequest) GetIndexMetas() []*IndexMeta {
+	if m != nil {
+		return m.IndexMetas
+	}
+	return nil
+}
+
+type CreateTableResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateTableResponse) Reset() { *m = CreateTableResponse{} }
+func (m *CreateTableResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateTableResponse) ProtoMessage() {}
+func (*CreateTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{13} }
+
+// ############################################# UpdateTable #############################################
+type UpdateTableRequest struct {
+	TableName          *string              `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"`
+	ReservedThroughput *ReservedThroughput  `protobuf:"bytes,2,opt,name=reserved_throughput" json:"reserved_throughput,omitempty"`
+	TableOptions       *TableOptions        `protobuf:"bytes,3,opt,name=table_options" json:"table_options,omitempty"`
+	StreamSpec         *StreamSpecification `protobuf:"bytes,4,opt,name=stream_spec" json:"stream_spec,omitempty"`
+	XXX_unrecognized   []byte               `json:"-"`
+}
+
+func (m *UpdateTableRequest) Reset() { *m = UpdateTableRequest{} }
+func (m *UpdateTableRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateTableRequest) ProtoMessage() {}
+func (*UpdateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{14} }
+
+func (m *UpdateTableRequest) GetTableName() string {
+	if m != nil && m.TableName != nil {
+		return *m.TableName
+	}
+	return ""
+}
+
+func (m *UpdateTableRequest) GetReservedThroughput() *ReservedThroughput {
+	if m != nil {
+		return m.ReservedThroughput
+	}
+	return nil
+}
+
+func (m *UpdateTableRequest) GetTableOptions() *TableOptions {
+	if m != 
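To make the split described in the comment above concrete: a CreateTableRequest carries the immutable schema in TableMeta and the mutable knobs alongside it. A minimal, illustrative assembly (all names are invented; this sketch is not part of the vendored code, and the TimeToLive semantics are assumed):

    createReq := &otsprotocol.CreateTableRequest{
        TableMeta: &otsprotocol.TableMeta{
            TableName: proto.String("terraform_state_lock"), // hypothetical
            PrimaryKey: []*otsprotocol.PrimaryKeySchema{{
                Name: proto.String("LockID"), // hypothetical
                Type: otsprotocol.PrimaryKeyType_STRING.Enum(),
            }},
        },
        ReservedThroughput: &otsprotocol.ReservedThroughput{
            CapacityUnit: &otsprotocol.CapacityUnit{
                Read:  proto.Int32(0),
                Write: proto.Int32(0),
            },
        },
        TableOptions: &otsprotocol.TableOptions{
            TimeToLive:  proto.Int32(-1), // assumed: -1 means the data never expires
            MaxVersions: proto.Int32(1),
        },
    }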
nil { + return m.TableOptions + } + return nil +} + +func (m *UpdateTableRequest) GetStreamSpec() *StreamSpecification { + if m != nil { + return m.StreamSpec + } + return nil +} + +type UpdateTableResponse struct { + ReservedThroughputDetails *ReservedThroughputDetails `protobuf:"bytes,1,req,name=reserved_throughput_details" json:"reserved_throughput_details,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,2,req,name=table_options" json:"table_options,omitempty"` + StreamDetails *StreamDetails `protobuf:"bytes,3,opt,name=stream_details" json:"stream_details,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateTableResponse) Reset() { *m = UpdateTableResponse{} } +func (m *UpdateTableResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateTableResponse) ProtoMessage() {} +func (*UpdateTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{15} } + +func (m *UpdateTableResponse) GetReservedThroughputDetails() *ReservedThroughputDetails { + if m != nil { + return m.ReservedThroughputDetails + } + return nil +} + +func (m *UpdateTableResponse) GetTableOptions() *TableOptions { + if m != nil { + return m.TableOptions + } + return nil +} + +func (m *UpdateTableResponse) GetStreamDetails() *StreamDetails { + if m != nil { + return m.StreamDetails + } + return nil +} + +// ############################################# DescribeTable ############################################# +type DescribeTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeTableRequest) Reset() { *m = DescribeTableRequest{} } +func (m *DescribeTableRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeTableRequest) ProtoMessage() {} +func (*DescribeTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{16} } + +func (m *DescribeTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type DescribeTableResponse struct { + TableMeta *TableMeta `protobuf:"bytes,1,req,name=table_meta" json:"table_meta,omitempty"` + ReservedThroughputDetails *ReservedThroughputDetails `protobuf:"bytes,2,req,name=reserved_throughput_details" json:"reserved_throughput_details,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,3,req,name=table_options" json:"table_options,omitempty"` + TableStatus *TableStatus `protobuf:"varint,4,req,name=table_status,enum=otsprotocol.TableStatus" json:"table_status,omitempty"` + StreamDetails *StreamDetails `protobuf:"bytes,5,opt,name=stream_details" json:"stream_details,omitempty"` + ShardSplits [][]byte `protobuf:"bytes,6,rep,name=shard_splits" json:"shard_splits,omitempty"` + IndexMetas []*IndexMeta `protobuf:"bytes,8,rep,name=index_metas" json:"index_metas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeTableResponse) Reset() { *m = DescribeTableResponse{} } +func (m *DescribeTableResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeTableResponse) ProtoMessage() {} +func (*DescribeTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{17} } + +func (m *DescribeTableResponse) GetTableMeta() *TableMeta { + if m != nil { + return m.TableMeta + } + return nil +} + +func (m *DescribeTableResponse) GetReservedThroughputDetails() *ReservedThroughputDetails { + if m != nil { + return m.ReservedThroughputDetails + } + return nil +} + +func (m 
*DescribeTableResponse) GetTableOptions() *TableOptions {
+	if m != nil {
+		return m.TableOptions
+	}
+	return nil
+}
+
+func (m *DescribeTableResponse) GetTableStatus() TableStatus {
+	if m != nil && m.TableStatus != nil {
+		return *m.TableStatus
+	}
+	return TableStatus_ACTIVE
+}
+
+func (m *DescribeTableResponse) GetStreamDetails() *StreamDetails {
+	if m != nil {
+		return m.StreamDetails
+	}
+	return nil
+}
+
+func (m *DescribeTableResponse) GetShardSplits() [][]byte {
+	if m != nil {
+		return m.ShardSplits
+	}
+	return nil
+}
+
+func (m *DescribeTableResponse) GetIndexMetas() []*IndexMeta {
+	if m != nil {
+		return m.IndexMetas
+	}
+	return nil
+}
+
+// ############################################# ListTable #############################################
+type ListTableRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListTableRequest) Reset() { *m = ListTableRequest{} }
+func (m *ListTableRequest) String() string { return proto.CompactTextString(m) }
+func (*ListTableRequest) ProtoMessage() {}
+func (*ListTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{18} }
+
+// *
+// Currently only a plain list of table names is returned; whether any business scenario
+// needs more than the names is still to be discussed.
+// Extra information could include the reserved throughput and the table status, but that
+// could only be a rough summary; detailed table information still has to be fetched via
+// DescribeTable.
+type ListTableResponse struct {
+	TableNames       []string `protobuf:"bytes,1,rep,name=table_names" json:"table_names,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *ListTableResponse) Reset() { *m = ListTableResponse{} }
+func (m *ListTableResponse) String() string { return proto.CompactTextString(m) }
+func (*ListTableResponse) ProtoMessage() {}
+func (*ListTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{19} }
+
+func (m *ListTableResponse) GetTableNames() []string {
+	if m != nil {
+		return m.TableNames
+	}
+	return nil
+}
+
+// ############################################# DeleteTable #############################################
+type DeleteTableRequest struct {
+	TableName        *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} }
+func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteTableRequest) ProtoMessage() {}
+func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{20} }
+
+func (m *DeleteTableRequest) GetTableName() string {
+	if m != nil && m.TableName != nil {
+		return *m.TableName
+	}
+	return ""
+}
+
+type DeleteTableResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteTableResponse) Reset() { *m = DeleteTableResponse{} }
+func (m *DeleteTableResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteTableResponse) ProtoMessage() {}
+func (*DeleteTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{21} }
+
+// ############################################# LoadTable #############################################
+type LoadTableRequest struct {
+	TableName        *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LoadTableRequest) Reset() { *m = LoadTableRequest{} }
+func (m *LoadTableRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadTableRequest) ProtoMessage() {}
+func (*LoadTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{22} }
+
+func (m *LoadTableRequest) GetTableName() string {
+	if m != nil && m.TableName != nil {
+		return *m.TableName
+	}
+	return ""
+}
+
+type LoadTableResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LoadTableResponse) Reset() { *m = LoadTableResponse{} }
+func (m *LoadTableResponse) String() string { return proto.CompactTextString(m) }
+func (*LoadTableResponse) ProtoMessage() {}
+func (*LoadTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{23} }
+
+// ############################################# UnloadTable #############################################
+type UnloadTableRequest struct {
+	TableName        *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *UnloadTableRequest) Reset() { *m = UnloadTableRequest{} }
+func (m *UnloadTableRequest) String() string { return proto.CompactTextString(m) }
+func (*UnloadTableRequest) ProtoMessage() {}
+func (*UnloadTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{24} }
+
+func (m *UnloadTableRequest) GetTableName() string {
+	if m != nil && m.TableName != nil {
+		return *m.TableName
+	}
+	return ""
+}
+
+type UnloadTableResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UnloadTableResponse) Reset() { *m = UnloadTableResponse{} }
+func (m *UnloadTableResponse) String() string { return proto.CompactTextString(m) }
+func (*UnloadTableResponse) ProtoMessage() {}
+func (*UnloadTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{25} }
+
+// *
+// Timestamps range from a minimum of 0 to a maximum of INT64.MAX.
+// 1. To query a range, set start_time and end_time.
+// 2. To query one specific timestamp, set specific_time.
+type TimeRange struct {
+	StartTime        *int64 `protobuf:"varint,1,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime          *int64 `protobuf:"varint,2,opt,name=end_time" json:"end_time,omitempty"`
+	SpecificTime     *int64 `protobuf:"varint,3,opt,name=specific_time" json:"specific_time,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TimeRange) Reset() { *m = TimeRange{} }
+func (m *TimeRange) String() string { return proto.CompactTextString(m) }
+func (*TimeRange) ProtoMessage() {}
+func (*TimeRange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{26} }
+
+func (m *TimeRange) GetStartTime() int64 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *TimeRange) GetEndTime() int64 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *TimeRange) GetSpecificTime() int64 {
+	if m != nil && m.SpecificTime != nil {
+		return *m.SpecificTime
+	}
+	return 0
+}
+
+type ReturnContent struct {
+	ReturnType        *ReturnType `protobuf:"varint,1,opt,name=return_type,enum=otsprotocol.ReturnType" json:"return_type,omitempty"`
+	ReturnColumnNames []string    `protobuf:"bytes,2,rep,name=return_column_names" json:"return_column_names,omitempty"`
+	XXX_unrecognized  []byte      `json:"-"`
+}
+
+func (m *ReturnContent) Reset() { *m = ReturnContent{} }
+func (m *ReturnContent) String() string { return proto.CompactTextString(m) }
+func (*ReturnContent) ProtoMessage() {}
+func (*ReturnContent) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{27} }
+
+func (m *ReturnContent) GetReturnType() ReturnType {
+	if m != nil && m.ReturnType != nil {
+		return *m.ReturnType
+	}
+	return ReturnType_RT_NONE
+}
+
+func (m *ReturnContent) GetReturnColumnNames() []string {
+	if m != nil {
+		return m.ReturnColumnNames
+	}
+	return nil
+}
+
+// *
+// 1. The user may specify a version-timestamp range or one specific version time to read
+//    the matching versions of the columns.
+// 2. Resuming from a breakpoint inside a row is not supported yet.
+type GetRowRequest struct {
+	TableName        *string    `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"`
+	PrimaryKey       []byte     `protobuf:"bytes,2,req,name=primary_key" json:"primary_key,omitempty"`
+	ColumnsToGet     []string   `protobuf:"bytes,3,rep,name=columns_to_get" json:"columns_to_get,omitempty"`
+	TimeRange        *TimeRange `protobuf:"bytes,4,opt,name=time_range" json:"time_range,omitempty"`
+	MaxVersions      *int32     `protobuf:"varint,5,opt,name=max_versions" json:"max_versions,omitempty"`
+	CacheBlocks      *bool      `protobuf:"varint,6,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"`
+	Filter           []byte     `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"`
+	StartColumn      *string    `protobuf:"bytes,8,opt,name=start_column" json:"start_column,omitempty"`
+	EndColumn        *string    `protobuf:"bytes,9,opt,name=end_column" json:"end_column,omitempty"`
+	Token            []byte     `protobuf:"bytes,10,opt,name=token" json:"token,omitempty"`
+	TransactionId    *string    `protobuf:"bytes,11,opt,name=transaction_id" json:"transaction_id,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *GetRowRequest) Reset() { *m = GetRowRequest{} }
+func (m *GetRowRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRowRequest) ProtoMessage() {}
+func (*GetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{28} }
+
+const Default_GetRowRequest_CacheBlocks bool = true
+
+func (m *GetRowRequest) GetTableName() string {
+	if m != nil && m.TableName != nil {
+		return *m.TableName
+	}
+	return ""
+}
+
+func (m *GetRowRequest) GetPrimaryKey() []byte {
+	if m != nil {
+		return m.PrimaryKey
+	}
+	return nil
+}
+
+func (m *GetRowRequest) GetColumnsToGet() []string {
+	if m != nil {
+		return m.ColumnsToGet
+	}
+	return nil
+}
+
+func (m *GetRowRequest) GetTimeRange() *TimeRange {
+	if m != nil {
+		return m.TimeRange
+	}
+	return nil
+}
+
+func (m *GetRowRequest) GetMaxVersions() int32 {
+	if m != nil && m.MaxVersions != nil {
+		return *m.MaxVersions
+	}
+	return 0
+}
+
+func (m *GetRowRequest) GetCacheBlocks() bool {
+	if m != nil && m.CacheBlocks != nil {
+		return *m.CacheBlocks
+	}
+	return Default_GetRowRequest_CacheBlocks
+}
+
+func (m *GetRowRequest) GetFilter() []byte {
+	if m != nil {
+		return m.Filter
+	}
+	return nil
+}
+
+func (m *GetRowRequest) GetStartColumn() string {
+	if m != nil && m.StartColumn != nil {
+		return *m.StartColumn
+	}
+	return ""
+}
+
+func (m *GetRowRequest) GetEndColumn() string {
+	if m != nil && m.EndColumn != nil {
+		return *m.EndColumn
+	}
+	return ""
+}
+
+func (m *GetRowRequest) GetToken() []byte {
+	if m != nil {
+		return m.Token
+	}
+	return nil
+}
+
+func (m *GetRowRequest) GetTransactionId() string {
+	if m != nil && m.TransactionId != nil {
+		return *m.TransactionId
+	}
+	return ""
+}
+
+type GetRowResponse struct {
+	Consumed         *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"`
+	Row              []byte            `protobuf:"bytes,2,req,name=row" json:"row,omitempty"`
+	NextToken        []byte            `protobuf:"bytes,3,opt,name=next_token" json:"next_token,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *GetRowResponse) Reset() { *m = GetRowResponse{} }
+func (m *GetRowResponse) String() string { return proto.CompactTextString(m) }
+func (*GetRowResponse) ProtoMessage() {}
+func (*GetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{29} }
+
+func (m *GetRowResponse) GetConsumed() *ConsumedCapacity {
+	if m != nil {
+		return m.Consumed
+	}
+	return nil
+}
+
+func (m *GetRowResponse) GetRow() []byte {
+	if 
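Per the TimeRange comment above, a read names either a start_time/end_time window or one specific_time, never both. Two illustrative GetRowRequest fragments (pkBytes, now, and ts are assumed to exist; nothing here is part of the vendored code):

    // Window read: versions whose timestamps fall between start_time and end_time.
    windowRead := &otsprotocol.GetRowRequest{
        TableName:  proto.String("terraform_state_lock"), // hypothetical
        PrimaryKey: pkBytes, // plainbuffer-encoded primary key, built elsewhere
        TimeRange:  &otsprotocol.TimeRange{StartTime: proto.Int64(0), EndTime: proto.Int64(now)},
    }
    // Point read: exactly one version timestamp.
    pointRead := &otsprotocol.GetRowRequest{
        TableName:  proto.String("terraform_state_lock"),
        PrimaryKey: pkBytes,
        TimeRange:  &otsprotocol.TimeRange{SpecificTime: proto.Int64(ts)},
    }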
m != nil { + return m.Row + } + return nil +} + +func (m *GetRowResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +// ############################################# UpdateRow ############################################# +type UpdateRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + RowChange []byte `protobuf:"bytes,2,req,name=row_change" json:"row_change,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateRowRequest) Reset() { *m = UpdateRowRequest{} } +func (m *UpdateRowRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRowRequest) ProtoMessage() {} +func (*UpdateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{30} } + +func (m *UpdateRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *UpdateRowRequest) GetRowChange() []byte { + if m != nil { + return m.RowChange + } + return nil +} + +func (m *UpdateRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *UpdateRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *UpdateRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type UpdateRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateRowResponse) Reset() { *m = UpdateRowResponse{} } +func (m *UpdateRowResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateRowResponse) ProtoMessage() {} +func (*UpdateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{31} } + +func (m *UpdateRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *UpdateRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +// * +// 这里允许用户为每列单独设置timestamp,而不是强制整行统一一个timestamp。 +// 原因是列都是用统一的结构,该结构本身是带timestamp的,其次强制统一timestamp增强了规范性但是丧失了灵活性,且该规范性没有明显的好处,反而带来了结构的复杂。 +type PutRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Row []byte `protobuf:"bytes,2,req,name=row" json:"row,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRowRequest) Reset() { *m = PutRowRequest{} } +func (m *PutRowRequest) String() string { return proto.CompactTextString(m) } +func (*PutRowRequest) ProtoMessage() {} +func (*PutRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{32} } + +func (m *PutRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func 
+// * +// Users are allowed to set a timestamp for each column individually here, rather than being forced to use a single timestamp for the whole row. +// The reason is that all columns share one structure, and that structure already carries a timestamp; moreover, forcing a uniform timestamp would add regularity at the cost of flexibility, and that regularity has no obvious benefit while complicating the structure. +type PutRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Row []byte `protobuf:"bytes,2,req,name=row" json:"row,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRowRequest) Reset() { *m = PutRowRequest{} } +func (m *PutRowRequest) String() string { return proto.CompactTextString(m) } +func (*PutRowRequest) ProtoMessage() {} +func (*PutRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{32} } + +func (m *PutRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *PutRowRequest) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *PutRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *PutRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *PutRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type PutRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRowResponse) Reset() { *m = PutRowResponse{} } +func (m *PutRowResponse) String() string { return proto.CompactTextString(m) } +func (*PutRowResponse) ProtoMessage() {} +func (*PutRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{33} } + +func (m *PutRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *PutRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +// * +// OTS only supports deleting all versions of all columns of a row; it does not support: +// 1. deleting, across all columns, every version less than or equal to a given version +type DeleteRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey []byte `protobuf:"bytes,2,req,name=primary_key" json:"primary_key,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRowRequest) Reset() { *m = DeleteRowRequest{} } +func (m *DeleteRowRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRowRequest) ProtoMessage() {} +func (*DeleteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{34} } + +func (m *DeleteRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DeleteRowRequest) GetPrimaryKey() []byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *DeleteRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *DeleteRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *DeleteRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type DeleteRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRowResponse) Reset() { *m = DeleteRowResponse{} } +func (m *DeleteRowResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRowResponse) ProtoMessage() {} +func (*DeleteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{35} } + +func (m *DeleteRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *DeleteRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} +
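+// Editorial sketch (not emitted by protoc): as the comment below notes, OTS
+// batch reads share one set of query parameters per table entry, so
+// ColumnsToGet and MaxVersions apply to every primary key listed. All names
+// and keys are placeholders.
+func exampleBatchGetRowRequest(pk1, pk2 []byte) *BatchGetRowRequest {
+	return &BatchGetRowRequest{
+		Tables: []*TableInBatchGetRowRequest{{
+			TableName:    proto.String("example_table"),
+			PrimaryKey:   [][]byte{pk1, pk2},         // two rows, one table entry
+			ColumnsToGet: []string{"col_a", "col_b"}, // shared by both rows
+			MaxVersions:  proto.Int32(1),
+		}},
+	}
+}
+
+// *
+// HBase allows each row in a batch operation to carry its own query parameters; OTS does not.
+type TableInBatchGetRowRequest 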
struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey [][]byte `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + Token [][]byte `protobuf:"bytes,3,rep,name=token" json:"token,omitempty"` + ColumnsToGet []string `protobuf:"bytes,4,rep,name=columns_to_get" json:"columns_to_get,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,5,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *int32 `protobuf:"varint,6,opt,name=max_versions" json:"max_versions,omitempty"` + CacheBlocks *bool `protobuf:"varint,7,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + Filter []byte `protobuf:"bytes,8,opt,name=filter" json:"filter,omitempty"` + StartColumn *string `protobuf:"bytes,9,opt,name=start_column" json:"start_column,omitempty"` + EndColumn *string `protobuf:"bytes,10,opt,name=end_column" json:"end_column,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchGetRowRequest) Reset() { *m = TableInBatchGetRowRequest{} } +func (m *TableInBatchGetRowRequest) String() string { return proto.CompactTextString(m) } +func (*TableInBatchGetRowRequest) ProtoMessage() {} +func (*TableInBatchGetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{36} } + +const Default_TableInBatchGetRowRequest_CacheBlocks bool = true + +func (m *TableInBatchGetRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchGetRowRequest) GetPrimaryKey() [][]byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetToken() [][]byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetColumnsToGet() []string { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *TableInBatchGetRowRequest) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_TableInBatchGetRowRequest_CacheBlocks +} + +func (m *TableInBatchGetRowRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetStartColumn() string { + if m != nil && m.StartColumn != nil { + return *m.StartColumn + } + return "" +} + +func (m *TableInBatchGetRowRequest) GetEndColumn() string { + if m != nil && m.EndColumn != nil { + return *m.EndColumn + } + return "" +} + +type BatchGetRowRequest struct { + Tables []*TableInBatchGetRowRequest `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchGetRowRequest) Reset() { *m = BatchGetRowRequest{} } +func (m *BatchGetRowRequest) String() string { return proto.CompactTextString(m) } +func (*BatchGetRowRequest) ProtoMessage() {} +func (*BatchGetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{37} } + +func (m *BatchGetRowRequest) GetTables() []*TableInBatchGetRowRequest { + if m != nil { + return m.Tables + } + return nil +} + +type RowInBatchGetRowResponse struct { + IsOk *bool `protobuf:"varint,1,req,name=is_ok" json:"is_ok,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + Consumed *ConsumedCapacity 
`protobuf:"bytes,3,opt,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,4,opt,name=row" json:"row,omitempty"` + NextToken []byte `protobuf:"bytes,5,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchGetRowResponse) Reset() { *m = RowInBatchGetRowResponse{} } +func (m *RowInBatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*RowInBatchGetRowResponse) ProtoMessage() {} +func (*RowInBatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{38} } + +func (m *RowInBatchGetRowResponse) GetIsOk() bool { + if m != nil && m.IsOk != nil { + return *m.IsOk + } + return false +} + +func (m *RowInBatchGetRowResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type TableInBatchGetRowResponse struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows []*RowInBatchGetRowResponse `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchGetRowResponse) Reset() { *m = TableInBatchGetRowResponse{} } +func (m *TableInBatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*TableInBatchGetRowResponse) ProtoMessage() {} +func (*TableInBatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{39} } + +func (m *TableInBatchGetRowResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchGetRowResponse) GetRows() []*RowInBatchGetRowResponse { + if m != nil { + return m.Rows + } + return nil +} + +type BatchGetRowResponse struct { + Tables []*TableInBatchGetRowResponse `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchGetRowResponse) Reset() { *m = BatchGetRowResponse{} } +func (m *BatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*BatchGetRowResponse) ProtoMessage() {} +func (*BatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{40} } + +func (m *BatchGetRowResponse) GetTables() []*TableInBatchGetRowResponse { + if m != nil { + return m.Tables + } + return nil +} + +type RowInBatchWriteRowRequest struct { + Type *OperationType `protobuf:"varint,1,req,name=type,enum=otsprotocol.OperationType" json:"type,omitempty"` + RowChange []byte `protobuf:"bytes,2,req,name=row_change" json:"row_change,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchWriteRowRequest) Reset() { *m = RowInBatchWriteRowRequest{} } +func (m *RowInBatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*RowInBatchWriteRowRequest) ProtoMessage() {} +func (*RowInBatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{41} } + +func (m *RowInBatchWriteRowRequest) GetType() OperationType { + if m != nil && 
m.Type != nil { + return *m.Type + } + return OperationType_PUT +} + +func (m *RowInBatchWriteRowRequest) GetRowChange() []byte { + if m != nil { + return m.RowChange + } + return nil +} + +func (m *RowInBatchWriteRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *RowInBatchWriteRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +type TableInBatchWriteRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows []*RowInBatchWriteRowRequest `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchWriteRowRequest) Reset() { *m = TableInBatchWriteRowRequest{} } +func (m *TableInBatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*TableInBatchWriteRowRequest) ProtoMessage() {} +func (*TableInBatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{42} } + +func (m *TableInBatchWriteRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchWriteRowRequest) GetRows() []*RowInBatchWriteRowRequest { + if m != nil { + return m.Rows + } + return nil +} + +type BatchWriteRowRequest struct { + Tables []*TableInBatchWriteRowRequest `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + TransactionId *string `protobuf:"bytes,2,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchWriteRowRequest) Reset() { *m = BatchWriteRowRequest{} } +func (m *BatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*BatchWriteRowRequest) ProtoMessage() {} +func (*BatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{43} } + +func (m *BatchWriteRowRequest) GetTables() []*TableInBatchWriteRowRequest { + if m != nil { + return m.Tables + } + return nil +} + +func (m *BatchWriteRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type RowInBatchWriteRowResponse struct { + IsOk *bool `protobuf:"varint,1,req,name=is_ok" json:"is_ok,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + Consumed *ConsumedCapacity `protobuf:"bytes,3,opt,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,4,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchWriteRowResponse) Reset() { *m = RowInBatchWriteRowResponse{} } +func (m *RowInBatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*RowInBatchWriteRowResponse) ProtoMessage() {} +func (*RowInBatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{44} } + +func (m *RowInBatchWriteRowResponse) GetIsOk() bool { + if m != nil && m.IsOk != nil { + return *m.IsOk + } + return false +} + +func (m *RowInBatchWriteRowResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *RowInBatchWriteRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *RowInBatchWriteRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +type TableInBatchWriteRowResponse struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows 
[]*RowInBatchWriteRowResponse `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchWriteRowResponse) Reset() { *m = TableInBatchWriteRowResponse{} } +func (m *TableInBatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*TableInBatchWriteRowResponse) ProtoMessage() {} +func (*TableInBatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{45} } + +func (m *TableInBatchWriteRowResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchWriteRowResponse) GetRows() []*RowInBatchWriteRowResponse { + if m != nil { + return m.Rows + } + return nil +} + +type BatchWriteRowResponse struct { + Tables []*TableInBatchWriteRowResponse `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchWriteRowResponse) Reset() { *m = BatchWriteRowResponse{} } +func (m *BatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*BatchWriteRowResponse) ProtoMessage() {} +func (*BatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{46} } + +func (m *BatchWriteRowResponse) GetTables() []*TableInBatchWriteRowResponse { + if m != nil { + return m.Tables + } + return nil +} + +// * +// HBase supports the following parameters: +// 1. TimeRange or a specific time +// 2. Filter (filtering by column value or column name) +// We only support selection conditions applied to the same version. +type GetRangeRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Direction *Direction `protobuf:"varint,2,req,name=direction,enum=otsprotocol.Direction" json:"direction,omitempty"` + ColumnsToGet []string `protobuf:"bytes,3,rep,name=columns_to_get" json:"columns_to_get,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,4,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *int32 `protobuf:"varint,5,opt,name=max_versions" json:"max_versions,omitempty"` + Limit *int32 `protobuf:"varint,6,opt,name=limit" json:"limit,omitempty"` + InclusiveStartPrimaryKey []byte `protobuf:"bytes,7,req,name=inclusive_start_primary_key" json:"inclusive_start_primary_key,omitempty"` + ExclusiveEndPrimaryKey []byte `protobuf:"bytes,8,req,name=exclusive_end_primary_key" json:"exclusive_end_primary_key,omitempty"` + CacheBlocks *bool `protobuf:"varint,9,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + Filter []byte `protobuf:"bytes,10,opt,name=filter" json:"filter,omitempty"` + StartColumn *string `protobuf:"bytes,11,opt,name=start_column" json:"start_column,omitempty"` + EndColumn *string `protobuf:"bytes,12,opt,name=end_column" json:"end_column,omitempty"` + Token []byte `protobuf:"bytes,13,opt,name=token" json:"token,omitempty"` + TransactionId *string `protobuf:"bytes,14,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRangeRequest) Reset() { *m = GetRangeRequest{} } +func (m *GetRangeRequest) String() string { return proto.CompactTextString(m) } +func (*GetRangeRequest) ProtoMessage() {} +func (*GetRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{47} } + +const Default_GetRangeRequest_CacheBlocks bool = true + +func (m *GetRangeRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *GetRangeRequest) GetDirection() Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Direction_FORWARD +} +
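+// Editorial sketch (not emitted by protoc): a range scan is described by a
+// direction plus an inclusive start key and an exclusive end key (both
+// plain-buffer encoded; placeholders here). Paging re-issues the request
+// with NextStartPrimaryKey from the previous GetRangeResponse.
+func exampleGetRangeRequest(startPK, endPK []byte) *GetRangeRequest {
+	return &GetRangeRequest{
+		TableName:                proto.String("example_table"),
+		Direction:                Direction_FORWARD.Enum(),
+		MaxVersions:              proto.Int32(1),
+		Limit:                    proto.Int32(100),
+		InclusiveStartPrimaryKey: startPK,
+		ExclusiveEndPrimaryKey:   endPK,
+	}
+}
+
+func (m 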
*GetRangeRequest) GetColumnsToGet() []string { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *GetRangeRequest) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *GetRangeRequest) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *GetRangeRequest) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *GetRangeRequest) GetInclusiveStartPrimaryKey() []byte { + if m != nil { + return m.InclusiveStartPrimaryKey + } + return nil +} + +func (m *GetRangeRequest) GetExclusiveEndPrimaryKey() []byte { + if m != nil { + return m.ExclusiveEndPrimaryKey + } + return nil +} + +func (m *GetRangeRequest) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_GetRangeRequest_CacheBlocks +} + +func (m *GetRangeRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func (m *GetRangeRequest) GetStartColumn() string { + if m != nil && m.StartColumn != nil { + return *m.StartColumn + } + return "" +} + +func (m *GetRangeRequest) GetEndColumn() string { + if m != nil && m.EndColumn != nil { + return *m.EndColumn + } + return "" +} + +func (m *GetRangeRequest) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *GetRangeRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type GetRangeResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Rows []byte `protobuf:"bytes,2,req,name=rows" json:"rows,omitempty"` + NextStartPrimaryKey []byte `protobuf:"bytes,3,opt,name=next_start_primary_key" json:"next_start_primary_key,omitempty"` + NextToken []byte `protobuf:"bytes,4,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRangeResponse) Reset() { *m = GetRangeResponse{} } +func (m *GetRangeResponse) String() string { return proto.CompactTextString(m) } +func (*GetRangeResponse) ProtoMessage() {} +func (*GetRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{48} } + +func (m *GetRangeResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *GetRangeResponse) GetRows() []byte { + if m != nil { + return m.Rows + } + return nil +} + +func (m *GetRangeResponse) GetNextStartPrimaryKey() []byte { + if m != nil { + return m.NextStartPrimaryKey + } + return nil +} + +func (m *GetRangeResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type ListStreamRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListStreamRequest) Reset() { *m = ListStreamRequest{} } +func (m *ListStreamRequest) String() string { return proto.CompactTextString(m) } +func (*ListStreamRequest) ProtoMessage() {} +func (*ListStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{49} } + +func (m *ListStreamRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type Stream struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + TableName *string `protobuf:"bytes,2,req,name=table_name" json:"table_name,omitempty"` + CreationTime *int64 
`protobuf:"varint,3,req,name=creation_time" json:"creation_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Stream) Reset() { *m = Stream{} } +func (m *Stream) String() string { return proto.CompactTextString(m) } +func (*Stream) ProtoMessage() {} +func (*Stream) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{50} } + +func (m *Stream) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *Stream) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *Stream) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return 0 +} + +type ListStreamResponse struct { + Streams []*Stream `protobuf:"bytes,1,rep,name=streams" json:"streams,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListStreamResponse) Reset() { *m = ListStreamResponse{} } +func (m *ListStreamResponse) String() string { return proto.CompactTextString(m) } +func (*ListStreamResponse) ProtoMessage() {} +func (*ListStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{51} } + +func (m *ListStreamResponse) GetStreams() []*Stream { + if m != nil { + return m.Streams + } + return nil +} + +type StreamShard struct { + ShardId *string `protobuf:"bytes,1,req,name=shard_id" json:"shard_id,omitempty"` + ParentId *string `protobuf:"bytes,2,opt,name=parent_id" json:"parent_id,omitempty"` + ParentSiblingId *string `protobuf:"bytes,3,opt,name=parent_sibling_id" json:"parent_sibling_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamShard) Reset() { *m = StreamShard{} } +func (m *StreamShard) String() string { return proto.CompactTextString(m) } +func (*StreamShard) ProtoMessage() {} +func (*StreamShard) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{52} } + +func (m *StreamShard) GetShardId() string { + if m != nil && m.ShardId != nil { + return *m.ShardId + } + return "" +} + +func (m *StreamShard) GetParentId() string { + if m != nil && m.ParentId != nil { + return *m.ParentId + } + return "" +} + +func (m *StreamShard) GetParentSiblingId() string { + if m != nil && m.ParentSiblingId != nil { + return *m.ParentSiblingId + } + return "" +} + +type DescribeStreamRequest struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + InclusiveStartShardId *string `protobuf:"bytes,2,opt,name=inclusive_start_shard_id" json:"inclusive_start_shard_id,omitempty"` + ShardLimit *int32 `protobuf:"varint,3,opt,name=shard_limit" json:"shard_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeStreamRequest) Reset() { *m = DescribeStreamRequest{} } +func (m *DescribeStreamRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeStreamRequest) ProtoMessage() {} +func (*DescribeStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{53} } + +func (m *DescribeStreamRequest) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *DescribeStreamRequest) GetInclusiveStartShardId() string { + if m != nil && m.InclusiveStartShardId != nil { + return *m.InclusiveStartShardId + } + return "" +} + +func (m *DescribeStreamRequest) GetShardLimit() int32 { + if m != nil && m.ShardLimit != nil { + return *m.ShardLimit + } + return 0 +} + +type DescribeStreamResponse struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" 
json:"stream_id,omitempty"` + ExpirationTime *int32 `protobuf:"varint,2,req,name=expiration_time" json:"expiration_time,omitempty"` + TableName *string `protobuf:"bytes,3,req,name=table_name" json:"table_name,omitempty"` + CreationTime *int64 `protobuf:"varint,4,req,name=creation_time" json:"creation_time,omitempty"` + StreamStatus *StreamStatus `protobuf:"varint,5,req,name=stream_status,enum=otsprotocol.StreamStatus" json:"stream_status,omitempty"` + Shards []*StreamShard `protobuf:"bytes,6,rep,name=shards" json:"shards,omitempty"` + NextShardId *string `protobuf:"bytes,7,opt,name=next_shard_id" json:"next_shard_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeStreamResponse) Reset() { *m = DescribeStreamResponse{} } +func (m *DescribeStreamResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeStreamResponse) ProtoMessage() {} +func (*DescribeStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{54} } + +func (m *DescribeStreamResponse) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *DescribeStreamResponse) GetExpirationTime() int32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +func (m *DescribeStreamResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DescribeStreamResponse) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return 0 +} + +func (m *DescribeStreamResponse) GetStreamStatus() StreamStatus { + if m != nil && m.StreamStatus != nil { + return *m.StreamStatus + } + return StreamStatus_STREAM_ENABLING +} + +func (m *DescribeStreamResponse) GetShards() []*StreamShard { + if m != nil { + return m.Shards + } + return nil +} + +func (m *DescribeStreamResponse) GetNextShardId() string { + if m != nil && m.NextShardId != nil { + return *m.NextShardId + } + return "" +} + +type GetShardIteratorRequest struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + ShardId *string `protobuf:"bytes,2,req,name=shard_id" json:"shard_id,omitempty"` + Timestamp *int64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` + Token *string `protobuf:"bytes,4,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetShardIteratorRequest) Reset() { *m = GetShardIteratorRequest{} } +func (m *GetShardIteratorRequest) String() string { return proto.CompactTextString(m) } +func (*GetShardIteratorRequest) ProtoMessage() {} +func (*GetShardIteratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{55} } + +func (m *GetShardIteratorRequest) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *GetShardIteratorRequest) GetShardId() string { + if m != nil && m.ShardId != nil { + return *m.ShardId + } + return "" +} + +func (m *GetShardIteratorRequest) GetTimestamp() int64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *GetShardIteratorRequest) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +type GetShardIteratorResponse struct { + ShardIterator *string `protobuf:"bytes,1,req,name=shard_iterator" json:"shard_iterator,omitempty"` + NextToken *string `protobuf:"bytes,2,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + 
+func (m *GetShardIteratorResponse) Reset() { *m = GetShardIteratorResponse{} } +func (m *GetShardIteratorResponse) String() string { return proto.CompactTextString(m) } +func (*GetShardIteratorResponse) ProtoMessage() {} +func (*GetShardIteratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{56} } + +func (m *GetShardIteratorResponse) GetShardIterator() string { + if m != nil && m.ShardIterator != nil { + return *m.ShardIterator + } + return "" +} + +func (m *GetShardIteratorResponse) GetNextToken() string { + if m != nil && m.NextToken != nil { + return *m.NextToken + } + return "" +} + +type GetStreamRecordRequest struct { + ShardIterator *string `protobuf:"bytes,1,req,name=shard_iterator" json:"shard_iterator,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordRequest) Reset() { *m = GetStreamRecordRequest{} } +func (m *GetStreamRecordRequest) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordRequest) ProtoMessage() {} +func (*GetStreamRecordRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{57} } + +func (m *GetStreamRecordRequest) GetShardIterator() string { + if m != nil && m.ShardIterator != nil { + return *m.ShardIterator + } + return "" +} + +func (m *GetStreamRecordRequest) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type GetStreamRecordResponse struct { + StreamRecords []*GetStreamRecordResponse_StreamRecord `protobuf:"bytes,1,rep,name=stream_records" json:"stream_records,omitempty"` + NextShardIterator *string `protobuf:"bytes,2,opt,name=next_shard_iterator" json:"next_shard_iterator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordResponse) Reset() { *m = GetStreamRecordResponse{} } +func (m *GetStreamRecordResponse) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordResponse) ProtoMessage() {} +func (*GetStreamRecordResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{58} } + +func (m *GetStreamRecordResponse) GetStreamRecords() []*GetStreamRecordResponse_StreamRecord { + if m != nil { + return m.StreamRecords + } + return nil +} + +func (m *GetStreamRecordResponse) GetNextShardIterator() string { + if m != nil && m.NextShardIterator != nil { + return *m.NextShardIterator + } + return "" +} + +type GetStreamRecordResponse_StreamRecord struct { + ActionType *ActionType `protobuf:"varint,1,req,name=action_type,enum=otsprotocol.ActionType" json:"action_type,omitempty"` + Record []byte `protobuf:"bytes,2,req,name=record" json:"record,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordResponse_StreamRecord) Reset() { *m = GetStreamRecordResponse_StreamRecord{} } +func (m *GetStreamRecordResponse_StreamRecord) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordResponse_StreamRecord) ProtoMessage() {} +func (*GetStreamRecordResponse_StreamRecord) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{58, 0} +} + +func (m *GetStreamRecordResponse_StreamRecord) GetActionType() ActionType { + if m != nil && m.ActionType != nil { + return *m.ActionType + } + return ActionType_PUT_ROW +} + +func (m *GetStreamRecordResponse_StreamRecord) GetRecord() []byte { + if m != nil { + return m.Record + } + return nil +} + +// +++++ ComputeSplitPointsBySize +++++ +type ComputeSplitPointsBySizeRequest struct { + TableName *string 
`protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + SplitSize *int64 `protobuf:"varint,2,req,name=split_size" json:"split_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeRequest) Reset() { *m = ComputeSplitPointsBySizeRequest{} } +func (m *ComputeSplitPointsBySizeRequest) String() string { return proto.CompactTextString(m) } +func (*ComputeSplitPointsBySizeRequest) ProtoMessage() {} +func (*ComputeSplitPointsBySizeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{59} +} + +func (m *ComputeSplitPointsBySizeRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *ComputeSplitPointsBySizeRequest) GetSplitSize() int64 { + if m != nil && m.SplitSize != nil { + return *m.SplitSize + } + return 0 +} + +type ComputeSplitPointsBySizeResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Schema []*PrimaryKeySchema `protobuf:"bytes,2,rep,name=schema" json:"schema,omitempty"` + // * + // Split points between splits, in the increasing order + // + // A split is a consecutive range of primary keys, + // whose data size is about split_size specified in the request. + // The size could be hard to be precise. + // + // A split point is an array of primary-key column w.r.t. table schema, + // which is never longer than that of table schema. + // Tailing -inf will be omitted to reduce transmission payloads. + SplitPoints [][]byte `protobuf:"bytes,3,rep,name=split_points" json:"split_points,omitempty"` + Locations []*ComputeSplitPointsBySizeResponse_SplitLocation `protobuf:"bytes,4,rep,name=locations" json:"locations,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeResponse) Reset() { *m = ComputeSplitPointsBySizeResponse{} } +func (m *ComputeSplitPointsBySizeResponse) String() string { return proto.CompactTextString(m) } +func (*ComputeSplitPointsBySizeResponse) ProtoMessage() {} +func (*ComputeSplitPointsBySizeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{60} +} + +func (m *ComputeSplitPointsBySizeResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetSchema() []*PrimaryKeySchema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetSplitPoints() [][]byte { + if m != nil { + return m.SplitPoints + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetLocations() []*ComputeSplitPointsBySizeResponse_SplitLocation { + if m != nil { + return m.Locations + } + return nil +} + +// * +// Locations where splits lies in. +// +// By the managed nature of TableStore, these locations are no more than hints. +// If a location is not suitable to be seen, an empty string will be placed. 
+type ComputeSplitPointsBySizeResponse_SplitLocation struct { + Location *string `protobuf:"bytes,1,req,name=location" json:"location,omitempty"` + Repeat *int64 `protobuf:"zigzag64,2,req,name=repeat" json:"repeat,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) Reset() { + *m = ComputeSplitPointsBySizeResponse_SplitLocation{} +} +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) String() string { + return proto.CompactTextString(m) +} +func (*ComputeSplitPointsBySizeResponse_SplitLocation) ProtoMessage() {} +func (*ComputeSplitPointsBySizeResponse_SplitLocation) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{60, 0} +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) GetLocation() string { + if m != nil && m.Location != nil { + return *m.Location + } + return "" +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) GetRepeat() int64 { + if m != nil && m.Repeat != nil { + return *m.Repeat + } + return 0 +} + +type DefinedColumnSchema struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type *DefinedColumnType `protobuf:"varint,2,req,name=type,enum=otsprotocol.DefinedColumnType" json:"type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefinedColumnSchema) Reset() { *m = DefinedColumnSchema{} } +func (m *DefinedColumnSchema) String() string { return proto.CompactTextString(m) } +func (*DefinedColumnSchema) ProtoMessage() {} +func (*DefinedColumnSchema) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{61} } + +func (m *DefinedColumnSchema) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DefinedColumnSchema) GetType() DefinedColumnType { + if m != nil && m.Type != nil { + return *m.Type + } + return DefinedColumnType_DCT_INTEGER +} + +type IndexMeta struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + PrimaryKey []string `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + DefinedColumn []string `protobuf:"bytes,3,rep,name=defined_column" json:"defined_column,omitempty"` + IndexUpdateMode *IndexUpdateMode `protobuf:"varint,4,req,name=index_update_mode,enum=otsprotocol.IndexUpdateMode" json:"index_update_mode,omitempty"` + IndexType *IndexType `protobuf:"varint,5,req,name=index_type,enum=otsprotocol.IndexType" json:"index_type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMeta) Reset() { *m = IndexMeta{} } +func (m *IndexMeta) String() string { return proto.CompactTextString(m) } +func (*IndexMeta) ProtoMessage() {} +func (*IndexMeta) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{62} } + +func (m *IndexMeta) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *IndexMeta) GetPrimaryKey() []string { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *IndexMeta) GetDefinedColumn() []string { + if m != nil { + return m.DefinedColumn + } + return nil +} + +func (m *IndexMeta) GetIndexUpdateMode() IndexUpdateMode { + if m != nil && m.IndexUpdateMode != nil { + return *m.IndexUpdateMode + } + return IndexUpdateMode_IUM_ASYNC_INDEX +} + +func (m *IndexMeta) GetIndexType() IndexType { + if m != nil && m.IndexType != nil { + return *m.IndexType + } + return IndexType_IT_GLOBAL_INDEX +} + +type CreateIndexRequest struct { + MainTableName *string `protobuf:"bytes,1,req,name=main_table_name" json:"main_table_name,omitempty"` + IndexMeta 
*IndexMeta `protobuf:"bytes,2,req,name=index_meta" json:"index_meta,omitempty"` + IncludeBaseData *bool `protobuf:"varint,3,opt,name=include_base_data" json:"include_base_data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateIndexRequest) Reset() { *m = CreateIndexRequest{} } +func (m *CreateIndexRequest) String() string { return proto.CompactTextString(m) } +func (*CreateIndexRequest) ProtoMessage() {} +func (*CreateIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{63} } + +func (m *CreateIndexRequest) GetMainTableName() string { + if m != nil && m.MainTableName != nil { + return *m.MainTableName + } + return "" +} + +func (m *CreateIndexRequest) GetIndexMeta() *IndexMeta { + if m != nil { + return m.IndexMeta + } + return nil +} + +func (m *CreateIndexRequest) GetIncludeBaseData() bool { + if m != nil && m.IncludeBaseData != nil { + return *m.IncludeBaseData + } + return false +} + +type CreateIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateIndexResponse) Reset() { *m = CreateIndexResponse{} } +func (m *CreateIndexResponse) String() string { return proto.CompactTextString(m) } +func (*CreateIndexResponse) ProtoMessage() {} +func (*CreateIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{64} } + +type DropIndexRequest struct { + MainTableName *string `protobuf:"bytes,1,req,name=main_table_name" json:"main_table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,req,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DropIndexRequest) Reset() { *m = DropIndexRequest{} } +func (m *DropIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DropIndexRequest) ProtoMessage() {} +func (*DropIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{65} } + +func (m *DropIndexRequest) GetMainTableName() string { + if m != nil && m.MainTableName != nil { + return *m.MainTableName + } + return "" +} + +func (m *DropIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DropIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DropIndexResponse) Reset() { *m = DropIndexResponse{} } +func (m *DropIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DropIndexResponse) ProtoMessage() {} +func (*DropIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{66} } + +// ########################################### LocalTransaction ########################################### +type StartLocalTransactionRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartLocalTransactionRequest) Reset() { *m = StartLocalTransactionRequest{} } +func (m *StartLocalTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*StartLocalTransactionRequest) ProtoMessage() {} +func (*StartLocalTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{67} } + +func (m *StartLocalTransactionRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *StartLocalTransactionRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type StartLocalTransactionResponse struct { + TransactionId *string 
`protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartLocalTransactionResponse) Reset() { *m = StartLocalTransactionResponse{} } +func (m *StartLocalTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*StartLocalTransactionResponse) ProtoMessage() {} +func (*StartLocalTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{68} } + +func (m *StartLocalTransactionResponse) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type CommitTransactionRequest struct { + TransactionId *string `protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitTransactionRequest) Reset() { *m = CommitTransactionRequest{} } +func (m *CommitTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*CommitTransactionRequest) ProtoMessage() {} +func (*CommitTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{69} } + +func (m *CommitTransactionRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type CommitTransactionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitTransactionResponse) Reset() { *m = CommitTransactionResponse{} } +func (m *CommitTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*CommitTransactionResponse) ProtoMessage() {} +func (*CommitTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{70} } + +type AbortTransactionRequest struct { + TransactionId *string `protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AbortTransactionRequest) Reset() { *m = AbortTransactionRequest{} } +func (m *AbortTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*AbortTransactionRequest) ProtoMessage() {} +func (*AbortTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{71} } + +func (m *AbortTransactionRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type AbortTransactionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AbortTransactionResponse) Reset() { *m = AbortTransactionResponse{} } +func (m *AbortTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*AbortTransactionResponse) ProtoMessage() {} +func (*AbortTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{72} } + +func init() { + proto.RegisterType((*Error)(nil), "otsprotocol.Error") + proto.RegisterType((*PrimaryKeySchema)(nil), "otsprotocol.PrimaryKeySchema") + proto.RegisterType((*PartitionRange)(nil), "otsprotocol.PartitionRange") + proto.RegisterType((*TableOptions)(nil), "otsprotocol.TableOptions") + proto.RegisterType((*TableMeta)(nil), "otsprotocol.TableMeta") + proto.RegisterType((*Condition)(nil), "otsprotocol.Condition") + proto.RegisterType((*CapacityUnit)(nil), "otsprotocol.CapacityUnit") + proto.RegisterType((*ReservedThroughputDetails)(nil), "otsprotocol.ReservedThroughputDetails") + proto.RegisterType((*ReservedThroughput)(nil), "otsprotocol.ReservedThroughput") + proto.RegisterType((*ConsumedCapacity)(nil), "otsprotocol.ConsumedCapacity") + 
proto.RegisterType((*StreamSpecification)(nil), "otsprotocol.StreamSpecification") + proto.RegisterType((*StreamDetails)(nil), "otsprotocol.StreamDetails") + proto.RegisterType((*CreateTableRequest)(nil), "otsprotocol.CreateTableRequest") + proto.RegisterType((*CreateTableResponse)(nil), "otsprotocol.CreateTableResponse") + proto.RegisterType((*UpdateTableRequest)(nil), "otsprotocol.UpdateTableRequest") + proto.RegisterType((*UpdateTableResponse)(nil), "otsprotocol.UpdateTableResponse") + proto.RegisterType((*DescribeTableRequest)(nil), "otsprotocol.DescribeTableRequest") + proto.RegisterType((*DescribeTableResponse)(nil), "otsprotocol.DescribeTableResponse") + proto.RegisterType((*ListTableRequest)(nil), "otsprotocol.ListTableRequest") + proto.RegisterType((*ListTableResponse)(nil), "otsprotocol.ListTableResponse") + proto.RegisterType((*DeleteTableRequest)(nil), "otsprotocol.DeleteTableRequest") + proto.RegisterType((*DeleteTableResponse)(nil), "otsprotocol.DeleteTableResponse") + proto.RegisterType((*LoadTableRequest)(nil), "otsprotocol.LoadTableRequest") + proto.RegisterType((*LoadTableResponse)(nil), "otsprotocol.LoadTableResponse") + proto.RegisterType((*UnloadTableRequest)(nil), "otsprotocol.UnloadTableRequest") + proto.RegisterType((*UnloadTableResponse)(nil), "otsprotocol.UnloadTableResponse") + proto.RegisterType((*TimeRange)(nil), "otsprotocol.TimeRange") + proto.RegisterType((*ReturnContent)(nil), "otsprotocol.ReturnContent") + proto.RegisterType((*GetRowRequest)(nil), "otsprotocol.GetRowRequest") + proto.RegisterType((*GetRowResponse)(nil), "otsprotocol.GetRowResponse") + proto.RegisterType((*UpdateRowRequest)(nil), "otsprotocol.UpdateRowRequest") + proto.RegisterType((*UpdateRowResponse)(nil), "otsprotocol.UpdateRowResponse") + proto.RegisterType((*PutRowRequest)(nil), "otsprotocol.PutRowRequest") + proto.RegisterType((*PutRowResponse)(nil), "otsprotocol.PutRowResponse") + proto.RegisterType((*DeleteRowRequest)(nil), "otsprotocol.DeleteRowRequest") + proto.RegisterType((*DeleteRowResponse)(nil), "otsprotocol.DeleteRowResponse") + proto.RegisterType((*TableInBatchGetRowRequest)(nil), "otsprotocol.TableInBatchGetRowRequest") + proto.RegisterType((*BatchGetRowRequest)(nil), "otsprotocol.BatchGetRowRequest") + proto.RegisterType((*RowInBatchGetRowResponse)(nil), "otsprotocol.RowInBatchGetRowResponse") + proto.RegisterType((*TableInBatchGetRowResponse)(nil), "otsprotocol.TableInBatchGetRowResponse") + proto.RegisterType((*BatchGetRowResponse)(nil), "otsprotocol.BatchGetRowResponse") + proto.RegisterType((*RowInBatchWriteRowRequest)(nil), "otsprotocol.RowInBatchWriteRowRequest") + proto.RegisterType((*TableInBatchWriteRowRequest)(nil), "otsprotocol.TableInBatchWriteRowRequest") + proto.RegisterType((*BatchWriteRowRequest)(nil), "otsprotocol.BatchWriteRowRequest") + proto.RegisterType((*RowInBatchWriteRowResponse)(nil), "otsprotocol.RowInBatchWriteRowResponse") + proto.RegisterType((*TableInBatchWriteRowResponse)(nil), "otsprotocol.TableInBatchWriteRowResponse") + proto.RegisterType((*BatchWriteRowResponse)(nil), "otsprotocol.BatchWriteRowResponse") + proto.RegisterType((*GetRangeRequest)(nil), "otsprotocol.GetRangeRequest") + proto.RegisterType((*GetRangeResponse)(nil), "otsprotocol.GetRangeResponse") + proto.RegisterType((*ListStreamRequest)(nil), "otsprotocol.ListStreamRequest") + proto.RegisterType((*Stream)(nil), "otsprotocol.Stream") + proto.RegisterType((*ListStreamResponse)(nil), "otsprotocol.ListStreamResponse") + proto.RegisterType((*StreamShard)(nil), 
"otsprotocol.StreamShard") + proto.RegisterType((*DescribeStreamRequest)(nil), "otsprotocol.DescribeStreamRequest") + proto.RegisterType((*DescribeStreamResponse)(nil), "otsprotocol.DescribeStreamResponse") + proto.RegisterType((*GetShardIteratorRequest)(nil), "otsprotocol.GetShardIteratorRequest") + proto.RegisterType((*GetShardIteratorResponse)(nil), "otsprotocol.GetShardIteratorResponse") + proto.RegisterType((*GetStreamRecordRequest)(nil), "otsprotocol.GetStreamRecordRequest") + proto.RegisterType((*GetStreamRecordResponse)(nil), "otsprotocol.GetStreamRecordResponse") + proto.RegisterType((*GetStreamRecordResponse_StreamRecord)(nil), "otsprotocol.GetStreamRecordResponse.StreamRecord") + proto.RegisterType((*ComputeSplitPointsBySizeRequest)(nil), "otsprotocol.ComputeSplitPointsBySizeRequest") + proto.RegisterType((*ComputeSplitPointsBySizeResponse)(nil), "otsprotocol.ComputeSplitPointsBySizeResponse") + proto.RegisterType((*ComputeSplitPointsBySizeResponse_SplitLocation)(nil), "otsprotocol.ComputeSplitPointsBySizeResponse.SplitLocation") + proto.RegisterType((*DefinedColumnSchema)(nil), "otsprotocol.DefinedColumnSchema") + proto.RegisterType((*IndexMeta)(nil), "otsprotocol.IndexMeta") + proto.RegisterType((*CreateIndexRequest)(nil), "otsprotocol.CreateIndexRequest") + proto.RegisterType((*CreateIndexResponse)(nil), "otsprotocol.CreateIndexResponse") + proto.RegisterType((*DropIndexRequest)(nil), "otsprotocol.DropIndexRequest") + proto.RegisterType((*DropIndexResponse)(nil), "otsprotocol.DropIndexResponse") + proto.RegisterType((*StartLocalTransactionRequest)(nil), "otsprotocol.StartLocalTransactionRequest") + proto.RegisterType((*StartLocalTransactionResponse)(nil), "otsprotocol.StartLocalTransactionResponse") + proto.RegisterType((*CommitTransactionRequest)(nil), "otsprotocol.CommitTransactionRequest") + proto.RegisterType((*CommitTransactionResponse)(nil), "otsprotocol.CommitTransactionResponse") + proto.RegisterType((*AbortTransactionRequest)(nil), "otsprotocol.AbortTransactionRequest") + proto.RegisterType((*AbortTransactionResponse)(nil), "otsprotocol.AbortTransactionResponse") + proto.RegisterEnum("otsprotocol.PrimaryKeyType", PrimaryKeyType_name, PrimaryKeyType_value) + proto.RegisterEnum("otsprotocol.PrimaryKeyOption", PrimaryKeyOption_name, PrimaryKeyOption_value) + proto.RegisterEnum("otsprotocol.BloomFilterType", BloomFilterType_name, BloomFilterType_value) + proto.RegisterEnum("otsprotocol.TableStatus", TableStatus_name, TableStatus_value) + proto.RegisterEnum("otsprotocol.RowExistenceExpectation", RowExistenceExpectation_name, RowExistenceExpectation_value) + proto.RegisterEnum("otsprotocol.ReturnType", ReturnType_name, ReturnType_value) + proto.RegisterEnum("otsprotocol.OperationType", OperationType_name, OperationType_value) + proto.RegisterEnum("otsprotocol.Direction", Direction_name, Direction_value) + proto.RegisterEnum("otsprotocol.StreamStatus", StreamStatus_name, StreamStatus_value) + proto.RegisterEnum("otsprotocol.ActionType", ActionType_name, ActionType_value) + proto.RegisterEnum("otsprotocol.DefinedColumnType", DefinedColumnType_name, DefinedColumnType_value) + proto.RegisterEnum("otsprotocol.IndexUpdateMode", IndexUpdateMode_name, IndexUpdateMode_value) + proto.RegisterEnum("otsprotocol.IndexType", IndexType_name, IndexType_value) +} + +func init() { proto.RegisterFile("table_store.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 2751 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 
0x59, 0x5b, 0x6f, 0xe3, 0xc6, + 0xf5, 0x0f, 0x49, 0x5d, 0xac, 0xa3, 0x8b, 0x29, 0x6a, 0x6d, 0xcb, 0xf6, 0x26, 0x71, 0xf8, 0x4f, + 0x36, 0x5a, 0x65, 0xe3, 0x24, 0xfe, 0x27, 0xd9, 0x5c, 0x0a, 0x04, 0xb2, 0x24, 0xbb, 0xc2, 0xca, + 0x92, 0x23, 0xd3, 0x4d, 0xb6, 0x2f, 0x2c, 0x4d, 0xce, 0xda, 0xc4, 0x4a, 0xa4, 0x4a, 0x8e, 0xd6, + 0xf6, 0xbe, 0x16, 0x79, 0x2b, 0xd0, 0x0f, 0x50, 0xa0, 0xaf, 0x7d, 0x28, 0x50, 0xa0, 0x7d, 0x6a, + 0x3f, 0x40, 0x81, 0xbe, 0xe5, 0x3b, 0xf4, 0xad, 0x5f, 0xa2, 0x28, 0xe6, 0x42, 0x89, 0xa4, 0x28, + 0xcb, 0xbb, 0x49, 0xba, 0x6f, 0xe4, 0xcc, 0x9c, 0xdb, 0xef, 0x9c, 0x39, 0x73, 0xe6, 0x0c, 0x94, + 0xb1, 0x71, 0x36, 0x44, 0xba, 0x8f, 0x5d, 0x0f, 0xed, 0x8e, 0x3d, 0x17, 0xbb, 0x4a, 0xde, 0xc5, + 0x3e, 0xfd, 0x32, 0xdd, 0xa1, 0x7a, 0x0f, 0xd2, 0x6d, 0xcf, 0x73, 0x3d, 0xa5, 0x00, 0x29, 0xd3, + 0xb5, 0x50, 0x55, 0xd8, 0x11, 0x6b, 0x39, 0x65, 0x15, 0xb2, 0x23, 0xe4, 0xfb, 0xc6, 0x39, 0xaa, + 0x8a, 0x3b, 0x42, 0x2d, 0xa7, 0x3e, 0x07, 0xf9, 0xd8, 0xb3, 0x47, 0x86, 0x77, 0xfd, 0x08, 0x5d, + 0x9f, 0x98, 0x17, 0x68, 0x64, 0x10, 0x12, 0xc7, 0x18, 0x05, 0x24, 0xf7, 0x21, 0x85, 0xaf, 0xc7, + 0x64, 0xbd, 0x58, 0x2b, 0xed, 0x6d, 0xef, 0x86, 0xa4, 0xec, 0xce, 0x48, 0xb5, 0xeb, 0x31, 0x52, + 0xde, 0x87, 0x8c, 0x3b, 0xc6, 0xb6, 0xeb, 0x54, 0xa5, 0x1d, 0xa1, 0x56, 0xda, 0x7b, 0x7d, 0xc1, + 0xe2, 0x3e, 0x5d, 0xa4, 0x3e, 0x80, 0xd2, 0xb1, 0xe1, 0x61, 0x9b, 0xfc, 0x0c, 0x0c, 0xe7, 0x1c, + 0x29, 0x45, 0x48, 0x9f, 0xa1, 0x73, 0xdb, 0xa1, 0xa2, 0x0b, 0x4a, 0x1e, 0x24, 0xe4, 0x58, 0x54, + 0x72, 0x41, 0xfd, 0xb3, 0x00, 0x05, 0x8d, 0x18, 0xcd, 0xa8, 0x7d, 0xe5, 0x0e, 0x14, 0xb0, 0x3d, + 0x42, 0x3a, 0x76, 0xf5, 0xa1, 0xfd, 0x8c, 0xa8, 0x2b, 0xd4, 0xd2, 0x64, 0x74, 0x64, 0x5c, 0xe9, + 0xcf, 0x90, 0xe7, 0x93, 0x55, 0xd4, 0xcc, 0xb4, 0xf2, 0x10, 0xca, 0x67, 0x43, 0xd7, 0x1d, 0xe9, + 0x4f, 0xec, 0x21, 0x46, 0x9e, 0x4e, 0x2d, 0x62, 0x4a, 0xde, 0x8d, 0x28, 0xb9, 0x4f, 0x56, 0x1d, + 0xd0, 0x45, 0xd4, 0x24, 0x05, 0xe0, 0x6c, 0xe8, 0x9a, 0x4f, 0x75, 0xdf, 0x7e, 0x8e, 0xaa, 0x29, + 0xca, 0xec, 0x1d, 0x78, 0xdd, 0x42, 0xcf, 0x6c, 0x83, 0xa8, 0xa1, 0x9b, 0x68, 0x38, 0x0c, 0xa4, + 0xe9, 0xb6, 0xa3, 0xfb, 0xc8, 0xac, 0xa6, 0x77, 0x84, 0x9a, 0xa4, 0xfe, 0x5d, 0x80, 0x1c, 0x55, + 0xf8, 0x08, 0x61, 0x83, 0x30, 0x62, 0x2e, 0x0b, 0x41, 0xbb, 0x07, 0xf9, 0x31, 0x03, 0x45, 0x7f, + 0x8a, 0xae, 0xab, 0xe2, 0x8e, 0x54, 0xcb, 0x2f, 0x04, 0x8d, 0x3b, 0xe7, 0x33, 0x28, 0x59, 0xe8, + 0x89, 0xed, 0x20, 0x4b, 0x37, 0xdd, 0xe1, 0x64, 0x44, 0xb0, 0x26, 0x64, 0x3b, 0x11, 0xb2, 0x16, + 0x5b, 0xd2, 0xa4, 0x2b, 0x38, 0x65, 0x1d, 0xc0, 0x76, 0x2c, 0x74, 0xa5, 0x8f, 0x10, 0x36, 0xaa, + 0x29, 0x4a, 0xb5, 0x1e, 0xa1, 0xea, 0x90, 0x69, 0xa2, 0xad, 0x7a, 0x06, 0xb9, 0xa6, 0xeb, 0x58, + 0xd4, 0x35, 0xca, 0x97, 0x50, 0xf4, 0xdc, 0x4b, 0x1d, 0x5d, 0xd9, 0x3e, 0x46, 0x8e, 0xc9, 0xb4, + 0x2f, 0xed, 0xbd, 0x1d, 0xa1, 0x1d, 0xb8, 0x97, 0xed, 0x60, 0x41, 0xfb, 0x6a, 0x8c, 0x4c, 0x4c, + 0xf1, 0x51, 0xaa, 0x20, 0x33, 0x3d, 0x75, 0x33, 0x60, 0x48, 0x7d, 0x52, 0x50, 0xdf, 0x83, 0x42, + 0xd3, 0x18, 0x1b, 0xa6, 0x8d, 0xaf, 0x4f, 0x1d, 0x1b, 0x93, 0xb0, 0xf3, 0x90, 0x61, 0x71, 0x3f, + 0x16, 0x21, 0x7d, 0xe9, 0xd9, 0x98, 0xc5, 0x69, 0x5a, 0xfd, 0x4e, 0x80, 0xcd, 0x01, 0xf2, 0x91, + 0xf7, 0x0c, 0x59, 0xda, 0x85, 0xe7, 0x4e, 0xce, 0x2f, 0xc6, 0x13, 0xdc, 0x42, 0xd8, 0xb0, 0x87, + 0xbe, 0xf2, 0x21, 0x14, 0x4d, 0xce, 0x4a, 0x9f, 0x38, 0x36, 0xa6, 0x1a, 0xe6, 0xf7, 0x36, 0x23, + 0x1a, 0x46, 0x84, 0x6d, 0x81, 0x32, 0x34, 0x7c, 0xac, 0xdb, 0x8e, 0xe9, 0x21, 0xc3, 0x47, 0x3a, + 0x09, 0x25, 0x1a, 0x69, 0xd2, 0x74, 0xce, 0x42, 0xe1, 0x39, 0x89, 0x3a, 0xf5, 0x00, 0x94, 
0x79, + 0x35, 0x5e, 0x5c, 0xbe, 0xda, 0x02, 0xb9, 0xe9, 0x3a, 0xfe, 0x64, 0x84, 0xac, 0x60, 0xfc, 0x25, + 0xb8, 0xb4, 0xa1, 0x72, 0x82, 0x3d, 0x64, 0x8c, 0x4e, 0xc6, 0xc8, 0xb4, 0x9f, 0xd8, 0x26, 0xc3, + 0x7c, 0x0d, 0x8a, 0xc8, 0xe1, 0xf9, 0x81, 0xcc, 0x52, 0x46, 0x2b, 0xca, 0x06, 0xac, 0xa2, 0xab, + 0xb1, 0xed, 0xb1, 0xc0, 0xe5, 0x06, 0x13, 0x70, 0x87, 0x50, 0x64, 0x6c, 0x02, 0x3c, 0x17, 0x30, + 0x28, 0x43, 0x8e, 0xfd, 0xeb, 0xb6, 0xc5, 0xf2, 0x47, 0x12, 0x4f, 0x89, 0xfa, 0xaf, 0x0a, 0x32, + 0x05, 0x91, 0xf3, 0xa1, 0x33, 0x29, 0x0a, 0xe1, 0x3f, 0x44, 0x50, 0x9a, 0x1e, 0x32, 0x30, 0xa2, + 0xbb, 0x63, 0x80, 0x7e, 0x3d, 0x41, 0x3e, 0x26, 0xe1, 0xc9, 0x36, 0x08, 0x0d, 0x4f, 0x66, 0x7a, + 0x34, 0x3c, 0x67, 0x9b, 0xe9, 0x67, 0x50, 0xf1, 0xb8, 0x17, 0x74, 0x3c, 0x75, 0x03, 0x75, 0x5f, + 0x7e, 0xef, 0xcd, 0x68, 0x5c, 0x26, 0x7a, 0x8b, 0x49, 0x62, 0xc9, 0xca, 0xa7, 0x1a, 0xc7, 0x71, + 0x8e, 0xa4, 0x9a, 0x0f, 0x00, 0xc6, 0x41, 0xa6, 0xf2, 0xf9, 0xd6, 0x89, 0x65, 0xc2, 0x68, 0x22, + 0xfb, 0x04, 0xf2, 0x1c, 0x29, 0x7f, 0xcc, 0x13, 0x42, 0x7c, 0x8b, 0x26, 0x39, 0xee, 0x3d, 0xc8, + 0xcf, 0xb6, 0xa8, 0x5f, 0xcd, 0xde, 0xb8, 0x47, 0xd7, 0xa0, 0x12, 0x81, 0xd1, 0x1f, 0xbb, 0x8e, + 0x8f, 0xd4, 0xef, 0x05, 0x50, 0x4e, 0xc7, 0x56, 0x1c, 0xde, 0xa4, 0xfc, 0xb3, 0x10, 0x46, 0xe1, + 0xa7, 0x81, 0x31, 0x86, 0x4a, 0xea, 0x76, 0xa8, 0xa8, 0xff, 0x14, 0xa0, 0x12, 0xb1, 0x88, 0x59, + 0xaa, 0x3c, 0x82, 0xed, 0x04, 0xf5, 0x75, 0x8b, 0x05, 0x31, 0x0f, 0xa1, 0x7b, 0x4b, 0xcc, 0x08, + 0xa5, 0x90, 0xa8, 0x35, 0x62, 0xc2, 0xe6, 0x8b, 0x58, 0xb3, 0x07, 0x25, 0x6e, 0x4d, 0x20, 0x91, + 0x01, 0xb0, 0x95, 0x60, 0x10, 0x97, 0xa2, 0xd6, 0xe1, 0x4e, 0x0b, 0xf9, 0xa6, 0x67, 0x9f, 0x2d, + 0xf5, 0x8e, 0xfa, 0x6f, 0x11, 0xd6, 0x62, 0x8b, 0xb9, 0xe1, 0x2f, 0xb2, 0x55, 0x96, 0x80, 0x24, + 0xfe, 0x30, 0x90, 0xa4, 0x65, 0x20, 0xed, 0x42, 0x21, 0xa8, 0x54, 0x0c, 0x3c, 0x21, 0x7b, 0x87, + 0x1c, 0x1d, 0xd5, 0x79, 0x82, 0x13, 0x3a, 0x9f, 0x00, 0x6a, 0x7a, 0x19, 0xa8, 0xe4, 0xc8, 0xf7, + 0x2f, 0x0c, 0xcf, 0xd2, 0xfd, 0xf1, 0xd0, 0xc6, 0x7e, 0x35, 0xb3, 0x23, 0xd5, 0x0a, 0xf1, 0xbd, + 0xb4, 0x72, 0xe3, 0x5e, 0x52, 0x40, 0xee, 0xda, 0x3e, 0x0e, 0xfb, 0x44, 0xad, 0x41, 0x39, 0x34, + 0xc6, 0xa1, 0xaf, 0x40, 0x7e, 0xe6, 0x28, 0x12, 0x63, 0x52, 0x2d, 0xa7, 0xd6, 0x40, 0x69, 0xa1, + 0x21, 0x5a, 0xbe, 0xe3, 0xc8, 0x9e, 0x8d, 0xac, 0xe4, 0x7b, 0xf6, 0x1e, 0xc8, 0x5d, 0xd7, 0xb0, + 0x96, 0x92, 0x57, 0xa0, 0x1c, 0x5a, 0xc7, 0x89, 0x6b, 0xa0, 0x9c, 0x3a, 0xc3, 0xdb, 0x90, 0xaf, + 0x41, 0x25, 0xb2, 0x92, 0x33, 0xf8, 0x39, 0xe4, 0x34, 0x7b, 0x84, 0x58, 0xe6, 0x52, 0x00, 0x7c, + 0x6c, 0x78, 0x98, 0x65, 0x6c, 0x72, 0x16, 0x4b, 0x8a, 0x0c, 0x2b, 0xc8, 0xb1, 0x66, 0x27, 0x86, + 0x44, 0x0e, 0x08, 0x9f, 0xef, 0xd1, 0xf0, 0xe9, 0xf8, 0x4b, 0x28, 0x0e, 0x10, 0x9e, 0x78, 0x4e, + 0xd3, 0x75, 0x30, 0x72, 0xb0, 0xf2, 0x00, 0xf2, 0x1e, 0x1d, 0x60, 0x15, 0x97, 0x40, 0x2b, 0xae, + 0x8d, 0x58, 0xb4, 0x91, 0x79, 0x5a, 0x6c, 0x6d, 0x93, 0x7c, 0x44, 0x57, 0xf3, 0x92, 0x81, 0x81, + 0x2c, 0x52, 0x90, 0x7f, 0x27, 0x42, 0xf1, 0x10, 0xe1, 0x81, 0x7b, 0x79, 0x53, 0x4a, 0xab, 0xc4, + 0x4b, 0x2a, 0x52, 0x47, 0xae, 0x43, 0x89, 0x31, 0xf4, 0x49, 0xb1, 0x78, 0x8e, 0x30, 0xad, 0x99, + 0x72, 0x74, 0x1f, 0x91, 0x0a, 0xd2, 0x23, 0x96, 0xf3, 0x74, 0x14, 0xdb, 0x47, 0x53, 0x5c, 0xe2, + 0x75, 0x65, 0x9a, 0x9e, 0x72, 0x5b, 0x50, 0x30, 0x0d, 0xf3, 0x02, 0xe9, 0xb4, 0x48, 0x24, 0xa1, + 0x27, 0xd4, 0x56, 0xbe, 0x48, 0x61, 0x6f, 0x82, 0x94, 0x12, 0x64, 0x58, 0xb5, 0x59, 0xcd, 0x92, + 0x7a, 0x87, 0x86, 0x29, 0x45, 0x96, 0xd7, 0x6d, 0x2b, 0xf4, 0x00, 0x55, 0x00, 0x08, 0xb6, 0x7c, + 0x2c, 0x47, 0xc7, 
0x8a, 0x90, 0xc6, 0xee, 0x53, 0xe4, 0x54, 0x81, 0x12, 0xae, 0x43, 0x09, 0x7b, + 0x86, 0xe3, 0x1b, 0x26, 0x3d, 0x64, 0x6d, 0xab, 0x9a, 0xa7, 0xb5, 0xfb, 0x19, 0x94, 0x02, 0x40, + 0x78, 0x74, 0x7e, 0x00, 0x2b, 0x26, 0xaf, 0x2a, 0x78, 0x5a, 0x88, 0x56, 0x93, 0x73, 0x25, 0x47, + 0x1e, 0x24, 0xcf, 0xbd, 0xe4, 0x30, 0x29, 0x00, 0x0e, 0xba, 0xc2, 0x3a, 0x93, 0x2d, 0xd1, 0x22, + 0xed, 0x2f, 0x02, 0xc8, 0x2c, 0xf7, 0x2e, 0x01, 0x5e, 0x01, 0x20, 0x45, 0xa2, 0x79, 0x41, 0xb1, + 0x64, 0x0c, 0xef, 0x43, 0x6e, 0x56, 0xf4, 0x49, 0x09, 0x69, 0x6a, 0x56, 0x63, 0xee, 0x41, 0x69, + 0xea, 0x7a, 0x1a, 0x3a, 0xdc, 0x1d, 0x5b, 0x09, 0xb1, 0x12, 0x04, 0xd7, 0x3c, 0x2e, 0x69, 0x8a, + 0xcb, 0xd7, 0x50, 0x0e, 0xa9, 0xfc, 0x83, 0xa1, 0x21, 0x30, 0xfc, 0x51, 0x80, 0xe2, 0xf1, 0x64, + 0x59, 0xf0, 0x45, 0xd0, 0x7c, 0x45, 0xc6, 0xf7, 0xa0, 0x14, 0x28, 0xfa, 0xa3, 0x58, 0xfe, 0x57, + 0x01, 0x64, 0x96, 0xb2, 0x5e, 0x66, 0xe7, 0xbd, 0xba, 0x08, 0x08, 0xe9, 0xfc, 0xa3, 0xe0, 0xf0, + 0x1b, 0x11, 0x36, 0x69, 0xda, 0xec, 0x38, 0xfb, 0x06, 0x36, 0x2f, 0x5e, 0x22, 0x15, 0x91, 0x53, + 0x69, 0xba, 0xb5, 0x25, 0xfa, 0x3b, 0x9f, 0x99, 0x52, 0x09, 0x99, 0x29, 0xfd, 0x42, 0x99, 0x29, + 0x93, 0x98, 0x99, 0xb2, 0x89, 0x99, 0x69, 0x25, 0x31, 0x33, 0xe5, 0x12, 0x32, 0x13, 0x50, 0x60, + 0xbb, 0xa0, 0x24, 0x58, 0xff, 0x29, 0x64, 0xa8, 0xf5, 0xec, 0x3c, 0x8c, 0x97, 0x13, 0x0b, 0x51, + 0x53, 0xff, 0x20, 0x40, 0x75, 0xe0, 0x5e, 0xc6, 0xe6, 0xb8, 0xbb, 0x8a, 0x90, 0xb6, 0x7d, 0xdd, + 0x7d, 0xca, 0xef, 0x1e, 0x6f, 0x41, 0x1a, 0x79, 0x9e, 0xeb, 0xf1, 0xea, 0x54, 0x89, 0x88, 0x60, + 0xad, 0x8e, 0xb0, 0x83, 0x59, 0x29, 0x76, 0x3b, 0x07, 0xa7, 0x28, 0x08, 0xd1, 0xec, 0x97, 0xa6, + 0x4e, 0x47, 0xb0, 0x95, 0xa4, 0x3d, 0xd7, 0x30, 0xc9, 0xe9, 0xff, 0x0f, 0x29, 0xcf, 0xbd, 0xf4, + 0xf9, 0x5d, 0xfe, 0x9d, 0xf8, 0x15, 0x39, 0x91, 0x91, 0xda, 0x83, 0x4a, 0x12, 0xff, 0x87, 0x31, + 0x58, 0xdf, 0x5d, 0x0a, 0x2b, 0xe7, 0xf7, 0x37, 0x72, 0x59, 0x9e, 0x0a, 0xfb, 0x86, 0x5c, 0xa3, + 0x43, 0xde, 0xaa, 0xf1, 0x86, 0x0e, 0xbb, 0xc5, 0x47, 0xb7, 0x57, 0x7f, 0x8c, 0xd8, 0x55, 0x2e, + 0x68, 0x7e, 0xfc, 0x8f, 0x73, 0xba, 0x7a, 0x0e, 0xdb, 0x61, 0xc3, 0xe2, 0xba, 0x27, 0x41, 0xfe, + 0x71, 0x04, 0xf2, 0x7b, 0x0b, 0x20, 0x8f, 0x71, 0x52, 0x2f, 0xe0, 0x4e, 0xa2, 0x84, 0xcf, 0x62, + 0xa0, 0xd7, 0x16, 0x82, 0x1e, 0xa7, 0x9c, 0x4f, 0x46, 0xac, 0xc5, 0xf6, 0x5b, 0x01, 0xb6, 0x92, + 0xf4, 0x78, 0x35, 0x71, 0xae, 0xda, 0x70, 0x37, 0xd9, 0x8a, 0x1b, 0xa2, 0xfa, 0x93, 0x08, 0xc4, + 0xef, 0x2e, 0x85, 0x98, 0xc7, 0xe1, 0x00, 0xd6, 0x92, 0x65, 0x7c, 0x1e, 0x03, 0xf9, 0xfe, 0x2d, + 0x40, 0xe6, 0x3c, 0xff, 0x23, 0xc2, 0x2a, 0x09, 0x77, 0x12, 0x92, 0x37, 0x45, 0xc5, 0x7d, 0xc8, + 0x59, 0xb6, 0x87, 0x4c, 0xde, 0x70, 0x22, 0xa1, 0x1e, 0x8d, 0xd3, 0x56, 0x30, 0xfb, 0x13, 0x96, + 0x87, 0x45, 0x48, 0x0f, 0xed, 0x91, 0x8d, 0x79, 0x4e, 0xfe, 0x3f, 0xd8, 0xb6, 0x1d, 0x73, 0x38, + 0xf1, 0xed, 0x67, 0xf4, 0x42, 0xe4, 0x61, 0x3d, 0x7c, 0x42, 0x64, 0xe9, 0x06, 0x7b, 0x0b, 0x36, + 0xd1, 0x55, 0xb0, 0x88, 0x24, 0xe0, 0xf0, 0x92, 0x15, 0xba, 0x24, 0x9e, 0xdb, 0x73, 0x89, 0xb9, + 0x1d, 0x12, 0x73, 0x7b, 0x3e, 0x21, 0xb7, 0x17, 0xa2, 0x55, 0x67, 0x71, 0x41, 0xd5, 0x59, 0xa2, + 0xe1, 0xfc, 0x9d, 0x00, 0xf2, 0xcc, 0x01, 0x2f, 0x7b, 0xb6, 0x16, 0xa6, 0x11, 0x45, 0x0c, 0x7a, + 0x03, 0xd6, 0x69, 0xee, 0x9d, 0xc7, 0x44, 0x4a, 0xc8, 0xcd, 0x2c, 0x8e, 0xdf, 0x65, 0xd7, 0x33, + 0x76, 0x15, 0x5c, 0x14, 0x09, 0x44, 0xe1, 0x03, 0xc8, 0xb0, 0x45, 0xd1, 0xfe, 0xd5, 0xb4, 0x6c, + 0x0d, 0x11, 0x88, 0x74, 0x6c, 0x0d, 0x8a, 0xa6, 0x87, 0x22, 0x1d, 0x2d, 0xb1, 0x26, 0xa9, 0x5f, + 0x80, 0x12, 0x16, 0xc8, 0x2d, 0x7f, 0x1b, 
0xb2, 0x8c, 0x67, 0x10, 0xcb, 0x95, 0x84, 0x9b, 0xaa, + 0x7a, 0x04, 0x79, 0xde, 0xd9, 0x20, 0x17, 0x55, 0x72, 0xa1, 0x62, 0x37, 0xd6, 0xa9, 0x1e, 0x65, + 0xc8, 0x8d, 0x0d, 0x0f, 0x39, 0x78, 0xd6, 0x5a, 0xdb, 0x84, 0x32, 0x1f, 0xf2, 0xed, 0xb3, 0xa1, + 0xed, 0x9c, 0x93, 0x29, 0x89, 0x9a, 0x64, 0xcc, 0x3a, 0x03, 0x51, 0xfb, 0x13, 0x2c, 0xdc, 0x81, + 0x6a, 0x3c, 0xe8, 0xa6, 0xb2, 0x99, 0xa0, 0x0a, 0xe4, 0xd9, 0x08, 0x8b, 0x55, 0xda, 0xbf, 0x53, + 0xff, 0x25, 0xc0, 0x7a, 0x5c, 0x06, 0x37, 0x39, 0x41, 0x48, 0x62, 0x6b, 0x51, 0xac, 0xa5, 0x63, + 0xf8, 0x4a, 0xc9, 0xf8, 0xa6, 0x68, 0xdb, 0xf5, 0x43, 0x28, 0x06, 0xdd, 0x21, 0xd6, 0x2b, 0x48, + 0xd3, 0x5d, 0xbb, 0x99, 0xd4, 0x1f, 0x62, 0xcd, 0x82, 0x1a, 0x64, 0xa8, 0xe2, 0xec, 0xca, 0x9f, + 0x8f, 0xb5, 0x15, 0xc2, 0x80, 0xaf, 0x41, 0x91, 0x05, 0x58, 0x60, 0x79, 0x96, 0xe2, 0xf8, 0x2b, + 0xd8, 0x38, 0x44, 0x98, 0x2e, 0xe9, 0x60, 0x72, 0xf2, 0xb9, 0xde, 0x0d, 0x48, 0x86, 0xbd, 0x26, + 0x06, 0x5e, 0x23, 0x06, 0xf8, 0xd8, 0x18, 0x8d, 0xd9, 0x15, 0x78, 0xb6, 0x8b, 0x52, 0x3c, 0xf8, + 0xaa, 0xf3, 0x12, 0x38, 0x8e, 0xeb, 0x50, 0xe2, 0xfc, 0xf8, 0xcc, 0x2c, 0x26, 0x43, 0xd1, 0xce, + 0x0e, 0x91, 0xaf, 0x60, 0x9d, 0xf0, 0xe1, 0x8e, 0x30, 0x5d, 0xcf, 0x0a, 0x1d, 0x3b, 0x89, 0x5c, + 0xa6, 0xb9, 0x87, 0xf5, 0x78, 0xbf, 0x17, 0x98, 0xad, 0x11, 0x0e, 0x5c, 0x91, 0xce, 0xb4, 0xe9, + 0xe2, 0xd1, 0x89, 0x20, 0x94, 0x3f, 0x8a, 0xe0, 0xb9, 0x80, 0x7a, 0x37, 0x3c, 0x48, 0xae, 0xf0, + 0x61, 0xa0, 0x03, 0x95, 0xa8, 0x11, 0x5b, 0x5d, 0x28, 0x44, 0x16, 0x3f, 0x80, 0x3c, 0xcf, 0x2e, + 0xa1, 0x82, 0x24, 0xda, 0x1d, 0x68, 0x98, 0xd3, 0x6a, 0xa4, 0x04, 0x19, 0xa6, 0x1e, 0x7f, 0x10, + 0xea, 0xc0, 0x9b, 0x4d, 0x77, 0x34, 0x9e, 0x60, 0x74, 0x32, 0x1e, 0xda, 0xf8, 0xd8, 0xb5, 0x1d, + 0xec, 0xef, 0x5f, 0x9f, 0xd8, 0xcf, 0xd1, 0x92, 0x8b, 0x2a, 0xed, 0x13, 0xb1, 0x17, 0x1d, 0xda, + 0xf1, 0x57, 0x7f, 0x2f, 0xc2, 0xce, 0x62, 0x5e, 0x2f, 0x9b, 0xe3, 0xde, 0x87, 0x8c, 0x4f, 0x9f, + 0x5e, 0x6e, 0xf7, 0xb2, 0x43, 0x32, 0x35, 0x55, 0x6c, 0x4c, 0xa5, 0xf3, 0x1b, 0x42, 0x0f, 0x72, + 0x43, 0x97, 0x35, 0x42, 0x83, 0xce, 0xf3, 0x97, 0x31, 0xb1, 0x37, 0xeb, 0xbd, 0x4b, 0x67, 0xba, + 0x9c, 0xc7, 0xd6, 0x47, 0x50, 0x8c, 0x0c, 0x90, 0xa8, 0x0e, 0x04, 0x70, 0x84, 0x28, 0xd0, 0x63, + 0x64, 0xb0, 0x86, 0xba, 0xa2, 0x7e, 0x0d, 0x95, 0xa4, 0xf7, 0xa4, 0xe8, 0x33, 0xe1, 0x83, 0xc8, + 0x33, 0xe1, 0x1b, 0x8b, 0x5f, 0xa3, 0x88, 0x2f, 0xd5, 0x3f, 0x09, 0x90, 0x9b, 0x76, 0xdf, 0x62, + 0x9c, 0x12, 0xee, 0x4d, 0x39, 0x12, 0xe5, 0x09, 0xcf, 0x5e, 0x39, 0xe5, 0x21, 0x94, 0x59, 0x97, + 0x6f, 0x42, 0x6f, 0xfc, 0xfa, 0xc8, 0xb5, 0x10, 0x6f, 0x32, 0xde, 0x9d, 0xef, 0xf5, 0xb1, 0xb6, + 0xc0, 0x91, 0x6b, 0xa1, 0xd9, 0x6b, 0x18, 0xd5, 0x3a, 0x9d, 0x50, 0x20, 0x50, 0x0a, 0xaa, 0x2d, + 0x0e, 0x1e, 0x2c, 0xe8, 0x50, 0x10, 0x5c, 0x1b, 0xb0, 0x3a, 0x32, 0x6c, 0x47, 0x9f, 0x8b, 0xb0, + 0xe8, 0x43, 0x9b, 0x98, 0x50, 0x23, 0xcf, 0x4c, 0xdf, 0x24, 0xfa, 0x9b, 0xc3, 0x89, 0x85, 0xf4, + 0x33, 0xc3, 0x47, 0xba, 0x65, 0x60, 0x83, 0x66, 0x92, 0x95, 0x59, 0x7f, 0x9f, 0x4b, 0xe5, 0x05, + 0xd0, 0x57, 0x20, 0xb7, 0x3c, 0x77, 0x7c, 0x3b, 0x55, 0x94, 0x40, 0x95, 0xd9, 0xf1, 0xa6, 0x56, + 0xa0, 0x1c, 0x62, 0x30, 0xe5, 0x7a, 0xf7, 0x84, 0x9c, 0x0d, 0x24, 0x2c, 0x86, 0xda, 0xec, 0xdc, + 0x5f, 0xd2, 0xee, 0x98, 0xde, 0xf4, 0xd5, 0x87, 0xf0, 0xfa, 0x02, 0x06, 0xb3, 0x6c, 0x17, 0xab, + 0x27, 0x58, 0x53, 0x72, 0x0f, 0xaa, 0x4d, 0x77, 0x34, 0xb2, 0x71, 0x82, 0xd4, 0x45, 0x34, 0xdb, + 0xb0, 0x99, 0x40, 0xc3, 0x4d, 0xf9, 0x08, 0x36, 0x1a, 0x67, 0xae, 0xf7, 0x22, 0xfc, 0xb6, 0xa0, + 0x3a, 0x4f, 0xc2, 0xd8, 0xd5, 0x3f, 0x81, 0x52, 0xec, 0x99, 0x3b, 
0x0f, 0xd9, 0x4e, 0x4f, 0x6b, + 0x1f, 0xb6, 0x07, 0xb2, 0xa0, 0x00, 0x64, 0x4e, 0xb4, 0x41, 0xa7, 0x77, 0x28, 0x8b, 0xe4, 0x7b, + 0xbf, 0xd3, 0x6b, 0x0c, 0x1e, 0xcb, 0x52, 0xfd, 0x5e, 0xf8, 0x61, 0x9d, 0x75, 0xc3, 0x15, 0x05, + 0x4a, 0x8d, 0x53, 0xad, 0xaf, 0x77, 0x7a, 0xcd, 0x41, 0xfb, 0xa8, 0xdd, 0xd3, 0x64, 0xa1, 0xbe, + 0x0b, 0xab, 0xf1, 0x37, 0xe7, 0x15, 0x48, 0xf5, 0xfa, 0xbd, 0xb6, 0x2c, 0x90, 0xaf, 0x66, 0xbb, + 0xdb, 0x95, 0x45, 0x25, 0x0b, 0xd2, 0xa0, 0xff, 0x8d, 0x2c, 0xd5, 0xbf, 0x86, 0x7c, 0xb8, 0x5f, + 0x0e, 0x90, 0x69, 0x34, 0xb5, 0xce, 0x2f, 0xc8, 0xea, 0x02, 0xac, 0x74, 0x7a, 0xfc, 0x4f, 0x24, + 0x5a, 0x76, 0xfb, 0x8d, 0x16, 0xd1, 0x8c, 0x9c, 0x4a, 0xb9, 0xd3, 0x5e, 0xf0, 0x9b, 0x22, 0x2b, + 0x4f, 0x8f, 0x5b, 0x0d, 0x8d, 0xfc, 0xa5, 0xeb, 0x47, 0xb0, 0xb1, 0xe8, 0xf5, 0x16, 0x20, 0xd3, + 0x39, 0xec, 0xf5, 0x07, 0x6d, 0xf9, 0x35, 0x45, 0x86, 0x42, 0xfb, 0xdb, 0xe3, 0x76, 0x53, 0xd3, + 0xdb, 0xdf, 0x76, 0x4e, 0x34, 0x59, 0x50, 0xee, 0x80, 0xcc, 0x47, 0x7a, 0xfd, 0x60, 0x54, 0xac, + 0x7f, 0x0e, 0x10, 0xea, 0xe9, 0xe6, 0x21, 0x3b, 0x20, 0xf3, 0x3d, 0xc2, 0x22, 0x07, 0xe9, 0x81, + 0xa6, 0x1f, 0x3f, 0x92, 0x05, 0xa5, 0x02, 0xab, 0x03, 0x4d, 0x6f, 0x1c, 0x68, 0xed, 0x81, 0x7e, + 0xd4, 0x6f, 0x75, 0x0e, 0x1e, 0xcb, 0x62, 0xfd, 0x43, 0x28, 0x46, 0x6f, 0xa0, 0x59, 0x90, 0x8e, + 0x4f, 0x35, 0x06, 0x33, 0xd5, 0xb8, 0xcd, 0x60, 0x6e, 0xb5, 0xbb, 0x6d, 0xad, 0x4d, 0x61, 0xce, + 0xcd, 0x0a, 0xf9, 0x3c, 0x64, 0x0f, 0xfa, 0x83, 0x6f, 0x1a, 0x83, 0x96, 0xfc, 0x1a, 0xb1, 0x71, + 0xbf, 0xd1, 0x7c, 0x44, 0xff, 0x84, 0xfa, 0xa7, 0xc1, 0xd1, 0xc3, 0x71, 0xab, 0xc0, 0xea, 0x89, + 0x36, 0x68, 0x37, 0x8e, 0xf4, 0x76, 0xaf, 0xb1, 0xdf, 0x25, 0x40, 0x08, 0x4a, 0x19, 0x8a, 0x7c, + 0x30, 0x40, 0x91, 0x18, 0x13, 0x3a, 0x82, 0xf2, 0x90, 0x3d, 0x3e, 0xd5, 0x74, 0xe2, 0x09, 0x41, + 0x29, 0x01, 0x30, 0x95, 0xe8, 0xbf, 0x48, 0xfe, 0x99, 0x5a, 0x3a, 0xf3, 0x94, 0x09, 0xe5, 0xb9, + 0xc4, 0xa7, 0xac, 0x42, 0xbe, 0xd5, 0xd4, 0xf4, 0x59, 0xfc, 0x10, 0xaa, 0xa6, 0xa6, 0xb7, 0xfa, + 0xa7, 0xfb, 0x5d, 0x62, 0x1c, 0x5f, 0xb0, 0xdf, 0xef, 0x77, 0xdb, 0x8d, 0x9e, 0x2c, 0x05, 0x0b, + 0x78, 0x90, 0x51, 0xdf, 0xd1, 0x05, 0xdd, 0xfe, 0xbe, 0x9c, 0xad, 0x7f, 0x01, 0xab, 0xf1, 0xcc, + 0x56, 0x81, 0xd5, 0xce, 0xe9, 0x91, 0xde, 0x38, 0x79, 0xdc, 0x6b, 0xea, 0x9d, 0x5e, 0xab, 0xfd, + 0xad, 0xfc, 0x1a, 0x09, 0x3d, 0x32, 0x18, 0x1a, 0x13, 0xea, 0x1f, 0xf3, 0x1c, 0x4c, 0x15, 0x23, + 0x54, 0x9a, 0x7e, 0xd8, 0xed, 0xef, 0x37, 0xba, 0x11, 0x2a, 0x4d, 0xef, 0xf6, 0x9b, 0xd3, 0x31, + 0xe1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x12, 0x13, 0xc3, 0x7a, 0x22, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto new file mode 100644 index 000000000000..a2fab70a18de --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto @@ -0,0 +1,616 @@ +syntax = "proto2"; + +package otsprotocol; + +message Error { + required string code = 1; + optional string message = 2; +} + +enum PrimaryKeyType { + INTEGER = 1; + STRING = 2; + BINARY = 3; +} + +enum PrimaryKeyOption { + AUTO_INCREMENT = 1; +} + +message PrimaryKeySchema { + required string name = 1; + required PrimaryKeyType type = 2; + optional PrimaryKeyOption option = 3; +} + +message PartitionRange { + required bytes begin = 1; // encoded as SQLVariant + required bytes end = 2; // encoded as SQLVariant +} + +enum BloomFilterType { + NONE = 1; + CELL = 2; + ROW = 3; +} + 
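For orientation, the messages in this file compile into plain protobuf structs in the vendored otsprotocol Go package. A minimal, hypothetical sketch of building and marshalling a create-table request for a lock table with a single STRING primary-key column (the table and column names are illustrative only, and it uses the TableMeta and CreateTableRequest messages defined further below in this file):

package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
	"github.com/golang/protobuf/proto"
)

func main() {
	// One STRING primary-key column; proto2 fields are pointers, so the
	// proto.String/proto.Int32 helpers and generated Enum() methods are used.
	req := &otsprotocol.CreateTableRequest{
		TableMeta: &otsprotocol.TableMeta{
			TableName: proto.String("terraform_lock"),
			PrimaryKey: []*otsprotocol.PrimaryKeySchema{{
				Name: proto.String("LockID"),
				Type: otsprotocol.PrimaryKeyType_STRING.Enum(),
			}},
		},
		// ReservedThroughput is required by the request message; zero
		// capacity units leave the table on pay-per-request billing.
		ReservedThroughput: &otsprotocol.ReservedThroughput{
			CapacityUnit: &otsprotocol.CapacityUnit{
				Read:  proto.Int32(0),
				Write: proto.Int32(0),
			},
		},
	}

	body, err := proto.Marshal(req) // wire bytes for the CreateTable API
	if err != nil {
		panic(err)
	}
	fmt.Printf("CreateTableRequest: %d bytes\n", len(body))
}

The SDK's higher-level client wraps these wire structs behind friendlier request types; the sketch only shows what ends up on the wire.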
+message TableOptions {
+    optional int32 time_to_live = 1; // can be changed dynamically
+    optional int32 max_versions = 2; // can be changed dynamically
+    optional BloomFilterType bloom_filter_type = 3; // can be changed dynamically
+    optional int32 block_size = 4; // can be changed dynamically
+    optional int64 deviation_cell_version_in_sec = 5; // can be changed dynamically
+}
+
+message TableMeta {
+    required string table_name = 1;
+    repeated PrimaryKeySchema primary_key = 2;
+    repeated DefinedColumnSchema defined_column = 3;
+    repeated IndexMeta index_meta = 4;
+}
+
+/**
+ * Table status changes correspond only to user operations; internal events such as machine
+ * failover do not change a table's status. There are three considerations behind this:
+ * 1. In typical scenarios, users only check a table's status after modifying the table.
+ * 2. There is a delay between an internal failover causing access errors and the moment a user
+ *    could observe a status change, so the table's unavailable state can never be matched
+ *    exactly to the status the user sees.
+ * 3. After an internal failover it is really a partition's status that changes, not the whole
+ *    table's; the corresponding table status would be PARTIAL_FAILOVER, and since that partition
+ *    granularity cannot be expressed, exposing it would only confuse users further.
+ */
+enum TableStatus {
+    ACTIVE = 1;    // The table is in service.
+    INACTIVE = 2;  // The table was disabled by the user via UnloadTable.
+    LOADING = 3;   // The table is being created and not all partitions are loaded yet, or it was just re-enabled from INACTIVE.
+    UNLOADING = 4; // The table is being deleted (between the delete-table call and all partitions being unloaded), or it is being unloaded from ACTIVE.
+    UPDATING = 5;  // The table is being updated (table attributes or reserved throughput are changing).
+}
+
+enum RowExistenceExpectation {
+    IGNORE = 0;
+    EXPECT_EXIST = 1;
+    EXPECT_NOT_EXIST = 2;
+}
+
+message Condition {
+    required RowExistenceExpectation row_existence = 1;
+    optional bytes column_condition = 2;
+}
+
+message CapacityUnit {
+    optional int32 read = 1;
+    optional int32 write = 2;
+}
+
+message ReservedThroughputDetails {
+    required CapacityUnit capacity_unit = 1; // The table's current reserved throughput.
+    required int64 last_increase_time = 2; // The last time the reserved throughput was increased.
+    optional int64 last_decrease_time = 3; // The last time the reserved throughput was decreased.
+}
+
+message ReservedThroughput {
+    required CapacityUnit capacity_unit = 1;
+}
+
+message ConsumedCapacity {
+    required CapacityUnit capacity_unit = 1;
+}
+
+message StreamSpecification {
+    required bool enable_stream = 1;
+    optional int32 expiration_time = 2;
+}
+
+message StreamDetails {
+    required bool enable_stream = 1;
+    optional string stream_id = 2;
+    optional int32 expiration_time = 3;
+    optional int64 last_enable_time = 4;
+}
+
+/* ############################################# CreateTable ############################################# */
+/**
+ * table_meta stores the table's immutable schema attributes; the mutable ReservedThroughput and
+ * TableOptions are split out so they can double as UpdateTable parameters.
+ * Once GlobalIndex and LocalIndex are added, the structure will become:
+ * message CreateTableRequest {
+ *     required TableMeta table_meta = 1;
+ *     required ReservedThroughput reserved_throughput = 2;
+ *     required TableOptions table_options = 3;
+ *     repeated LocalIndex local_indexes = 4; // A LocalIndex no longer carries its own ReservedThroughput and TableOptions; it shares the main table's.
+ *     repeated GlobalIndex global_indexes = 5; // A GlobalIndex carries its own ReservedThroughput and TableOptions.
+ * }
+ */
+message CreateTableRequest {
+    required TableMeta table_meta = 1;
+    required ReservedThroughput reserved_throughput = 2; // Not placed inside TableOptions because UpdateTableResponse returns ReservedThroughputDetails, and TableOptions has no analogous response structure.
+    optional TableOptions table_options = 3;
+    repeated PartitionRange partitions = 4;
+    optional StreamSpecification stream_spec = 5;
+    repeated IndexMeta index_metas = 7;
+}
+
+message CreateTableResponse {
+}
+
+/* ######################################################################################################### */
+
+
+/* ############################################# UpdateTable ############################################# */
+message UpdateTableRequest {
+    required string table_name = 1;
+    optional ReservedThroughput reserved_throughput = 2;
+    optional TableOptions table_options = 3;
+    optional StreamSpecification stream_spec = 4;
+}
+
+message UpdateTableResponse {
+    required ReservedThroughputDetails reserved_throughput_details = 1;
+    required TableOptions table_options = 2;
+    optional StreamDetails stream_details = 3;
+}
+/* ######################################################################################################### */
+
+/* ############################################# DescribeTable ############################################# */
+message DescribeTableRequest {
+    required string table_name = 1;
+}
+
+message DescribeTableResponse {
+    required TableMeta table_meta = 1;
+    required ReservedThroughputDetails reserved_throughput_details = 2;
+    required TableOptions table_options = 3;
+    required TableStatus table_status = 4;
+    optional StreamDetails stream_details = 5;
+    repeated bytes shard_splits = 6;
+    repeated IndexMeta index_metas = 8;
+}
+/* ########################################################################################################### */
+
+/* ############################################# ListTable ############################################# */
+message ListTableRequest {
+}
+
+/**
+ * Currently this returns only a simple list of names; whether any business scenario needs more
+ * than the table name is still open for discussion.
+ * Additional information could include reserved throughput and table status, but that would only
+ * ever be a rough summary; detailed table information still has to be fetched via DescribeTable.
+ */
+message ListTableResponse {
+    repeated string table_names = 1;
+}
+/* ####################################################################################################### */
+
+/* ############################################# DeleteTable ############################################# */
+message DeleteTableRequest {
+    required string table_name = 1;
+}
+
+message DeleteTableResponse {
+}
+/* ######################################################################################################### */
+
+/* ############################################# LoadTable ############################################# */
+message LoadTableRequest {
+    required string table_name = 1;
+}
+
+message LoadTableResponse {
+}
+/* ######################################################################################################### */
+
+/* ############################################# UnloadTable ############################################# */
+message UnloadTableRequest {
+    required string table_name = 1;
+}
+
+message UnloadTableResponse {
+
+}
+/* ########################################################################################################## */
+
+/**
+ * Timestamps range from a minimum of 0 to a maximum of INT64.MAX.
+ * 1. To query a range, specify start_time and end_time.
+ * 2. To query one specific timestamp, specify specific_time.
+ */
+message TimeRange {
+    optional int64 start_time = 1;
+    optional int64 end_time = 2;
+    optional int64 specific_time = 3;
+}
+
+/* ############################################# GetRow ############################################# */
+
+enum ReturnType {
+    RT_NONE = 0;
+    RT_PK = 1;
+    RT_AFTER_MODIFY = 2;
+}
+
+message ReturnContent {
+    optional ReturnType return_type = 1;
+    repeated string return_column_names = 2;
+}
+
+/**
+ * 1. Users may specify a version-timestamp range, or one specific version time, to read the
+ *    designated versions of a column.
+ * 2. Resumption points inside a row are not supported yet.
+ */
+message GetRowRequest {
+    required string table_name = 1;
+    required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    repeated string columns_to_get = 3; // if unspecified, all columns are read
+    optional TimeRange time_range = 4;
+    optional int32 max_versions = 5;
+    optional bool cache_blocks = 6 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 7;
+    optional string start_column = 8;
+    optional string end_column = 9;
+    optional bytes token = 10;
+    optional string transaction_id = 11;
+}
+
+message GetRowResponse {
+    required ConsumedCapacity consumed = 1;
+    required bytes row = 2; // encoded as InplaceRowChangeSet
+    optional bytes next_token = 3;
+}
+/* #################################################################################################### */
+
+/* ############################################# UpdateRow ############################################# */
+message UpdateRowRequest {
+    required string table_name = 1;
+    required bytes row_change = 2;
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message UpdateRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+
+/* ####################################################################################################### */
+
+/* ############################################# PutRow ############################################# */
+
+
+/**
+ * Users are allowed to set a timestamp per column here instead of being forced into one uniform
+ * timestamp for the whole row.
+ * The reason is that all columns use one common structure, which already carries a timestamp;
+ * besides, forcing a uniform timestamp would add regularity at the cost of flexibility, and that
+ * regularity has no obvious benefit while complicating the structure.
+ */
+message PutRowRequest {
+    required string table_name = 1;
+    required bytes row = 2; // encoded as InplaceRowChangeSet
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message PutRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+/* #################################################################################################### */
+
+/* ############################################# DeleteRow ############################################# */
+/**
+ * OTS only supports deleting all versions of all columns in a row. It does not support:
+ * 1. deleting, for every column, all versions less than or equal to a given version.
+ */
+message DeleteRowRequest {
+    required string table_name = 1;
+    required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message DeleteRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+/* ####################################################################################################### */
+
+/* ############################################# BatchGetRow ############################################# */
+/**
+ * HBase allows every row in a batch operation to carry its own query parameters; OTS does not.
+ */
+message TableInBatchGetRowRequest {
+    required string table_name = 1;
+    repeated bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    repeated bytes token = 3;
+    repeated string columns_to_get = 4; // if unspecified, all columns are read
+    optional TimeRange time_range = 5;
+    optional int32 max_versions = 6;
+    optional bool cache_blocks = 7 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 8;
+    optional string start_column = 9;
+    optional string end_column = 10;
+}
+
+message BatchGetRowRequest {
+    repeated TableInBatchGetRowRequest tables = 1;
+}
+
+message RowInBatchGetRowResponse {
+    required bool is_ok = 1;
+    optional Error error = 2;
+    optional ConsumedCapacity consumed = 3;
+    optional bytes row = 4; // encoded as InplaceRowChangeSet
+    optional bytes next_token = 5;
+}
+
+message TableInBatchGetRowResponse {
+    required string table_name = 1;
+    repeated RowInBatchGetRowResponse rows = 2;
+}
+
+message BatchGetRowResponse {
+    repeated TableInBatchGetRowResponse tables = 1;
+}
+/* ######################################################################################################### */
+
+/* ############################################# BatchWriteRow ############################################# */
+
+enum OperationType {
+    PUT = 1;
+    UPDATE = 2;
+    DELETE = 3;
+}
+
+message RowInBatchWriteRowRequest {
+    required OperationType type = 1;
+    required bytes row_change = 2; // encoded as InplaceRowChangeSet
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+}
+
+message TableInBatchWriteRowRequest {
+    required string table_name = 1;
+    repeated RowInBatchWriteRowRequest rows = 2;
+}
+
+message BatchWriteRowRequest {
+    repeated TableInBatchWriteRowRequest tables = 1;
+    optional string transaction_id = 2;
+}
+
+message RowInBatchWriteRowResponse {
+    required bool is_ok = 1;
+    optional Error error = 2;
+    optional ConsumedCapacity consumed = 3;
+    optional bytes row = 4;
+}
+
+message TableInBatchWriteRowResponse {
+    required string table_name = 1;
+    repeated RowInBatchWriteRowResponse rows = 2;
+}
+
+message BatchWriteRowResponse {
+    repeated TableInBatchWriteRowResponse tables = 1;
+}
+/* ########################################################################################################### */
+
+/* ############################################# GetRange ############################################# */
+enum Direction {
+    FORWARD = 0;
+    BACKWARD = 1;
+}
+
+/**
+ * HBase supports the following parameters:
+ * 1. a TimeRange, or one specific time
+ * 2. a Filter (filtering by column value or column name)
+ * We only support selection conditions that apply within a single version.
+ */
+message GetRangeRequest {
+    required string table_name = 1;
+    required Direction direction = 2;
+    repeated string columns_to_get = 3; // if unspecified, all columns are read
+    optional TimeRange time_range = 4;
+    optional int32 max_versions = 5;
+    optional int32 limit = 6;
+    required bytes inclusive_start_primary_key = 7; // encoded as InplaceRowChangeSet, but only has primary key
+    required bytes exclusive_end_primary_key = 8; // encoded as InplaceRowChangeSet, but only has primary key
+    optional bool cache_blocks = 9 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 10;
+    optional string start_column = 11;
+    optional string end_column = 12;
+    optional bytes token = 13;
+    optional string transaction_id = 14;
+}
+
+message GetRangeResponse {
+    required ConsumedCapacity consumed = 1;
+    required bytes rows = 2; // encoded as InplaceRowChangeSet
+    optional bytes next_start_primary_key = 3; // if empty, all data has been read; encoded as InplaceRowChangeSet, but only has primary key
+    optional bytes next_token = 4;
+}
+/* ###################################################################################################### */
+/* ############################################# Stream ############################################# */
+
+message ListStreamRequest {
+    optional string table_name = 1;
+}
+
+message Stream {
+    required string stream_id = 1;
+    required string table_name = 2;
+    required int64 creation_time = 3;
+}
+
+message ListStreamResponse {
+    repeated Stream streams = 1;
+}
+
+message StreamShard {
+    required string shard_id = 1;
+    optional string parent_id = 2;
+    optional string parent_sibling_id = 3;
+}
+
+enum StreamStatus {
+    STREAM_ENABLING = 1;
+    STREAM_ACTIVE = 2;
+}
+
+message DescribeStreamRequest {
+    required string stream_id = 1;
+    optional string inclusive_start_shard_id = 2;
+    optional int32 shard_limit = 3;
+}
+
+message DescribeStreamResponse {
+    required string stream_id = 1;
+    required int32 expiration_time = 2;
+    required string table_name = 3;
+    required int64 creation_time = 4;
+    required StreamStatus stream_status = 5;
+    repeated StreamShard shards = 6;
+    optional string next_shard_id = 7;
+}
+
+message GetShardIteratorRequest {
+    required string stream_id = 1;
+    required string shard_id = 2;
+    optional int64 timestamp = 3;
+    optional string token = 4;
+}
+
+message GetShardIteratorResponse {
+    required string shard_iterator = 1;
+    optional string next_token = 2;
+}
+
+message GetStreamRecordRequest {
+    required string shard_iterator = 1;
+    optional int32 limit = 2;
+}
+
+enum ActionType {
+    PUT_ROW = 1;
+    UPDATE_ROW = 2;
+    DELETE_ROW = 3;
+}
+
+message GetStreamRecordResponse {
+    message StreamRecord {
+        required ActionType action_type = 1;
+        required bytes record = 2;
+    }
+    repeated StreamRecord stream_records = 1;
+    optional string next_shard_iterator = 2;
+}
+
+/* +++++ ComputeSplitPointsBySize +++++ */
+message ComputeSplitPointsBySizeRequest {
+    required string table_name = 1;
+    required int64 split_size = 2; // in units of 100 MB
+}
+
+message ComputeSplitPointsBySizeResponse {
+    required ConsumedCapacity consumed = 1;
+    repeated PrimaryKeySchema schema = 2;
+
+    /**
+     * Split points between splits, in increasing order.
+     *
+     * A split is a consecutive range of primary keys,
+     * whose data size is about the split_size specified in the request.
+     * The size can be hard to make precise.
+     *
+     * A split point is an array of primary-key columns w.r.t. the table schema,
+     * which is never longer than that of the table schema.
+ * Tailing -inf will be omitted to reduce transmission payloads. + */ + repeated bytes split_points = 3; + + /** + * Locations where splits lies in. + * + * By the managed nature of TableStore, these locations are no more than hints. + * If a location is not suitable to be seen, an empty string will be placed. + */ + message SplitLocation { + required string location = 1; + required sint64 repeat = 2; + } + repeated SplitLocation locations = 4; +} +/* -------------------------------------- */ + +enum DefinedColumnType { + DCT_INTEGER = 1; + DCT_DOUBLE = 2; + DCT_BOOLEAN = 3; + DCT_STRING = 4; + // field 5 is reserved for date type, not supported yet + // field 6 is reserved for decimal type, not supported yet + DCT_BLOB = 7; +} + +message DefinedColumnSchema { + required string name = 1; + required DefinedColumnType type = 2; +} + +enum IndexUpdateMode { + IUM_ASYNC_INDEX = 0; + IUM_SYNC_INDEX = 1; +} + +enum IndexType { + IT_GLOBAL_INDEX = 0; + IT_LOCAL_INDEX = 1; +} + +message IndexMeta { + required string name = 1; + repeated string primary_key = 2; + repeated string defined_column = 3; + required IndexUpdateMode index_update_mode = 4; + required IndexType index_type = 5; +} + +message CreateIndexRequest { + required string main_table_name = 1; + required IndexMeta index_meta = 2; + optional bool include_base_data = 3; +} + +message CreateIndexResponse { +} + +message DropIndexRequest { + required string main_table_name = 1; + required string index_name = 2; +} + +message DropIndexResponse { +} + +/* ########################################### LocalTransaction ########################################### */ +message StartLocalTransactionRequest { + required string table_name = 1; + required bytes key = 2; // encoded as SQLVariant +} + +message StartLocalTransactionResponse { + required string transaction_id = 1; +}; + +message CommitTransactionRequest { + required string transaction_id = 1; +} + +message CommitTransactionResponse { +}; + +message AbortTransactionRequest { + required string transaction_id = 1; +} + +message AbortTransactionResponse { +}; + +/* ######################################################################################################### */ \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go new file mode 100644 index 000000000000..681d3b08f09c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go @@ -0,0 +1,473 @@ +package tablestore + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + HEADER = 0x75 + + // tag type + TAG_ROW_PK = 0x1 + TAG_ROW_DATA = 0x2 + TAG_CELL = 0x3 + TAG_CELL_NAME = 0x4 + TAG_CELL_VALUE = 0x5 + TAG_CELL_TYPE = 0x6 + TAG_CELL_TIMESTAMP = 0x7 + TAG_DELETE_ROW_MARKER = 0x8 + TAG_ROW_CHECKSUM = 0x9 + TAG_CELL_CHECKSUM = 0x0A + TAG_EXTENSION = 0x0B + TAG_SEQ_INFO = 0x0C + TAG_SEQ_INFO_EPOCH = 0x0D + TAG_SEQ_INFO_TS = 0x0E + TAG_SEQ_INFO_ROW_INDEX = 0x0F + + // cell op type + DELETE_ALL_VERSION = 0x1 + DELETE_ONE_VERSION = 0x3 + INCREMENT = 0x4; + + // variant type + VT_INTEGER = 0x0 + VT_DOUBLE = 0x1 + VT_BOOLEAN = 0x2 + VT_STRING = 0x3 + + //public final static byte VT_NULL = 0x6; + VT_BLOB = 0x7 + VT_INF_MIN = 0x9 + VT_INF_MAX = 0xa + VT_AUTO_INCREMENT = 0xb + + LITTLE_ENDIAN_32_SIZE = 4 + LITTLE_ENDIAN_64_SIZE = 8 +) + +const spaceSize = 256 + +var crc8Table = make([]byte, spaceSize) + +func init() { + for i := 0; i < 
spaceSize; i++ { + x := byte(i) + for j := 8; j > 0; j-- { + if (x & 0x80) != 0 { + x = (x << 1) ^ 0x07 + } else { + x = (x << 1) ^ 0 + } + } + crc8Table[i] = x + } +} + +func crc8Byte(crc, in byte) byte { + return crc8Table[(crc^in)&0xff] +} + +func crc8Int32(crc byte, in int32) byte { + for i := 0; i < 4; i++ { + crc = crc8Byte(crc, byte((in & 0xff))) + in >>= 8 + } + + return crc +} + +func crc8Int64(crc byte, in int64) byte { + for i := 0; i < 8; i++ { + crc = crc8Byte(crc, byte((in & 0xff))) + in >>= 8 + } + + return crc +} + +func crc8Bytes(crc byte, in []byte) byte { + for i := 0; i < len(in); i++ { + crc = crc8Byte(crc, in[i]) + } + + return crc +} + +func writeRawByte(w io.Writer, value byte) { + w.Write([]byte{value}) +} + +/*func writeRawByteInt8(w io.Writer, value int) { + w.Write([]byte{byte(value)}) +}*/ + +func writeRawLittleEndian32(w io.Writer, value int32) { + w.Write([]byte{byte((value) & 0xFF)}) + w.Write([]byte{byte((value >> 8) & 0xFF)}) + w.Write([]byte{byte((value >> 16) & 0xFF)}) + w.Write([]byte{byte((value >> 24) & 0xFF)}) +} + +func writeRawLittleEndian64(w io.Writer, value int64) { + w.Write([]byte{byte((value) & 0xFF)}) + w.Write([]byte{byte((value >> 8) & 0xFF)}) + w.Write([]byte{byte((value >> 16) & 0xFF)}) + w.Write([]byte{byte((value >> 24) & 0xFF)}) + w.Write([]byte{byte((value >> 32) & 0xFF)}) + w.Write([]byte{byte((value >> 40) & 0xFF)}) + w.Write([]byte{byte((value >> 48) & 0xFF)}) + w.Write([]byte{byte((value >> 56) & 0xFF)}) +} + +func writeDouble(w io.Writer, value float64) { + writeRawLittleEndian64(w, int64(math.Float64bits(value))) +} + +func writeBoolean(w io.Writer, value bool) { + if value { + w.Write([]byte{byte(1)}) + } else { + w.Write([]byte{byte(0)}) + } +} + +func writeBytes(w io.Writer, value []byte) { + w.Write(value) +} + +func writeHeader(w io.Writer) { + writeRawLittleEndian32(w, HEADER) +} + +func writeTag(w io.Writer, tag byte) { + writeRawByte(w, tag) +} + +func writeCellName(w io.Writer, name []byte) { + writeTag(w, TAG_CELL_NAME) + writeRawLittleEndian32(w, int32(len(name))) + writeBytes(w, name) +} + +type PlainBufferCell struct { + cellName []byte + cellValue *ColumnValue + cellTimestamp int64 + cellType byte + ignoreValue bool + hasCellTimestamp bool + hasCellType bool +} + +func (cell *PlainBufferCell) writeCell(w io.Writer) { + writeTag(w, TAG_CELL) + writeCellName(w, cell.cellName) + if cell.ignoreValue == false { + cell.cellValue.writeCellValue(w) + } + + if cell.hasCellType { + writeTag(w, TAG_CELL_TYPE) + writeRawByte(w, cell.cellType) + } + + if cell.hasCellTimestamp { + writeTag(w, TAG_CELL_TIMESTAMP) + writeRawLittleEndian64(w, cell.cellTimestamp) + } + + writeTag(w, TAG_CELL_CHECKSUM) + writeRawByte(w, cell.getCheckSum(byte(0x0))) +} + +func (cell *PlainBufferCell) getCheckSum(crc byte) byte { + crc = crc8Bytes(crc, cell.cellName) + if cell.ignoreValue == false { + crc = cell.cellValue.getCheckSum(crc) + } + + if cell.hasCellTimestamp { + crc = crc8Int64(crc, cell.cellTimestamp) + } + if cell.hasCellType { + crc = crc8Byte(crc, cell.cellType) + } + return crc +} + +type PlainBufferRow struct { + primaryKey []*PlainBufferCell + cells []*PlainBufferCell + hasDeleteMarker bool + extension *RecordSequenceInfo // optional +} + +func (row *PlainBufferRow) writeRow(w io.Writer) { + /* pk */ + writeTag(w, TAG_ROW_PK) + for _, pk := range row.primaryKey { + pk.writeCell(w) + } + + if len(row.cells) > 0 { + writeTag(w, TAG_ROW_DATA) + for _, cell := range row.cells { + cell.writeCell(w) + } + } + + writeTag(w, 
TAG_ROW_CHECKSUM) + writeRawByte(w, row.getCheckSum(byte(0x0))) +} + +func (row *PlainBufferRow) writeRowWithHeader(w io.Writer) { + writeHeader(w) + row.writeRow(w) +} + +func (row *PlainBufferRow) getCheckSum(crc byte) byte { + for _, cell := range row.primaryKey { + crcCell := cell.getCheckSum(byte(0x0)) + crc = crc8Byte(crc, crcCell) + } + + for _, cell := range row.cells { + crcCell := cell.getCheckSum(byte(0x0)) + crc = crc8Byte(crc, crcCell) + } + + del := byte(0x0) + if row.hasDeleteMarker { + del = byte(0x1) + } + + crc = crc8Byte(crc, del) + + return crc +} + +func readRawByte(r *bytes.Reader) byte { + if r.Len() == 0 { + panic(errUnexpectIoEnd) + } + + b, _ := r.ReadByte() + + return b +} + +func readTag(r *bytes.Reader) int { + return int(readRawByte(r)) +} + +func readRawLittleEndian64(r *bytes.Reader) int64 { + if r.Len() < 8 { + panic(errUnexpectIoEnd) + } + + var v int64 + binary.Read(r, binary.LittleEndian, &v) + + return v +} + +func readRawLittleEndian32(r *bytes.Reader) int32 { + if r.Len() < 4 { + panic(errUnexpectIoEnd) + } + + var v int32 + binary.Read(r, binary.LittleEndian, &v) + + return v +} + +func readBoolean(r *bytes.Reader) bool { + return readRawByte(r) != 0 +} + +func readBytes(r *bytes.Reader, size int32) []byte { + if int32(r.Len()) < size { + panic(errUnexpectIoEnd) + } + v := make([]byte, size) + r.Read(v) + return v +} + +func readCellValue(r *bytes.Reader) *ColumnValue { + value := new(ColumnValue) + readRawLittleEndian32(r) + tp := readRawByte(r) + switch tp { + case VT_INTEGER: + value.Type = ColumnType_INTEGER + value.Value = readRawLittleEndian64(r) + case VT_DOUBLE: + value.Type = ColumnType_DOUBLE + value.Value = math.Float64frombits(uint64(readRawLittleEndian64(r))) + case VT_BOOLEAN: + value.Type = ColumnType_BOOLEAN + value.Value = readBoolean(r) + case VT_STRING: + value.Type = ColumnType_STRING + value.Value = string(readBytes(r, readRawLittleEndian32(r))) + case VT_BLOB: + value.Type = ColumnType_BINARY + value.Value = []byte(readBytes(r, readRawLittleEndian32(r))) + } + return value +} + +func readCell(r *bytes.Reader) *PlainBufferCell { + cell := new(PlainBufferCell) + tag := readTag(r) + if tag != TAG_CELL_NAME { + panic(errTag) + } + + cell.cellName = readBytes(r, readRawLittleEndian32(r)) + tag = readTag(r) + + if tag == TAG_CELL_VALUE { + cell.cellValue = readCellValue(r) + tag = readTag(r) + } + if tag == TAG_CELL_TYPE { + readRawByte(r) + tag = readTag(r) + } + + if tag == TAG_CELL_TIMESTAMP { + cell.cellTimestamp = readRawLittleEndian64(r) + tag = readTag(r) + } + + if tag == TAG_CELL_CHECKSUM { + readRawByte(r) + } else { + panic(errNoChecksum) + } + + return cell +} + +func readRowPk(r *bytes.Reader) []*PlainBufferCell { + primaryKeyColumns := make([]*PlainBufferCell, 0, 4) + + tag := readTag(r) + for tag == TAG_CELL { + primaryKeyColumns = append(primaryKeyColumns, readCell(r)) + tag = readTag(r) + } + + r.Seek(-1, 1) + + return primaryKeyColumns +} + +func readRowData(r *bytes.Reader) []*PlainBufferCell { + columns := make([]*PlainBufferCell, 0, 10) + + tag := readTag(r) + for tag == TAG_CELL { + columns = append(columns, readCell(r)) + tag = readTag(r) + } + + r.Seek(-1, 1) + + return columns +} + +func readRow(r *bytes.Reader) *PlainBufferRow { + row := new(PlainBufferRow) + tag := readTag(r) + if tag == TAG_ROW_PK { + row.primaryKey = readRowPk(r) + tag = readTag(r) + } + + if tag == TAG_ROW_DATA { + row.cells = readRowData(r) + tag = readTag(r) + } + + if tag == TAG_DELETE_ROW_MARKER { + row.hasDeleteMarker = true + tag = 
readTag(r) + } + + if tag == TAG_EXTENSION { + row.extension = readRowExtension(r) + tag = readTag(r) + } + + if tag == TAG_ROW_CHECKSUM { + readRawByte(r) + } else { + panic(errNoChecksum) + } + return row +} + +func readRowsWithHeader(r *bytes.Reader) (rows []*PlainBufferRow, err error) { + defer func() { + if err2 := recover(); err2 != nil { + if _, ok := err2.(error); ok { + err = err2.(error) + } + return + } + }() + + // TODO: panic + if readRawLittleEndian32(r) != HEADER { + return nil, fmt.Errorf("Invalid header from plain buffer") + } + + rows = make([]*PlainBufferRow, 0, 10) + + for r.Len() > 0 { + rows = append(rows, readRow(r)) + } + + return rows, nil +} + +func readRowExtension(r *bytes.Reader) *RecordSequenceInfo { + readRawLittleEndian32(r) // useless + tag := readTag(r) + if tag != TAG_SEQ_INFO { + panic(errTag) + } + + readRawLittleEndian32(r) // useless + tag = readTag(r) + if tag != TAG_SEQ_INFO_EPOCH { + panic(errTag) + } + epoch := readRawLittleEndian32(r) + + tag = readTag(r) + if tag != TAG_SEQ_INFO_TS { + panic(errTag) + } + ts := readRawLittleEndian64(r) + + tag = readTag(r) + if tag != TAG_SEQ_INFO_ROW_INDEX { + panic(errTag) + } + rowIndex := readRawLittleEndian32(r) + + ext := RecordSequenceInfo{} + ext.Epoch = epoch + ext.Timestamp = ts + ext.RowIndex = rowIndex + return &ext +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go new file mode 100644 index 000000000000..60b19cba0325 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go @@ -0,0 +1,14 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type Collapse struct { + FieldName string +} + +func (c *Collapse) ProtoBuffer() (*otsprotocol.Collapse, error) { + pb := &otsprotocol.Collapse{ + FieldName: &c.FieldName, + } + return pb, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go new file mode 100644 index 000000000000..bcd62bbbe26f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go @@ -0,0 +1,85 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type QueryType int + +const ( + QueryType_None QueryType = 0 + QueryType_MatchQuery QueryType = 1 + QueryType_MatchPhraseQuery QueryType = 2 + QueryType_TermQuery QueryType = 3 + QueryType_RangeQuery QueryType = 4 + QueryType_PrefixQuery QueryType = 5 + QueryType_BoolQuery QueryType = 6 + QueryType_ConstScoreQuery QueryType = 7 + QueryType_FunctionScoreQuery QueryType = 8 + QueryType_NestedQuery QueryType = 9 + QueryType_WildcardQuery QueryType = 10 + QueryType_MatchAllQuery QueryType = 11 + QueryType_GeoBoundingBoxQuery QueryType = 12 + QueryType_GeoDistanceQuery QueryType = 13 + QueryType_GeoPolygonQuery QueryType = 14 + QueryType_TermsQuery QueryType = 15 +) + +func (q QueryType) Enum() *QueryType { + newQuery := q + return &newQuery +} + +func (q QueryType) ToPB() *otsprotocol.QueryType { + switch q { + case QueryType_None: + return nil + case QueryType_MatchQuery: + return otsprotocol.QueryType_MATCH_QUERY.Enum() + case QueryType_MatchPhraseQuery: + return otsprotocol.QueryType_MATCH_PHRASE_QUERY.Enum() + case QueryType_TermQuery: + return otsprotocol.QueryType_TERM_QUERY.Enum() + case 
QueryType_RangeQuery: + return otsprotocol.QueryType_RANGE_QUERY.Enum() + case QueryType_PrefixQuery: + return otsprotocol.QueryType_PREFIX_QUERY.Enum() + case QueryType_BoolQuery: + return otsprotocol.QueryType_BOOL_QUERY.Enum() + case QueryType_ConstScoreQuery: + return otsprotocol.QueryType_CONST_SCORE_QUERY.Enum() + case QueryType_FunctionScoreQuery: + return otsprotocol.QueryType_FUNCTION_SCORE_QUERY.Enum() + case QueryType_NestedQuery: + return otsprotocol.QueryType_NESTED_QUERY.Enum() + case QueryType_WildcardQuery: + return otsprotocol.QueryType_WILDCARD_QUERY.Enum() + case QueryType_MatchAllQuery: + return otsprotocol.QueryType_MATCH_ALL_QUERY.Enum() + case QueryType_GeoBoundingBoxQuery: + return otsprotocol.QueryType_GEO_BOUNDING_BOX_QUERY.Enum() + case QueryType_GeoDistanceQuery: + return otsprotocol.QueryType_GEO_DISTANCE_QUERY.Enum() + case QueryType_GeoPolygonQuery: + return otsprotocol.QueryType_GEO_POLYGON_QUERY.Enum() + case QueryType_TermsQuery: + return otsprotocol.QueryType_TERMS_QUERY.Enum() + default: + panic("unexpected") + } +} + +type Query interface { + Type() QueryType + Serialize() ([]byte, error) + ProtoBuffer() (*otsprotocol.Query, error) +} + +func BuildPBForQuery(q Query) (*otsprotocol.Query, error) { + query := &otsprotocol.Query{} + query.Type = q.Type().ToPB() + data, err := q.Serialize() + if err != nil { + return nil, err + } + query.Query = data + return query, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go new file mode 100644 index 000000000000..230d2e00f39e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go @@ -0,0 +1,75 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type BoolQuery struct { + MustQueries []Query + MustNotQueries []Query + FilterQueries []Query + ShouldQueries []Query + MinimumShouldMatch *int32 +} + +func (q *BoolQuery) Type() QueryType { + return QueryType_BoolQuery +} + +func (q *BoolQuery) Serialize() ([]byte, error) { + query := &otsprotocol.BoolQuery{} + if q.MustQueries != nil { + pbMustQs := make([]*otsprotocol.Query, 0) + for _, mustQ := range q.MustQueries { + pbQ, err := mustQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbMustQs = append(pbMustQs, pbQ) + } + query.MustQueries = pbMustQs + } + if q.MustNotQueries != nil { + pbMustNotQs := make([]*otsprotocol.Query, 0) + for _, mustNotQ := range q.MustNotQueries { + pbQ, err := mustNotQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbMustNotQs = append(pbMustNotQs, pbQ) + } + query.MustNotQueries = pbMustNotQs + } + if q.FilterQueries != nil { + pbFilterQs := make([]*otsprotocol.Query, 0) + for _, filterQ := range q.FilterQueries { + pbQ, err := filterQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbFilterQs = append(pbFilterQs, pbQ) + } + query.FilterQueries = pbFilterQs + } + if q.ShouldQueries != nil { + pbShouldQs := make([]*otsprotocol.Query, 0) + for _, shouldQ := range q.ShouldQueries { + pbQ, err := shouldQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbShouldQs = append(pbShouldQs, pbQ) + } + query.ShouldQueries = pbShouldQs + } + if (q.MinimumShouldMatch != nil) { + query.MinimumShouldMatch = q.MinimumShouldMatch + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *BoolQuery) ProtoBuffer() 
(*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go new file mode 100644 index 000000000000..124d826281c3 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type ConstScoreQuery struct { + Filter Query +} + +func (q *ConstScoreQuery) Type() QueryType { + return QueryType_ConstScoreQuery +} + +func (q *ConstScoreQuery) Serialize() ([]byte, error) { + query := &otsprotocol.ConstScoreQuery{} + pbQ, err := q.Filter.ProtoBuffer() + if err != nil { + return nil, err + } + query.Filter = pbQ + data, err := proto.Marshal(query) + return data, err +} + +func (q *ConstScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go new file mode 100644 index 000000000000..c1115305ebf8 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go @@ -0,0 +1,49 @@ +package search + +import ( + "errors" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type FieldValueFactor struct { + FieldName string +} + +func (f *FieldValueFactor) ProtoBuffer() (*otsprotocol.FieldValueFactor, error) { + pb := &otsprotocol.FieldValueFactor{} + pb.FieldName = &f.FieldName + return pb, nil +} + +type FunctionScoreQuery struct { + Query Query + FieldValueFactor *FieldValueFactor +} + +func (q *FunctionScoreQuery) Type() QueryType { + return QueryType_FunctionScoreQuery +} + +func (q *FunctionScoreQuery) Serialize() ([]byte, error) { + if q.Query == nil || q.FieldValueFactor == nil { + return nil, errors.New("FunctionScoreQuery: Query or FieldValueFactor is nil") + } + query := &otsprotocol.FunctionScoreQuery{} + pbQ, err := q.Query.ProtoBuffer() + if err != nil { + return nil, err + } + query.Query = pbQ + pbF, err := q.FieldValueFactor.ProtoBuffer() + if err != nil { + return nil, err + } + query.FieldValueFactor = pbF + data, err := proto.Marshal(query) + return data, err +} + +func (q *FunctionScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go new file mode 100644 index 000000000000..9dcbfe81012a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoBoundingBoxQuery struct { + FieldName string + TopLeft string + BottomRight string +} + +func (q *GeoBoundingBoxQuery) Type() QueryType { + return QueryType_GeoBoundingBoxQuery +} + +func (q *GeoBoundingBoxQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoBoundingBoxQuery{} + query.FieldName = &q.FieldName + 
query.TopLeft = &q.TopLeft + query.BottomRight = &q.BottomRight + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoBoundingBoxQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go new file mode 100644 index 000000000000..5906cb692b90 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoDistanceQuery struct { + FieldName string + CenterPoint string + DistanceInMeter float64 +} + +func (q *GeoDistanceQuery) Type() QueryType { + return QueryType_GeoDistanceQuery +} + +func (q *GeoDistanceQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoDistanceQuery{} + query.FieldName = &q.FieldName + query.CenterPoint = &q.CenterPoint + query.Distance = &q.DistanceInMeter + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoDistanceQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go new file mode 100644 index 000000000000..f38efe7aa118 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoPolygonQuery struct { + FieldName string + Points []string +} + +func (q *GeoPolygonQuery) Type() QueryType { + return QueryType_GeoPolygonQuery +} + +func (q *GeoPolygonQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoPolygonQuery{} + query.FieldName = &q.FieldName + query.Points = q.Points + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoPolygonQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go new file mode 100644 index 000000000000..d02f85c00c62 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go @@ -0,0 +1,68 @@ +package search + +import ( + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type QueryOperator int8 + +const ( + QueryOperator_OR QueryOperator = 0 + QueryOperator_AND QueryOperator = 1 +) + +func (x QueryOperator) Enum() *QueryOperator { + p := new(QueryOperator) + *p = x + return p +} + +func (o *QueryOperator) ProtoBuffer() (*otsprotocol.QueryOperator, error) { + if o == nil { + return nil, errors.New("query operator is nil") + } + if *o == QueryOperator_OR { + return otsprotocol.QueryOperator_OR.Enum(), nil + } else if *o == QueryOperator_AND { + return otsprotocol.QueryOperator_AND.Enum(), nil + } else { + return nil, errors.New("unknown query operator: " + fmt.Sprintf("%#v", *o)) + } +} + +type MatchQuery struct { + FieldName string + Text string + 
MinimumShouldMatch *int32 + Operator *QueryOperator +} + +func (q *MatchQuery) Type() QueryType { + return QueryType_MatchQuery +} + +func (q *MatchQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchQuery{} + query.FieldName = &q.FieldName + query.Text = &q.Text + if q.MinimumShouldMatch != nil { + query.MinimumShouldMatch = q.MinimumShouldMatch + } + if q.Operator != nil { + pbOperator, err := q.Operator.ProtoBuffer() + if err != nil { + return nil, err + } + query.Operator = pbOperator + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go new file mode 100644 index 000000000000..fc4511bdef46 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type MatchPhraseQuery struct { + FieldName string + Text string +} + +func (q *MatchPhraseQuery) Type() QueryType { + return QueryType_MatchPhraseQuery +} + +func (q *MatchPhraseQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchPhraseQuery{} + query.FieldName = &q.FieldName + query.Text = &q.Text + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchPhraseQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go new file mode 100644 index 000000000000..778dbecdc928 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go @@ -0,0 +1,23 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type MatchAllQuery struct { +} + +func (q *MatchAllQuery) Type() QueryType { + return QueryType_MatchAllQuery +} + +func (q *MatchAllQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchAllQuery{} + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchAllQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go new file mode 100644 index 000000000000..15a9bad2210c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go @@ -0,0 +1,54 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type ScoreModeType int + +const ( + ScoreMode_None ScoreModeType = 1 + ScoreMode_Avg ScoreModeType = 2 + ScoreMode_Max ScoreModeType = 3 + ScoreMode_Total ScoreModeType = 4 + ScoreMode_Min ScoreModeType = 5 +) + +type NestedQuery struct { + Path string + Query Query + ScoreMode ScoreModeType +} + +func (q *NestedQuery) Type() QueryType { + return QueryType_NestedQuery +} + +func (q *NestedQuery) Serialize() ([]byte, error) { + query := &otsprotocol.NestedQuery{} + 
pbQ, err := q.Query.ProtoBuffer() + if err != nil { + return nil, err + } + query.Query = pbQ + query.Path = &q.Path + switch q.ScoreMode { + case ScoreMode_None: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_NONE.Enum() + case ScoreMode_Avg: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_AVG.Enum() + case ScoreMode_Max: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MAX.Enum() + case ScoreMode_Min: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MIN.Enum() + case ScoreMode_Total: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_TOTAL.Enum() + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *NestedQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go new file mode 100644 index 000000000000..a94d0cdb634e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type PrefixQuery struct { + FieldName string + Prefix string +} + +func (q *PrefixQuery) Type() QueryType { + return QueryType_PrefixQuery +} + +func (q *PrefixQuery) Serialize() ([]byte, error) { + query := &otsprotocol.PrefixQuery{} + query.FieldName = &q.FieldName + query.Prefix = &q.Prefix + data, err := proto.Marshal(query) + return data, err +} + +func (q *PrefixQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go new file mode 100644 index 000000000000..28e3353bba3e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go @@ -0,0 +1,75 @@ +package search + +import ( + "errors" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type RangeQuery struct { + FieldName string + From interface{} + To interface{} + IncludeLower bool + IncludeUpper bool +} + +func (q *RangeQuery) GT(value interface{}) { + q.from(value, false) +} + +func (q *RangeQuery) GTE(value interface{}) { + q.from(value, true) +} + +func (q *RangeQuery) LT(value interface{}) { + q.to(value, false) +} + +func (q *RangeQuery) LTE(value interface{}) { + q.to(value, true) +} + +func (q *RangeQuery) from(value interface{}, includeLower bool) { + q.From = value + q.IncludeLower = includeLower +} + +func (q *RangeQuery) to(value interface{}, includeUpper bool) { + q.To = value + q.IncludeUpper = includeUpper +} + +func (q *RangeQuery) Type() QueryType { + return QueryType_RangeQuery +} + +func (q *RangeQuery) Serialize() ([]byte, error) { + if q.FieldName == "" { + return nil, errors.New("RangeQuery: fieldName not set.") + } + query := &otsprotocol.RangeQuery{} + query.FieldName = &q.FieldName + if q.From != nil { + vFrom, err := ToVariantValue(q.From) + if err != nil { + return nil, err + } + query.RangeFrom = ([]byte)(vFrom) + } + if q.To != nil { + vTo, err := ToVariantValue(q.To) + if err != nil { + return nil, err + } + query.RangeTo = ([]byte)(vTo) + } + query.IncludeLower = &q.IncludeLower + query.IncludeUpper = &q.IncludeUpper + data, err := proto.Marshal(query) + 
return data, err +} + +func (q *RangeQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go new file mode 100644 index 000000000000..1aac71808922 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go @@ -0,0 +1,31 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type TermQuery struct { + FieldName string + Term interface{} +} + +func (q *TermQuery) Type() QueryType { + return QueryType_TermQuery +} + +func (q *TermQuery) Serialize() ([]byte, error) { + term := &otsprotocol.TermQuery{} + term.FieldName = &q.FieldName + vt, err := ToVariantValue(q.Term) + if err != nil { + return nil, err + } + term.Term = []byte(vt) + data, err := proto.Marshal(term) + return data, err +} + +func (q *TermQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go new file mode 100644 index 000000000000..1401ff48f4e1 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go @@ -0,0 +1,35 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type TermsQuery struct { + FieldName string + Terms []interface{} +} + +func (q *TermsQuery) Type() QueryType { + return QueryType_TermsQuery +} + +func (q *TermsQuery) Serialize() ([]byte, error) { + term := &otsprotocol.TermsQuery{} + term.FieldName = &q.FieldName + term.Terms = make([][]byte, 0) + + for _, value := range q.Terms { + vt, err := ToVariantValue(value) + if err != nil { + return nil, err + } + term.Terms = append(term.Terms, []byte(vt)) + } + data, err := proto.Marshal(term) + return data, err +} + +func (q *TermsQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go new file mode 100644 index 000000000000..f885ca6e941a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type WildcardQuery struct { + FieldName string + Value string +} + +func (q *WildcardQuery) Type() QueryType { + return QueryType_WildcardQuery +} + +func (q *WildcardQuery) Serialize() ([]byte, error) { + query := &otsprotocol.WildcardQuery{} + query.FieldName = &q.FieldName + query.Value = &q.Value + data, err := proto.Marshal(query) + return data, err +} + +func (q *WildcardQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go new file mode 100644 index 000000000000..9bc47ebe0a0a --- /dev/null +++ 
b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go @@ -0,0 +1,101 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type SearchQuery interface { + Serialize() ([]byte, error) +} + +type searchQuery struct { + Offset int32 + Limit int32 + Query Query + Collapse *Collapse + Sort *Sort + GetTotalCount bool + Token []byte +} + +func NewSearchQuery() *searchQuery { + return &searchQuery{ + Offset: -1, + Limit: -1, + GetTotalCount: false, + } +} + +func (s *searchQuery) SetOffset(offset int32) *searchQuery { + s.Offset = offset + return s +} + +func (s *searchQuery) SetLimit(limit int32) *searchQuery { + s.Limit = limit + return s +} + +func (s *searchQuery) SetQuery(query Query) *searchQuery { + s.Query = query + return s +} + +func (s *searchQuery) SetCollapse(collapse *Collapse) *searchQuery { + s.Collapse = collapse + return s +} + +func (s *searchQuery) SetSort(sort *Sort) *searchQuery { + s.Sort = sort + return s +} + +func (s *searchQuery) SetGetTotalCount(getTotalCount bool) *searchQuery { + s.GetTotalCount = getTotalCount + return s +} + +func (s *searchQuery) SetToken(token []byte) *searchQuery { + s.Token = token + s.Sort = nil + return s +} + +func (s *searchQuery) Serialize() ([]byte, error) { + search_query := &otsprotocol.SearchQuery{} + if s.Offset >= 0 { + search_query.Offset = &s.Offset + } + if s.Limit >= 0 { + search_query.Limit = &s.Limit + } + if s.Query != nil { + pbQuery, err := s.Query.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Query = pbQuery + } + if s.Collapse != nil { + pbCollapse, err := s.Collapse.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Collapse = pbCollapse + } + if s.Sort != nil { + pbSort, err := s.Sort.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Sort = pbSort + } + search_query.GetTotalCount = &s.GetTotalCount + if s.Token != nil && len(s.Token) > 0 { + search_query.Token = s.Token + } + data, err := proto.Marshal(search_query) + return data, err +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go new file mode 100644 index 000000000000..3b3e4a1136a7 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type Sorter interface { + ProtoBuffer() (*otsprotocol.Sorter, error) +} + +type Sort struct { + Sorters []Sorter +} + +func (s *Sort) ProtoBuffer() (*otsprotocol.Sort, error) { + pbSort := &otsprotocol.Sort{} + pbSortors := make([]*otsprotocol.Sorter, 0) + for _, fs := range s.Sorters { + pbFs, err := fs.ProtoBuffer() + if err != nil { + return nil, err + } + pbSortors = append(pbSortors, pbFs) + } + pbSort.Sorter = pbSortors + return pbSort, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go new file mode 100644 index 000000000000..c4272a86deab --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go @@ -0,0 +1,67 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type NestedFilter struct { + Path string + Filter Query +} + 
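For orientation, here is a minimal usage sketch (not part of the committed diff) of the fluent `searchQuery` builder added above; the field name and term value are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search"
)

func main() {
	// Build a query with the chained setters, then Serialize() it into the
	// protobuf payload that a SearchRequest carries on the wire.
	q := search.NewSearchQuery().
		SetQuery(&search.TermQuery{FieldName: "city", Term: "hangzhou"}).
		SetLimit(10).
		SetGetTotalCount(true)

	payload, err := q.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Printf("search query serialized to %d bytes\n", len(payload))
}
```

Note that `SetToken` clears any previously set `Sort`: a continuation token already encodes the sort order of the original request.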
+func (f *NestedFilter) ProtoBuffer() (*otsprotocol.NestedFilter, error) { + pbF := &otsprotocol.NestedFilter{ + Path: &f.Path, + } + pbQ, err := f.Filter.ProtoBuffer() + if err != nil { + return nil, err + } + pbF.Filter = pbQ + return pbF, err +} + +type FieldSort struct { + FieldName string + Order *SortOrder + Mode *SortMode + NestedFilter *NestedFilter +} + +func NewFieldSort(fieldName string, order SortOrder) *FieldSort { + return &FieldSort{ + FieldName: fieldName, + Order: order.Enum(), + } +} + +func (s *FieldSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbFieldSort := &otsprotocol.FieldSort{ + FieldName: &s.FieldName, + } + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbFieldSort.Order = pbOrder + } + if s.Mode != nil { + pbMode, err := s.Mode.ProtoBuffer() + if err != nil { + return nil, err + } + if pbMode != nil { + pbFieldSort.Mode = pbMode + } + } + if s.NestedFilter != nil { + pbFilter, err := s.NestedFilter.ProtoBuffer() + if err != nil { + return nil, err + } + pbFieldSort.NestedFilter = pbFilter + } + pbSorter := &otsprotocol.Sorter{ + FieldSort: pbFieldSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go new file mode 100644 index 000000000000..de4b07c3029b --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go @@ -0,0 +1,77 @@ +package search + +import ( + "errors" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type GeoDistanceType int8 + +const ( + GeoDistanceType_ARC GeoDistanceType = 0 + GeoDistanceType_PLANE GeoDistanceType = 1 // must differ from ARC; with both set to 0 the PLANE branch below could never be taken +) + +func (t *GeoDistanceType) ProtoBuffer() (*otsprotocol.GeoDistanceType, error) { + if t == nil { + return nil, errors.New("type is nil") + } + if *t == GeoDistanceType_ARC { + return otsprotocol.GeoDistanceType_GEO_DISTANCE_ARC.Enum(), nil + } else if *t == GeoDistanceType_PLANE { + return otsprotocol.GeoDistanceType_GEO_DISTANCE_PLANE.Enum(), nil + } else { + return nil, errors.New("unknown distance type: " + fmt.Sprintf("%#v", *t)) + } +} + +type GeoDistanceSort struct { + FieldName string + Points []string + Order *SortOrder + Mode *SortMode + GeoDistanceType *GeoDistanceType + NestedFilter *NestedFilter +} + +func (s *GeoDistanceSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbGeoDistanceSort := &otsprotocol.GeoDistanceSort{ + FieldName: &s.FieldName, + Points: s.Points, + } + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.Order = pbOrder + } + if s.Mode != nil { + pbMode, err := s.Mode.ProtoBuffer() + if err != nil { + return nil, err + } + if pbMode != nil { + pbGeoDistanceSort.Mode = pbMode + } + } + if s.GeoDistanceType != nil { + pbGeoDisType, err := s.GeoDistanceType.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.DistanceType = pbGeoDisType + } + if s.NestedFilter != nil { + pbFilter, err := s.NestedFilter.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.NestedFilter = pbFilter + } + pbSorter := &otsprotocol.Sorter{ + GeoDistanceSort: pbGeoDistanceSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go 
new file mode 100644 index 000000000000..4b04ac1bed63 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go @@ -0,0 +1,36 @@ +package search + +import ( + "errors" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type SortMode int8 + +const ( + SortMode_Min SortMode = 0 + SortMode_Max SortMode = 1 + SortMode_Avg SortMode = 2 +) + +func (x SortMode) Enum() *SortMode { + p := new(SortMode) + *p = x + return p +} + +func (m *SortMode) ProtoBuffer() (*otsprotocol.SortMode, error) { + if m == nil { + return nil, errors.New("sort mode is nil") + } + if *m == SortMode_Min { + return otsprotocol.SortMode_SORT_MODE_MIN.Enum(), nil + } else if *m == SortMode_Max { + return otsprotocol.SortMode_SORT_MODE_MAX.Enum(), nil + } else if *m == SortMode_Avg { + return otsprotocol.SortMode_SORT_MODE_AVG.Enum(), nil + } else { + return nil, errors.New("unknown sort mode: " + fmt.Sprintf("%#v", *m)) + } +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go new file mode 100644 index 000000000000..b936bfe7c909 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go @@ -0,0 +1,47 @@ +package search + +import ( + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type SortOrder int8 + +const ( + SortOrder_ASC SortOrder = 0 + SortOrder_DESC SortOrder = 1 +) + +func (x SortOrder) Enum() *SortOrder { + p := new(SortOrder) + *p = x + return p +} + +func (o *SortOrder) ProtoBuffer() (*otsprotocol.SortOrder, error) { + if o == nil { + return nil, errors.New("sort order is nil") + } + if *o == SortOrder_ASC { + return otsprotocol.SortOrder_SORT_ORDER_ASC.Enum(), nil + } else if *o == SortOrder_DESC { + return otsprotocol.SortOrder_SORT_ORDER_DESC.Enum(), nil + } else { + return nil, errors.New("unknown sort order: " + fmt.Sprintf("%#v", *o)) + } +} + +func ParseSortOrder(order *otsprotocol.SortOrder) *SortOrder { + if order == nil { + return nil + } + if *order == otsprotocol.SortOrder_SORT_ORDER_ASC { + return SortOrder_ASC.Enum() + } else if *order == otsprotocol.SortOrder_SORT_ORDER_DESC { + return SortOrder_DESC.Enum() + } else { + return nil + } +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go new file mode 100644 index 000000000000..4a54a3522689 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go @@ -0,0 +1,28 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type PrimaryKeySort struct { + Order *SortOrder +} + +func NewPrimaryKeySort() *PrimaryKeySort { + return &PrimaryKeySort{ + Order: SortOrder_ASC.Enum(), + } +} + +func (s *PrimaryKeySort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbPrimaryKeySort := &otsprotocol.PrimaryKeySort{} + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbPrimaryKeySort.Order = pbOrder + } + pbSorter := &otsprotocol.Sorter{ + PkSort: pbPrimaryKeySort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go 
b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go new file mode 100644 index 000000000000..00eef31db379 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go @@ -0,0 +1,28 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type ScoreSort struct { + Order *SortOrder +} + +func NewScoreSort() *ScoreSort { + return &ScoreSort{ + Order: SortOrder_DESC.Enum(), + } +} + +func (s *ScoreSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbScoreSort := &otsprotocol.ScoreSort{} + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbScoreSort.Order = pbOrder + } + pbSorter := &otsprotocol.Sorter{ + ScoreSort: pbScoreSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go new file mode 100644 index 000000000000..7ec743f7b14f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go @@ -0,0 +1,74 @@ +package search + +import ( + "encoding/binary" + "errors" + "math" + "reflect" +) + +type VariantValue []byte +type VariantType byte + +const ( + // variant type tags; an encoded value is a one-byte tag followed by a little-endian payload + VT_INTEGER VariantType = 0x0 + VT_DOUBLE VariantType = 0x1 + VT_BOOLEAN VariantType = 0x2 + VT_STRING VariantType = 0x3 +) + +func ToVariantValue(value interface{}) (VariantValue, error) { + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + return VTString(value.(string)), nil + case reflect.Int: + return VTInteger(int64(value.(int))), nil + case reflect.Int64: + return VTInteger(value.(int64)), nil + case reflect.Float64: + return VTDouble(value.(float64)), nil + case reflect.Bool: + return VTBoolean(value.(bool)), nil + default: + return nil, errors.New("interface{} type must be string/int/int64/float64/bool") + } +} + +func (v *VariantValue) GetType() VariantType { + return VariantType(([]byte)(*v)[0]) +} + +func VTInteger(v int64) VariantValue { + buf := make([]byte, 9) + buf[0] = byte(VT_INTEGER) + binary.LittleEndian.PutUint64(buf[1:9], uint64(v)) + return (VariantValue)(buf) +} + +func VTDouble(v float64) VariantValue { + buf := make([]byte, 9) + buf[0] = byte(VT_DOUBLE) + binary.LittleEndian.PutUint64(buf[1:9], math.Float64bits(v)) + return (VariantValue)(buf) +} + +func VTString(v string) VariantValue { + buf := make([]byte, 5+len(v)) + buf[0] = byte(VT_STRING) + binary.LittleEndian.PutUint32(buf[1:5], uint32(len(v))) + copy(buf[5:], v) + return (VariantValue)(buf) +} + +func VTBoolean(b bool) VariantValue { + buf := make([]byte, 2) + buf[0] = byte(VT_BOOLEAN) + if b { + buf[1] = 1 + } else { + buf[1] = 0 + } + return (VariantValue)(buf) +}
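The variant encoding above is a one-byte type tag followed by a little-endian payload (length-prefixed for strings). A small sketch, not part of the diff, exercising the exported helpers with arbitrary sample values:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search"
)

func main() {
	// String: tag 0x3 + 4-byte length + raw bytes.
	v, err := search.ToVariantValue("hangzhou")
	if err != nil {
		panic(err)
	}
	fmt.Printf("type tag: %#x, total bytes: %d\n", byte(v.GetType()), len(v))

	// Integer: tag 0x0 + 8-byte little-endian payload, 9 bytes in total.
	i, err := search.ToVariantValue(int64(42))
	if err != nil {
		panic(err)
	}
	fmt.Printf("type tag: %#x, total bytes: %d\n", byte(i.GetType()), len(i))
}
```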
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go new file mode 100644 index 000000000000..29e0c1ec91ea --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go @@ -0,0 +1,136 @@ +package tablestore + +import ( + "bytes" + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +func (tableStoreClient *TableStoreClient) CreateSearchIndex(request *CreateSearchIndexRequest) (*CreateSearchIndexResponse, error) { + req := new(otsprotocol.CreateSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + var err error + req.Schema, err = convertToPbSchema(request.IndexSchema) + if err != nil { + return nil, err + } + resp := new(otsprotocol.CreateSearchIndexResponse) + response := &CreateSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(createSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) DeleteSearchIndex(request *DeleteSearchIndexRequest) (*DeleteSearchIndexResponse, error) { + req := new(otsprotocol.DeleteSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + + resp := new(otsprotocol.DeleteSearchIndexResponse) + response := &DeleteSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(deleteSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) ListSearchIndex(request *ListSearchIndexRequest) (*ListSearchIndexResponse, error) { + req := new(otsprotocol.ListSearchIndexRequest) + req.TableName = proto.String(request.TableName) + + resp := new(otsprotocol.ListSearchIndexResponse) + response := &ListSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(listSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + indexes := make([]*IndexInfo, 0) + for _, info := range resp.Indices { + indexes = append(indexes, &IndexInfo{ + TableName: *info.TableName, + IndexName: *info.IndexName, + }) + } + response.IndexInfo = indexes + return response, nil +} + +func (tableStoreClient *TableStoreClient) DescribeSearchIndex(request *DescribeSearchIndexRequest) (*DescribeSearchIndexResponse, error) { + req := new(otsprotocol.DescribeSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + + resp := new(otsprotocol.DescribeSearchIndexResponse) + response := &DescribeSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(describeSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + schema, err := parseFromPbSchema(resp.Schema) + if err != nil { + return nil, err + } + response.Schema = schema + if resp.SyncStat != nil { + response.SyncStat = &SyncStat{ + CurrentSyncTimestamp: resp.SyncStat.CurrentSyncTimestamp, + } + syncPhase := resp.SyncStat.SyncPhase + if syncPhase == nil { + return nil, errors.New("missing [SyncPhase] in DescribeSearchIndexResponse") + } else if *syncPhase == otsprotocol.SyncPhase_FULL { + response.SyncStat.SyncPhase = SyncPhase_FULL + } else if *syncPhase == otsprotocol.SyncPhase_INCR { + response.SyncStat.SyncPhase = SyncPhase_INCR + } else { + return nil, fmt.Errorf("unknown SyncPhase: %v", syncPhase) + } + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) Search(request *SearchRequest) (*SearchResponse, error) { + req, err := request.ProtoBuffer() + if err != nil { + return nil, err + } + resp := new(otsprotocol.SearchResponse) + response := &SearchResponse{} + if err := tableStoreClient.doRequestWithRetry(searchUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + response.TotalCount = *resp.TotalHits + + rows := make([]*PlainBufferRow, 0) + for _, buf := range resp.Rows { + row, err := 
readRowsWithHeader(bytes.NewReader(buf)) + if err != nil { + return nil, err + } + rows = append(rows, row[0]) + } + + for _, row := range rows { + currentRow := &Row{} + currentPk := new(PrimaryKey) + for _, pk := range row.primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + currentPk.PrimaryKeys = append(currentPk.PrimaryKeys, pkColumn) + } + currentRow.PrimaryKey = currentPk + for _, cell := range row.cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + currentRow.Columns = append(currentRow.Columns, dataColumn) + } + response.Rows = append(response.Rows, currentRow) + } + + response.IsAllSuccess = *resp.IsAllSucceeded + if resp.NextToken != nil && len(resp.NextToken) > 0 { + response.NextToken = resp.NextToken + } + return response, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go new file mode 100644 index 000000000000..6ec022e5f234 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go @@ -0,0 +1,327 @@ +package tablestore + +import ( + "encoding/json" + "errors" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search" + "github.com/golang/protobuf/proto" +) + +type ColumnsToGet struct { + Columns []string + ReturnAll bool +} + +type SearchRequest struct { + TableName string + IndexName string + SearchQuery search.SearchQuery + ColumnsToGet *ColumnsToGet + RoutingValues []*PrimaryKey +} + +func (r *SearchRequest) SetTableName(tableName string) *SearchRequest { + r.TableName = tableName + return r +} + +func (r *SearchRequest) SetIndexName(indexName string) *SearchRequest { + r.IndexName = indexName + return r +} + +func (r *SearchRequest) SetSearchQuery(searchQuery search.SearchQuery) *SearchRequest { + r.SearchQuery = searchQuery + return r +} + +func (r *SearchRequest) SetColumnsToGet(columnToGet *ColumnsToGet) *SearchRequest { + r.ColumnsToGet = columnToGet + return r +} + +func (r *SearchRequest) SetRoutingValues(routingValues []*PrimaryKey) *SearchRequest { + r.RoutingValues = routingValues + return r +} + +func (r *SearchRequest) AddRoutingValue(routingValue *PrimaryKey) *SearchRequest { + r.RoutingValues = append(r.RoutingValues, routingValue) + return r +} + +func (r *SearchRequest) ProtoBuffer() (*otsprotocol.SearchRequest, error) { + req := &otsprotocol.SearchRequest{} + req.TableName = &r.TableName + req.IndexName = &r.IndexName + query, err := r.SearchQuery.Serialize() + if err != nil { + return nil, err + } + req.SearchQuery = query + pbColumns := &otsprotocol.ColumnsToGet{} + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_NONE.Enum() + if r.ColumnsToGet != nil { + if r.ColumnsToGet.ReturnAll { + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_ALL.Enum() + } else if len(r.ColumnsToGet.Columns) > 0 { + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_SPECIFIED.Enum() + pbColumns.ColumnNames = r.ColumnsToGet.Columns + } + } + req.ColumnsToGet = pbColumns + if r.RoutingValues != nil { + for _, routingValue := range r.RoutingValues { + req.RoutingValues = append(req.RoutingValues, routingValue.Build(false)) + } + } + return req, err +} + +type SearchResponse struct { + TotalCount int64 + Rows []*Row + IsAllSuccess bool + NextToken []byte + 
ResponseInfo +} + +func convertFieldSchemaToPBFieldSchema(fieldSchemas []*FieldSchema) []*otsprotocol.FieldSchema { + var schemas []*otsprotocol.FieldSchema + for _, value := range fieldSchemas { + field := new(otsprotocol.FieldSchema) + + field.FieldName = proto.String(*value.FieldName) + field.FieldType = otsprotocol.FieldType(int32(value.FieldType)).Enum() + + if value.Index != nil { + field.Index = proto.Bool(*value.Index) + } else if value.FieldType != FieldType_NESTED { + field.Index = proto.Bool(true) + } + if value.IndexOptions != nil { + field.IndexOptions = otsprotocol.IndexOptions(int32(*value.IndexOptions)).Enum() + } + if value.Analyzer != nil { + field.Analyzer = proto.String(string(*value.Analyzer)) + } + if value.EnableSortAndAgg != nil { + field.DocValues = proto.Bool(*value.EnableSortAndAgg) + } + if value.Store != nil { + field.Store = proto.Bool(*value.Store) + } else if value.FieldType != FieldType_NESTED { + if *field.FieldType == otsprotocol.FieldType_TEXT { + field.Store = proto.Bool(false) + } else { + field.Store = proto.Bool(true) + } + } + if value.IsArray != nil { + field.IsArray = proto.Bool(*value.IsArray) + } + if value.FieldType == FieldType_NESTED { + field.FieldSchemas = convertFieldSchemaToPBFieldSchema(value.FieldSchemas) + } + + schemas = append(schemas, field) + } + + return schemas +} + +func convertToPbSchema(schema *IndexSchema) (*otsprotocol.IndexSchema, error) { + indexSchema := new(otsprotocol.IndexSchema) + indexSchema.FieldSchemas = convertFieldSchemaToPBFieldSchema(schema.FieldSchemas) + indexSchema.IndexSetting = new(otsprotocol.IndexSetting) + var defaultNumberOfShards int32 = 1 + indexSchema.IndexSetting.NumberOfShards = &defaultNumberOfShards + if schema.IndexSetting != nil { + indexSchema.IndexSetting.RoutingFields = schema.IndexSetting.RoutingFields + } + if schema.IndexSort != nil { + pbSort, err := schema.IndexSort.ProtoBuffer() + if err != nil { + return nil, err + } + indexSchema.IndexSort = pbSort + } + return indexSchema, nil +} + +func parseFieldSchemaFromPb(pbFieldSchemas []*otsprotocol.FieldSchema) []*FieldSchema { + var schemas []*FieldSchema + for _, value := range pbFieldSchemas { + field := new(FieldSchema) + field.FieldName = value.FieldName + field.FieldType = FieldType(*value.FieldType) + field.Index = value.Index + if value.IndexOptions != nil { + indexOption := IndexOptions(*value.IndexOptions) + field.IndexOptions = &indexOption + } + field.Analyzer = (*Analyzer)(value.Analyzer) + field.EnableSortAndAgg = value.DocValues + field.Store = value.Store + field.IsArray = value.IsArray + if field.FieldType == FieldType_NESTED { + field.FieldSchemas = parseFieldSchemaFromPb(value.FieldSchemas) + } + schemas = append(schemas, field) + } + return schemas +} + +func parseIndexSortFromPb(pbIndexSort *otsprotocol.Sort) (*search.Sort, error) { + indexSort := &search.Sort{ + Sorters: make([]search.Sorter, 0), + } + for _, sorter := range pbIndexSort.GetSorter() { + if sorter.GetFieldSort() != nil { + fieldSort := &search.FieldSort{ + FieldName: *sorter.GetFieldSort().FieldName, + Order: search.ParseSortOrder(sorter.GetFieldSort().Order), + } + indexSort.Sorters = append(indexSort.Sorters, fieldSort) + } else if sorter.GetPkSort() != nil { + pkSort := &search.PrimaryKeySort{ + Order: search.ParseSortOrder(sorter.GetPkSort().Order), + } + indexSort.Sorters = append(indexSort.Sorters, pkSort) + } else { + return nil, errors.New("unknown index sort type") + } + } + return indexSort, nil +} + +func parseFromPbSchema(pbSchema 
*otsprotocol.IndexSchema) (*IndexSchema, error) { + schema := &IndexSchema{ + IndexSetting: &IndexSetting{ + RoutingFields: pbSchema.IndexSetting.RoutingFields, + }, + } + schema.FieldSchemas = parseFieldSchemaFromPb(pbSchema.GetFieldSchemas()) + indexSort, err := parseIndexSortFromPb(pbSchema.GetIndexSort()) + if err != nil { + return nil, err + } + schema.IndexSort = indexSort + return schema, nil +} + +type IndexSchema struct { + IndexSetting *IndexSetting + FieldSchemas []*FieldSchema + IndexSort *search.Sort +} + +type FieldType int32 + +const ( + FieldType_LONG FieldType = 1 + FieldType_DOUBLE FieldType = 2 + FieldType_BOOLEAN FieldType = 3 + FieldType_KEYWORD FieldType = 4 + FieldType_TEXT FieldType = 5 + FieldType_NESTED FieldType = 6 + FieldType_GEO_POINT FieldType = 7 +) + +type IndexOptions int32 + +const ( + IndexOptions_DOCS IndexOptions = 1 + IndexOptions_FREQS IndexOptions = 2 + IndexOptions_POSITIONS IndexOptions = 3 + IndexOptions_OFFSETS IndexOptions = 4 +) + +type Analyzer string + +const ( + Analyzer_SingleWord Analyzer = "single_word" + Analyzer_MaxWord Analyzer = "max_word" +) + +type FieldSchema struct { + FieldName *string + FieldType FieldType + Index *bool + IndexOptions *IndexOptions + Analyzer *Analyzer + EnableSortAndAgg *bool + Store *bool + IsArray *bool + FieldSchemas []*FieldSchema +} + +func (fs *FieldSchema) String() string { + out, err := json.Marshal(fs) + if err != nil { + panic(err) + } + return string(out) +} + +type IndexSetting struct { + RoutingFields []string +} + +type CreateSearchIndexRequest struct { + TableName string + IndexName string + IndexSchema *IndexSchema +} + +type CreateSearchIndexResponse struct { + ResponseInfo ResponseInfo +} + +type DescribeSearchIndexRequest struct { + TableName string + IndexName string +} + +type SyncPhase int32 + +const ( + SyncPhase_FULL SyncPhase = 1 + SyncPhase_INCR SyncPhase = 2 +) + +type SyncStat struct { + SyncPhase SyncPhase + CurrentSyncTimestamp *int64 +} + +type DescribeSearchIndexResponse struct { + Schema *IndexSchema + SyncStat *SyncStat + ResponseInfo ResponseInfo +} + +type ListSearchIndexRequest struct { + TableName string +} + +type IndexInfo struct { + TableName string + IndexName string +} + +type ListSearchIndexResponse struct { + IndexInfo []*IndexInfo + ResponseInfo ResponseInfo +} + +type DeleteSearchIndexRequest struct { + TableName string + IndexName string +} + +type DeleteSearchIndexResponse struct { + ResponseInfo ResponseInfo +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go new file mode 100644 index 000000000000..cedcce71c832 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go @@ -0,0 +1,982 @@ +package tablestore + +import ( + "bytes" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" +) + +const ( + maxTableNameLength = 100 + maxPrimaryKeyLength = 255 + maxPrimaryKeyNum = 4 + maxMultiDeleteRows = 100 +) + +type ColumnType int32 + +const ( + ColumnType_STRING ColumnType = 1 + ColumnType_INTEGER ColumnType = 2 + ColumnType_BOOLEAN ColumnType = 3 + ColumnType_DOUBLE ColumnType = 4 + ColumnType_BINARY ColumnType = 5 +) + +const ( + Version = "1.0" + ApiVersion = "2015-12-31" + xOtsDateFormat = "2006-01-02T15:04:05.123Z" + xOtsInstanceName = "x-ots-instancename" + xOtsRequestId = "x-ots-requestid" 
+) + +type ColumnValue struct { + Type ColumnType + Value interface{} +} + +func (cv *ColumnValue) writeCellValue(w io.Writer) { + writeTag(w, TAG_CELL_VALUE) + if cv == nil { + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_AUTO_INCREMENT) + return + } + + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_32_SIZE+1+len(v))) // length + type + value + writeRawByte(w, VT_STRING) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, []byte(v)) + + case ColumnType_INTEGER: + v := cv.Value.(int64) + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_64_SIZE+1)) + writeRawByte(w, VT_INTEGER) + writeRawLittleEndian64(w, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + writeRawLittleEndian32(w, 2) + writeRawByte(w, VT_BOOLEAN) + writeBoolean(w, v) + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + + writeRawLittleEndian32(w, LITTLE_ENDIAN_64_SIZE+1) + writeRawByte(w, VT_DOUBLE) + writeDouble(w, v) + + case ColumnType_BINARY: + v := cv.Value.([]byte) + + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_32_SIZE+1+len(v))) // length + type + value + writeRawByte(w, VT_BLOB) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, v) + } +} + +func (cv *ColumnValue) writeCellValueWithoutLengthPrefix() []byte { + var b bytes.Buffer + w := &b + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + + writeRawByte(w, VT_STRING) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, []byte(v)) + + case ColumnType_INTEGER: + v := cv.Value.(int64) + writeRawByte(w, VT_INTEGER) + writeRawLittleEndian64(w, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + writeRawByte(w, VT_BOOLEAN) + writeBoolean(w, v) + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + + writeRawByte(w, VT_DOUBLE) + writeDouble(w, v) + + case ColumnType_BINARY: + v := cv.Value.([]byte) + + writeRawByte(w, VT_BLOB) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, v) + } + + return b.Bytes() +} + +func (cv *ColumnValue) getCheckSum(crc byte) byte { + if cv == nil { + return crc8Byte(crc, VT_AUTO_INCREMENT) + } + + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + crc = crc8Byte(crc, VT_STRING) + crc = crc8Int32(crc, int32(len(v))) + crc = crc8Bytes(crc, []byte(v)) + case ColumnType_INTEGER: + v := cv.Value.(int64) + crc = crc8Byte(crc, VT_INTEGER) + crc = crc8Int64(crc, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + crc = crc8Byte(crc, VT_BOOLEAN) + if v { + crc = crc8Byte(crc, 0x1) + } else { + crc = crc8Byte(crc, 0x0) + } + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + crc = crc8Byte(crc, VT_DOUBLE) + crc = crc8Int64(crc, int64(math.Float64bits(v))) + case ColumnType_BINARY: + v := cv.Value.([]byte) + crc = crc8Byte(crc, VT_BLOB) + crc = crc8Int32(crc, int32(len(v))) + crc = crc8Bytes(crc, v) + } + + return crc +} + +type Column struct { + Name []byte + Value ColumnValue + Type byte + Timestamp int64 + HasType bool + HasTimestamp bool + IgnoreValue bool +} + +func NewColumn(name []byte, value interface{}) *Column { + + v := &Column{} + v.Name = name + + if value != nil { + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + v.Value.Type = ColumnType_STRING + + case reflect.Int64: + v.Value.Type = ColumnType_INTEGER + + case reflect.Bool: + v.Value.Type = ColumnType_BOOLEAN + + case reflect.Float64: + v.Value.Type = ColumnType_DOUBLE + + case reflect.Slice: + v.Value.Type = ColumnType_BINARY + default: + panic(errInvalidInput) + } + + v.Value.Value = value + } + + 
return v +} + +func (c *Column) toPlainBufferCell(ignoreValue bool) *PlainBufferCell { + cell := &PlainBufferCell{} + cell.cellName = c.Name + cell.ignoreValue = ignoreValue + if ignoreValue == false { + cell.cellValue = &c.Value + } + + if c.HasType { + cell.hasCellType = c.HasType + cell.cellType = byte(c.Type) + } + + if c.HasTimestamp { + cell.hasCellTimestamp = c.HasTimestamp + cell.cellTimestamp = c.Timestamp + } + + return cell +} + +type PrimaryKeyColumnInner struct { + Name []byte + Type otsprotocol.PrimaryKeyType + Value interface{} +} + +func NewPrimaryKeyColumnINF_MAX(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "INF_MAX" + + return v +} + +func NewPrimaryKeyColumnINF_MIN(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "INF_MIN" + + return v +} + +func NewPrimaryKeyColumnAuto_Increment(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "AUTO_INCRMENT" + return v +} + +func NewPrimaryKeyColumn(name []byte, value interface{}, option PrimaryKeyOption) *PrimaryKeyColumnInner { + + if option == NONE { + v := &PrimaryKeyColumnInner{} + v.Name = name + + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + v.Type = otsprotocol.PrimaryKeyType_STRING + + case reflect.Int64: + v.Type = otsprotocol.PrimaryKeyType_INTEGER + + case reflect.Slice: + v.Type = otsprotocol.PrimaryKeyType_BINARY + + default: + panic(errInvalidInput) + } + + v.Value = value + + return v + } else if option == AUTO_INCREMENT { + return NewPrimaryKeyColumnAuto_Increment(name) + } else if option == MIN { + return NewPrimaryKeyColumnINF_MIN(name) + } else { + return NewPrimaryKeyColumnINF_MAX(name) + } +} + +func (pkc *PrimaryKeyColumnInner) toColumnValue() *ColumnValue { + switch pkc.Type { + case otsprotocol.PrimaryKeyType_INTEGER: + return &ColumnValue{ColumnType_INTEGER, pkc.Value} + case otsprotocol.PrimaryKeyType_STRING: + return &ColumnValue{ColumnType_STRING, pkc.Value} + case otsprotocol.PrimaryKeyType_BINARY: + return &ColumnValue{ColumnType_BINARY, pkc.Value} + } + + return nil +} + +func (pkc *PrimaryKeyColumnInner) toPlainBufferCell() *PlainBufferCell { + cell := &PlainBufferCell{} + cell.cellName = pkc.Name + cell.cellValue = pkc.toColumnValue() + return cell +} + +func (pkc *PrimaryKeyColumnInner) isInfMin() bool { + if pkc.Type == 0 && pkc.Value.(string) == "INF_MIN" { + return true + } + + return false +} + +func (pkc *PrimaryKeyColumnInner) isInfMax() bool { + if pkc.Type == 0 && pkc.Value.(string) == "INF_MAX" { + return true + } + + return false +} + +func (pkc *PrimaryKeyColumnInner) isAutoInc() bool { + if pkc.Type == 0 && pkc.Value.(string) == "AUTO_INCRMENT" { + return true + } + return false +} + +func (pkc *PrimaryKeyColumnInner) getCheckSum(crc byte) byte { + if pkc.isInfMin() { + return crc8Byte(crc, VT_INF_MIN) + } + if pkc.isInfMax() { + return crc8Byte(crc, VT_INF_MAX) + } + if pkc.isAutoInc() { + return crc8Byte(crc, VT_AUTO_INCREMENT) + } + + return pkc.toColumnValue().getCheckSum(crc) +} + +func (pkc *PrimaryKeyColumnInner) writePrimaryKeyColumn(w io.Writer) { + writeTag(w, TAG_CELL) + writeCellName(w, []byte(pkc.Name)) + if pkc.isInfMin() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_INF_MIN) + return + } + if pkc.isInfMax() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_INF_MAX) + return + } + if 
pkc.isAutoInc() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_AUTO_INCREMENT) + return + } + pkc.toColumnValue().writeCellValue(w) +} + +type PrimaryKey2 struct { + primaryKey []*PrimaryKeyColumnInner +} + +func (pk *PrimaryKey) Build(isDelete bool) []byte { + var b bytes.Buffer + writeHeader(&b) + writeTag(&b, TAG_ROW_PK) + + rowChecksum := byte(0x0) + var cellChecksum byte + + for _, column := range pk.PrimaryKeys { + primaryKeyColumn := NewPrimaryKeyColumn([]byte(column.ColumnName), column.Value, column.PrimaryKeyOption) + + cellChecksum = crc8Bytes(byte(0x0), []byte(primaryKeyColumn.Name)) + cellChecksum = primaryKeyColumn.getCheckSum(cellChecksum) + rowChecksum = crc8Byte(rowChecksum, cellChecksum) + primaryKeyColumn.writePrimaryKeyColumn(&b) + + writeTag(&b, TAG_CELL_CHECKSUM) + writeRawByte(&b, cellChecksum) + } + + // Without a delete marker the row checksum is still folded with 0x0. + if isDelete { + writeTag(&b, TAG_DELETE_ROW_MARKER) + rowChecksum = crc8Byte(rowChecksum, byte(0x1)) + } else { + rowChecksum = crc8Byte(rowChecksum, byte(0x0)) + } + writeTag(&b, TAG_ROW_CHECKSUM) + writeRawByte(&b, rowChecksum) + + return b.Bytes() +} + +type RowPutChange struct { + primaryKey []*PrimaryKeyColumnInner + columnsToPut []*Column +} + +type RowUpdateChange struct { + primaryKey []*PrimaryKeyColumnInner + columnsToUpdate []*Column +} + +func (rpc *RowPutChange) Build() []byte { + pkCells := make([]*PlainBufferCell, len(rpc.primaryKey)) + for i, pkc := range rpc.primaryKey { + pkCells[i] = pkc.toPlainBufferCell() + } + + cells := make([]*PlainBufferCell, len(rpc.columnsToPut)) + for i, c := range rpc.columnsToPut { + cells[i] = c.toPlainBufferCell(false) + } + + row := &PlainBufferRow{ + primaryKey: pkCells, + cells: cells} + var b bytes.Buffer + row.writeRowWithHeader(&b) + + return b.Bytes() +} + +func (ruc *RowUpdateChange) Build() []byte { + pkCells := make([]*PlainBufferCell, len(ruc.primaryKey)) + for i, pkc := range ruc.primaryKey { + pkCells[i] = pkc.toPlainBufferCell() + } + + cells := make([]*PlainBufferCell, len(ruc.columnsToUpdate)) + for i, c := range ruc.columnsToUpdate { + cells[i] = c.toPlainBufferCell(c.IgnoreValue) + } + + row := &PlainBufferRow{ + primaryKey: pkCells, + cells: cells} + var b bytes.Buffer + row.writeRowWithHeader(&b) + + return b.Bytes() +} + +const ( + MaxValue = "_get_range_max" + MinValue = "_get_range_min" +) + +func (comparatorType *ComparatorType) ConvertToPbComparatorType() otsprotocol.ComparatorType { + switch *comparatorType { + case CT_EQUAL: + return otsprotocol.ComparatorType_CT_EQUAL + case CT_NOT_EQUAL: + return otsprotocol.ComparatorType_CT_NOT_EQUAL + case CT_GREATER_THAN: + return otsprotocol.ComparatorType_CT_GREATER_THAN + case CT_GREATER_EQUAL: + return otsprotocol.ComparatorType_CT_GREATER_EQUAL + case CT_LESS_THAN: + return otsprotocol.ComparatorType_CT_LESS_THAN + default: + return otsprotocol.ComparatorType_CT_LESS_EQUAL + } +} + +func (columnType DefinedColumnType) ConvertToPbDefinedColumnType() otsprotocol.DefinedColumnType { + switch columnType { + case DefinedColumn_INTEGER: + return otsprotocol.DefinedColumnType_DCT_INTEGER + case DefinedColumn_DOUBLE: + return otsprotocol.DefinedColumnType_DCT_DOUBLE + case DefinedColumn_BOOLEAN: + return otsprotocol.DefinedColumnType_DCT_BOOLEAN + case DefinedColumn_STRING: + return otsprotocol.DefinedColumnType_DCT_STRING + default: + return otsprotocol.DefinedColumnType_DCT_BLOB + } +} + +func (loType *LogicalOperator) ConvertToPbLoType() otsprotocol.LogicalOperator { + switch *loType { + 
case LO_NOT: + return otsprotocol.LogicalOperator_LO_NOT + case LO_AND: + return otsprotocol.LogicalOperator_LO_AND + default: + return otsprotocol.LogicalOperator_LO_OR + } +} + +func ConvertToPbCastType(variantType VariantType) *otsprotocol.VariantType { + switch variantType { + case Variant_INTEGER: + return otsprotocol.VariantType_VT_INTEGER.Enum() + case Variant_DOUBLE: + return otsprotocol.VariantType_VT_DOUBLE.Enum() + case Variant_STRING: + return otsprotocol.VariantType_VT_STRING.Enum() + default: + panic("invalid VariantType") + } +} + +func NewValueTransferRule(regex string, vt VariantType) *ValueTransferRule{ + return &ValueTransferRule{Regex: regex, Cast_type: vt} +} + +func NewSingleColumnValueRegexFilter(columnName string, comparator ComparatorType, rule *ValueTransferRule, value interface{}) *SingleColumnCondition { + return &SingleColumnCondition{ColumnName: &columnName, Comparator: &comparator, ColumnValue: value, TransferRule: rule} +} + +func NewSingleColumnValueFilter(condition *SingleColumnCondition) *otsprotocol.SingleColumnValueFilter { + filter := new(otsprotocol.SingleColumnValueFilter) + + comparatorType := condition.Comparator.ConvertToPbComparatorType() + filter.Comparator = &comparatorType + filter.ColumnName = condition.ColumnName + col := NewColumn([]byte(*condition.ColumnName), condition.ColumnValue) + filter.ColumnValue = col.toPlainBufferCell(false).cellValue.writeCellValueWithoutLengthPrefix() + filter.FilterIfMissing = proto.Bool(condition.FilterIfMissing) + filter.LatestVersionOnly = proto.Bool(condition.LatestVersionOnly) + if condition.TransferRule != nil { + filter.ValueTransRule = &otsprotocol.ValueTransferRule{ Regex: proto.String(condition.TransferRule.Regex), CastType: ConvertToPbCastType(condition.TransferRule.Cast_type) } + } + return filter +} + +func NewCompositeFilter(filters []ColumnFilter, lo LogicalOperator) *otsprotocol.CompositeColumnValueFilter { + ccvfilter := new(otsprotocol.CompositeColumnValueFilter) + combinator := lo.ConvertToPbLoType() + ccvfilter.Combinator = &combinator + for _, cf := range filters { + filter := cf.ToFilter() + ccvfilter.SubFilters = append(ccvfilter.SubFilters, filter) + } + + return ccvfilter +} + +func NewPaginationFilter(filter *PaginationFilter) *otsprotocol.ColumnPaginationFilter { + pageFilter := new(otsprotocol.ColumnPaginationFilter) + pageFilter.Offset = proto.Int32(filter.Offset) + pageFilter.Limit = proto.Int32(filter.Limit) + return pageFilter +} + +func (otsClient *TableStoreClient) postReq(req *http.Request, url string) ([]byte, error, string) { + resp, err := otsClient.httpClient.Do(req) + if err != nil { + return nil, err, "" + } + defer resp.Body.Close() + + reqId := getRequestId(resp) + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err, reqId + } + + if (resp.StatusCode >= 200 && resp.StatusCode < 300) == false { + var retErr *OtsError + perr := new(otsprotocol.Error) + errUm := proto.Unmarshal(body, perr) + if errUm != nil { + retErr = rawHttpToOtsError(resp.StatusCode, body, reqId) + } else { + retErr = pbErrToOtsError(perr, reqId) + } + return nil, retErr, reqId + } + + return body, nil, reqId +} + +func rawHttpToOtsError(code int, body []byte, reqId string) *OtsError { + oerr := &OtsError{ + Message: string(body), + RequestId: reqId, + } + if code >= 500 && code < 600 { + oerr.Code = SERVER_UNAVAILABLE + } else { + oerr.Code = OTS_CLIENT_UNKNOWN + } + return oerr +} + +func pbErrToOtsError(pbErr *otsprotocol.Error, reqId string) *OtsError { + return &OtsError{ + 
Code: pbErr.GetCode(), + Message: pbErr.GetMessage(), + RequestId: reqId, + } +} + +func getRequestId(response *http.Response) string { + if response == nil || response.Header == nil { + return "" + } + + return response.Header.Get(xOtsRequestId) +} + +func buildRowPutChange(primarykey *PrimaryKey, columns []AttributeColumn) *RowPutChange { + row := new(RowPutChange) + row.primaryKey = make([]*PrimaryKeyColumnInner, len(primarykey.PrimaryKeys)) + for i, p := range primarykey.PrimaryKeys { + row.primaryKey[i] = NewPrimaryKeyColumn([]byte(p.ColumnName), p.Value, p.PrimaryKeyOption) + } + + row.columnsToPut = make([]*Column, len(columns)) + for i, p := range columns { + row.columnsToPut[i] = NewColumn([]byte(p.ColumnName), p.Value) + if p.Timestamp != 0 { + row.columnsToPut[i].HasTimestamp = true + row.columnsToPut[i].Timestamp = p.Timestamp + } + } + + return row +} + +func buildRowUpdateChange(primarykey *PrimaryKey, columns []ColumnToUpdate) *RowUpdateChange { + row := new(RowUpdateChange) + row.primaryKey = make([]*PrimaryKeyColumnInner, len(primarykey.PrimaryKeys)) + for i, p := range primarykey.PrimaryKeys { + row.primaryKey[i] = NewPrimaryKeyColumn([]byte(p.ColumnName), p.Value, p.PrimaryKeyOption) + } + + row.columnsToUpdate = make([]*Column, len(columns)) + for i, p := range columns { + row.columnsToUpdate[i] = NewColumn([]byte(p.ColumnName), p.Value) + row.columnsToUpdate[i].HasTimestamp = p.HasTimestamp + row.columnsToUpdate[i].HasType = p.HasType + row.columnsToUpdate[i].Type = p.Type + row.columnsToUpdate[i].Timestamp = p.Timestamp + row.columnsToUpdate[i].IgnoreValue = p.IgnoreValue + } + + return row +} + +func (condition *RowCondition) buildCondition() *otsprotocol.RowExistenceExpectation { + switch condition.RowExistenceExpectation { + case RowExistenceExpectation_IGNORE: + return otsprotocol.RowExistenceExpectation_IGNORE.Enum() + case RowExistenceExpectation_EXPECT_EXIST: + return otsprotocol.RowExistenceExpectation_EXPECT_EXIST.Enum() + case RowExistenceExpectation_EXPECT_NOT_EXIST: + return otsprotocol.RowExistenceExpectation_EXPECT_NOT_EXIST.Enum() + } + + panic(errInvalidInput) +} + +// buildPrimaryKey builds a primary key column for create table, put row, delete row and update row. +// The value only supports int64, string, and []byte; any other type will panic. +func buildPrimaryKey(primaryKeyName string, value interface{}) *PrimaryKeyColumn { + // Todo: validate the input + return &PrimaryKeyColumn{ColumnName: primaryKeyName, Value: value, PrimaryKeyOption: NONE} +} + +// The value only supports int64, string, bool, float64, and []byte; any other type will panic. +func (rowchange *PutRowChange) AddColumn(columnName string, value interface{}) { + // Todo: validate the input + column := &AttributeColumn{ColumnName: columnName, Value: value} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *PutRowChange) SetReturnPk() { + rowchange.ReturnType = ReturnType(ReturnType_RT_PK) +} + +func (rowchange *UpdateRowChange) SetReturnIncrementValue() { + rowchange.ReturnType = ReturnType(ReturnType_RT_AFTER_MODIFY) +} + +func (rowchange *UpdateRowChange) AppendIncrementColumnToReturn(name string) { + rowchange.ColumnNamesToReturn = append(rowchange.ColumnNamesToReturn, name) +} + +// The value only supports int64, string, bool, float64, and []byte; any other type will panic. +func (rowchange *PutRowChange) AddColumnWithTimestamp(columnName string, value interface{}, timestamp int64) { + // Todo: validate the input + column := &AttributeColumn{ColumnName: columnName, Value: value} + column.Timestamp = timestamp + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumn(primaryKeyName string, value interface{}) { + pk.PrimaryKeys = append(pk.PrimaryKeys, buildPrimaryKey(primaryKeyName, value)) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumnWithAutoIncrement(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: AUTO_INCREMENT}) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumnWithMinValue(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: MIN}) +} + +// Only used for range query +func (pk *PrimaryKey) AddPrimaryKeyColumnWithMaxValue(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: MAX}) +} + +func (rowchange *PutRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (rowchange *DeleteRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (Criteria *SingleRowQueryCriteria) SetFilter(filter ColumnFilter) { + Criteria.Filter = filter +} + +func (Criteria *MultiRowQueryCriteria) SetFilter(filter ColumnFilter) { + Criteria.Filter = filter +} + +func NewSingleColumnCondition(columnName string, comparator ComparatorType, value interface{}) *SingleColumnCondition { + return &SingleColumnCondition{ColumnName: &columnName, Comparator: &comparator, ColumnValue: value} +} + +func NewCompositeColumnCondition(lo LogicalOperator) *CompositeColumnValueFilter { + return &CompositeColumnValueFilter{Operator: lo} +} + +func (rowchange *PutRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (rowchange *UpdateRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (rowchange *UpdateRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (rowchange *DeleteRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (meta *TableMeta) AddPrimaryKeyColumn(name string, keyType PrimaryKeyType) { + meta.SchemaEntry = append(meta.SchemaEntry, &PrimaryKeySchema{Name: &name, Type: &keyType}) +} + +func (meta *TableMeta) AddPrimaryKeyColumnOption(name string, keyType PrimaryKeyType, keyOption PrimaryKeyOption) { + meta.SchemaEntry = append(meta.SchemaEntry, &PrimaryKeySchema{Name: &name, Type: &keyType, Option: &keyOption}) +} + 
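As a usage sketch for the primary key helpers above (not part of the diff; the column names and values are illustrative), a key is assembled column by column and then plainbuffer-encoded via Build:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
)

func main() {
	// Column order must match the table schema. Build(false) emits the
	// plainbuffer encoding with per-cell and per-row CRC-8 checksums;
	// Build(true) additionally writes the delete-row marker.
	pk := &tablestore.PrimaryKey{}
	pk.AddPrimaryKeyColumn("pk0", "user-0001")
	pk.AddPrimaryKeyColumn("pk1", int64(42))

	buf := pk.Build(false)
	fmt.Printf("plainbuffer-encoded primary key: %d bytes\n", len(buf))
}
```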
+// The value only supports int64, string, bool, float64, and []byte; any other type will panic. +func (rowchange *UpdateRowChange) PutColumn(columnName string, value interface{}) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: value} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) DeleteColumn(columnName string) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: nil, Type: DELETE_ALL_VERSION, HasType: true, IgnoreValue: true} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) DeleteColumnWithTimestamp(columnName string, timestamp int64) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: nil, Type: DELETE_ONE_VERSION, HasType: true, HasTimestamp: true, Timestamp: timestamp, IgnoreValue: true} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) IncrementColumn(columnName string, value int64) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: value, Type: INCREMENT, HasType: true, IgnoreValue: false} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *DeleteRowChange) Serialize() []byte { + return rowchange.PrimaryKey.Build(true) +} + +func (rowchange *PutRowChange) Serialize() []byte { + row := buildRowPutChange(rowchange.PrimaryKey, rowchange.Columns) + return row.Build() +} + +func (rowchange *UpdateRowChange) Serialize() []byte { + row := buildRowUpdateChange(rowchange.PrimaryKey, rowchange.Columns) + return row.Build() +} + +func (rowchange *DeleteRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *PutRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *UpdateRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *DeleteRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_DELETE +} + +func (rowchange *PutRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_PUT +} + +func (rowchange *UpdateRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_UPDATE +} + +func (rowchange *DeleteRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +} + +func (rowchange *UpdateRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +} + +func (rowchange *PutRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +} + +func (request *BatchWriteRowRequest) AddRowChange(change RowChange) { + if request.RowChangesGroupByTable == nil { + request.RowChangesGroupByTable = make(map[string][]RowChange) + } + request.RowChangesGroupByTable[change.GetTableName()] = append(request.RowChangesGroupByTable[change.GetTableName()], change) +} + 
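A short sketch (again not part of the diff) of how AddRowChange groups queued changes per table so one BatchWriteRow round trip can carry them; the table name, key, and column values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
)

func main() {
	// Queue two puts against the same table; AddRowChange lazily creates
	// the per-table map and appends each change under its table name.
	req := &tablestore.BatchWriteRowRequest{}
	for i := int64(0); i < 2; i++ {
		pk := &tablestore.PrimaryKey{}
		pk.AddPrimaryKeyColumn("pk0", i)

		change := &tablestore.PutRowChange{TableName: "sample_table", PrimaryKey: pk}
		change.AddColumn("col0", "value")
		change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)
		req.AddRowChange(change)
	}
	fmt.Printf("tables in batch: %d\n", len(req.RowChangesGroupByTable)) // 1
}
```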
+func (direction Direction) ToDirection() otsprotocol.Direction { + if direction == FORWARD { + return otsprotocol.Direction_FORWARD + } else { + return otsprotocol.Direction_BACKWARD + } +} + +func (columnMap *ColumnMap) GetRange(start int, count int) ([]*AttributeColumn, error) { + columns := []*AttributeColumn{} + + end := start + count + if len(columnMap.columnsKey) < end { + return nil, fmt.Errorf("invalid argument") + } + + for i := start; i < end; i++ { + subColumns := columnMap.Columns[columnMap.columnsKey[i]] + for _, column := range subColumns { + columns = append(columns, column) + } + } + + return columns, nil +} + +func (response *GetRowResponse) GetColumnMap() *ColumnMap { + if response == nil { + return nil + } + if response.columnMap != nil { + return response.columnMap + } else { + response.columnMap = &ColumnMap{} + response.columnMap.Columns = make(map[string][]*AttributeColumn) + + if len(response.Columns) == 0 { + return response.columnMap + } else { + for _, column := range response.Columns { + if _, ok := response.columnMap.Columns[column.ColumnName]; ok { + response.columnMap.Columns[column.ColumnName] = append(response.columnMap.Columns[column.ColumnName], column) + } else { + response.columnMap.columnsKey = append(response.columnMap.columnsKey, column.ColumnName) + value := []*AttributeColumn{} + value = append(value, column) + response.columnMap.Columns[column.ColumnName] = value + } + } + + sort.Strings(response.columnMap.columnsKey) + return response.columnMap + } + } +} + +func Assert(cond bool, msg string) { + if !cond { + panic(msg) + } +} + +func (meta *TableMeta) AddDefinedColumn(name string, definedType DefinedColumnType) { + meta.DefinedColumns = append(meta.DefinedColumns, &DefinedColumnSchema{Name: name, ColumnType: definedType}) +} + +func (meta *IndexMeta) AddDefinedColumn(name string) { + meta.DefinedColumns = append(meta.DefinedColumns, name) +} + +func (meta *IndexMeta) AddPrimaryKeyColumn(name string) { + meta.Primarykey = append(meta.Primarykey, name) +} + +func (request *CreateTableRequest) AddIndexMeta(meta *IndexMeta) { + request.IndexMetas = append(request.IndexMetas, meta) +} + +func (meta *IndexMeta) ConvertToPbIndexMeta() *otsprotocol.IndexMeta { + return &otsprotocol.IndexMeta{ + Name: &meta.IndexName, + PrimaryKey: meta.Primarykey, + DefinedColumn: meta.DefinedColumns, + IndexUpdateMode: otsprotocol.IndexUpdateMode_IUM_ASYNC_INDEX.Enum(), + IndexType: otsprotocol.IndexType_IT_GLOBAL_INDEX.Enum(), + } +} + +func ConvertPbIndexTypeToIndexType(indexType *otsprotocol.IndexType) IndexType { + switch *indexType { + case otsprotocol.IndexType_IT_GLOBAL_INDEX: + return IT_GLOBAL_INDEX + default: + return IT_LOCAL_INDEX + } +} + +func ConvertPbIndexMetaToIndexMeta(meta *otsprotocol.IndexMeta) *IndexMeta { + indexmeta := &IndexMeta{ + IndexName: *meta.Name, + IndexType: ConvertPbIndexTypeToIndexType(meta.IndexType), + } + + for _, pk := range meta.PrimaryKey { + indexmeta.Primarykey = append(indexmeta.Primarykey, pk) + } + + for _, col := range meta.DefinedColumn { + indexmeta.DefinedColumns = append(indexmeta.DefinedColumns, col) + } + + return indexmeta +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 000000000000..955dc0be5fa6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git 
a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 000000000000..15556530a854 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 000000000000..449e67cd01ac --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000000..c8a9fbb3871b --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000000..313a0f887b6f --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 000000000000..2cf4f5ab28e9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 000000000000..54d5afe9576d --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,91 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance, 100% compatible drop-in replacement for "encoding/json" + +You can also use thrift-like JSON via [thrift-iterator](https://github.com/thrift-iterator/go) + +``` +Go developers, welcome to join us: DiDi Chuxing Platform Technology Department, taowen@didichuxing.com +``` + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with the standard library + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contributions Welcome! 
+ +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) +* [Oleg Shaldybin](https://github.com/olegshaldybin) +* [Jason Toffaletti](https://github.com/toffaletti) + +Report an issue, open a pull request, email taowen@gmail.com, or chat on [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 000000000000..e674d0f397ed --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage mirrors json.RawMessage so jsoniter can replace encoding/json +type RawMessage []byte + +// Unmarshal adapts to the encoding/json Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenience method to read from a string instead of a []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get is a quick method to get a value from a deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to the encoding/json Marshal API +// +// Marshal returns the JSON encoding of v. +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString is a convenience method to write to a string instead of a []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to the encoding/json NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of an encoding/json Decoder, a jsoniter Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs to the encoding/json Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decodes the next JSON value into obj +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More reports whether there is another element in the current array or object being decoded.
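+// +// A typical streaming loop, as a sketch using only the Decoder API defined in this file (reader stands for any io.Reader): +// +//	dec := jsoniter.NewDecoder(reader) +//	for dec.More() { +//	    var v interface{} +//	    if err := dec.Decode(&v); err != nil { +//	        break +//	    } +//	}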
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered returns a reader over the data remaining in the Decoder's buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encodes val as JSON and writes it to the underlying io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent sets the indentation step. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 000000000000..daecfed615e7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,321 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any is a generic object representation. +// The lazy JSON implementation holds []byte and parses lazily.
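+// +// A minimal usage sketch (illustrative only; the JSON literal is invented, and Get is the package-level helper defined in adapter.go above): +// +//	any := jsoniter.Get([]byte(`{"user":{"id":42}}`), "user", "id") +//	fmt.Println(any.ToInt()) // 42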
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turns an int32 into an Any +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turns an int64 into an Any +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turns a uint32 into an Any +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turns a uint64 into an Any +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turns a float64 into an Any +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turns a string into an Any +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turns a Go object into an Any +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
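+// +// An illustrative sketch (assumes the ParseString constructor from iter.go, which is not part of this hunk): +// +//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1,"two",3]`) +//	a := iter.ReadAny() +//	fmt.Println(a.Get(1).ToString()) // "two"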
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 000000000000..0449e9aa428a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) 
+ valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 000000000000..9452324af5b1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 000000000000..35fdb09497fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() 
int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 000000000000..1b56f399150d --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 000000000000..c440d72b6d3a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + 
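+// A tiny usage sketch of this wrapper (WrapInt64 is defined in any.go above; the results follow directly from the methods in this file): +// +//	n := jsoniter.WrapInt64(-7) +//	_ = n.ToString() // "-7" +//	_ = n.ToBool()   // true (non-zero) + 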
+func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 000000000000..1d859eac3274 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 000000000000..d04cb54c11c1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 000000000000..9d1e901a66ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue 
+} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 000000000000..c44ef5c989a4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + 
return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := 
any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 000000000000..a4b93c78c822 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 000000000000..656bbd33d7ee --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 000000000000..7df2fce33ba9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return 
any.val +} diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 000000000000..b45ef688313e --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 000000000000..8c58fcba5922 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customizes how the API should behave. +// The API is created from a Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API is the public interface of this package. +// Primarily Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault is the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precision + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) 
getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + 
cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + 
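+	// Editorial note: iterators are pooled per frozenConfig (see pool.go in this
+	// same package), so BorrowIterator/ReturnIterator reuse a buffered Iterator
+	// rather than allocating a fresh one on every Unmarshal call.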
iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 000000000000..3095662b0610 --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
<br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 000000000000..95ae54fbfe4d --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,322 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. 
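+// Editorial usage sketch (illustrative, not part of the vendored source): the
+// Error-field pattern looks like this in caller code:
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"n": 1}`)
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		_ = iter.ReadInt()
+//	}
+//	if iter.Error != nil && iter.Error != io.EOF {
+//		log.Fatal(iter.Error)
+//	}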
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
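+// Editorial sketch (hypothetical myDecoder, not part of the vendored source):
+// custom decoders report malformed input through ReportError instead of
+// returning an error value:
+//
+//	func (d *myDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+//		if iter.WhatIsNext() != jsoniter.StringValue {
+//			iter.ReportError("myDecoder", "expected a JSON string")
+//			return
+//		}
+//		*(*string)(ptr) = iter.ReadString()
+//	}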
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 000000000000..6188cb4577ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
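+// Editorial usage sketch (illustrative, not part of the vendored source):
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3]`)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}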
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 000000000000..4f883c0959f7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,347 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return 
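+	// Editorial note: JSON forbids redundant leading zeros, so after an initial
+	// '0' the only legal continuations are '.', an exponent, or the end of the
+	// number; the case below rejects inputs such as "01.5".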
+ case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + 
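+		// Editorial note: floatDigits maps every byte either to its digit value
+		// or to a sentinel (endOfNumber, dotInNumber, invalidCharForNumber), so a
+		// single table lookup classifies each character in this loop.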
case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 000000000000..2142320355e8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 
read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 
+ uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + 
uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 000000000000..1c575767130d --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,251 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after 
object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 000000000000..f58beb9137bf --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,129 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return 
false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + copied := make([]byte, len(remaining)) + copy(copied, remaining) + return copied + } + captured = append(captured, remaining...) + return captured +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 000000000000..8fcdc3b69bdf --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := 
iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. 
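+// Editorial example (illustrative): in `"a\\"` the quote is preceded by an even
+// number of backslashes (one escaped backslash), so the string really ends
+// there; in `"a\"` the odd count means the quote itself is escaped and the scan
+// must continue. The backward walks below count consecutive backslashes to
+// tell the two cases apart.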
+func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 000000000000..f67bc2e83151 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 000000000000..adc487ea8048 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + 
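+			// Editorial note: reaching here means no backslash or control
+			// character appeared before the closing quote, so the raw buffer
+			// segment converted directly to a Go string.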
iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
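+// Editorial usage sketch (illustrative, not part of the vendored source),
+// showing why the result must be copied before the iterator is reused:
+//
+//	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"abc"`)
+//	b := iter.ReadStringAsSlice()
+//	key := make([]byte, len(b))
+//	copy(key, b) // b aliases the iterator's internal buffer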
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 000000000000..c2934f916eb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 000000000000..e2389b56cfff --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000000..4459e203fb85 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,332 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
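+// Editorial sketch (hypothetical durationEncoder, not part of the vendored
+// source): a minimal ValEncoder that renders time.Duration as its string form:
+//
+//	type durationEncoder struct{}
+//
+//	func (e *durationEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+//		return *(*time.Duration)(ptr) == 0
+//	}
+//
+//	func (e *durationEncoder) Encode(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+//		stream.WriteString((*(*time.Duration)(ptr)).String())
+//	}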
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := 
typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else 
if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000000..13a0b7b0878c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + 
iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000000..8b6bc8b43328 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 000000000000..04f68756bf35 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension: embed this type to get dummy (no-op) implementations for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +// EncoderExtension is a map from a type to its ValEncoder +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder gets the encoder from the map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +// DecoderExtension is a map from a type to its ValDecoder +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder gets the decoder from the map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc is the function form of a TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc is the function form of a TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc registers a TypeDecoder for a type, given as a function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder registers a TypeDecoder for a type +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field, given as a function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder registers a TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc registers a TypeEncoder for a type, given as encode/isEmpty functions +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder registers a TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field, given as encode/isEmpty functions +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder registers a TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension registers an extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag { + continue + } + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
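+ // Editor's note: the wrapping below means that, on decode, a nil embedded pointer is allocated and dereferenced before the inner codec runs, + // while the outer structFieldEncoder/structFieldDecoder first offsets ptr to the embedded field itself.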
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
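+ // (Editor's note: a name supplied via the struct tag replaces the Go field name, and unexported fields, detected by a lowercase first rune, are dropped entirely below.)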
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000000..98d45c1ec255 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file 
mode 100644 index 000000000000..f2619936c88f --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000000..7f66a88b043d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,326 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case 
reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(textMarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textMarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", 
`expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + subStream.buf = make([]byte, 0, 64) + key, elem := mapIter.UnsafeNext() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer() + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer(), + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return 
!iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000000..58ac959ad8bf --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,218 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) 
bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000000..9042eb0cb989 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,451 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && 
typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return 
&int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + 
stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr 
unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000000..43ec71d6dadf --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,133 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + // the pointer is null; we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + // reuse the existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to dereference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + // the pointer is null; we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + // reuse the existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if 
deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000000..9441d79df33b --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + 
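// grow the slice in place, then decode the next element into the newly added slot +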
sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 000000000000..355d2d116b4c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1048 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return 
&threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + 
fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 
*structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, 
fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + msg := "found unknown field: " + field + if decoder.disallowUnknownFields { + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + 
fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) 
+ case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + 
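+// Decode reads a single JSON object, dispatching each field name's precomputed hash to the matching field decoder and skipping unknown fields.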
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder 
*stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 000000000000..d0759cf6418c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), 
stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 000000000000..17662fdedcb5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "io" +) + +// Stream is an io.Writer-like object, with JSON-specific write functions. +// An error is not returned as a return value, but stored as the Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream creates a new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if writing to the internal buffer. +// bufSize is the initial size for the internal buffer in bytes.
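+// +// A minimal usage sketch (illustrative only, not part of the original source; w can be any io.Writer): +// +// stream := NewStream(ConfigDefault, w, 512) +// stream.WriteObjectStart() +// stream.WriteObjectField("enabled") +// stream.WriteBool(true) +// stream.WriteObjectEnd() +// stream.Flush()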
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool that can provide more streams with the same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuses this stream instance by assigning a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer returns the internal buffer; if the writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows appending to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// writeByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + n, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[n:] + return nil +} + +// WriteRaw writes the string out without quotes, just like raw []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...)
+} + +// WriteNil writes null to the stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue writes true to the stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse writes false to the stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool writes true or false to the stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart writes { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField writes "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd writes } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject writes {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore writes , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) + stream.Flush() +} + +// WriteArrayStart writes [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray writes [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd writes ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 000000000000..f318d2c59da3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,94 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 writes float32 to the stream +func (stream *Stream) WriteFloat32(val float32) { + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
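+ // switch to exponent ('e') format when the magnitude is below 1e-6 or at least 1e21, mirroring encoding/json's cutoffs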
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy writes float32 to the stream with ONLY 6 digits of precision, but is much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 writes float64 to the stream +func (stream *Stream) WriteFloat64(val float64) { + abs := math.Abs(val) + fmt := byte('f') + // Note: the cutoffs mirror WriteFloat32, but plain float64 comparisons are correct here. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy writes float64 to the stream with ONLY 6 digits of precision, but is much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 000000000000..d1059ee4c20e --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 writes uint8 to the stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 writes int8 to the stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 writes uint16 to the stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf =
writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 writes int16 to the stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 writes uint32 to the stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 writes int32 to the stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 writes uint64 to the stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 writes int64 to the stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt writes int to the stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint writes uint to the stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 000000000000..54c2ba0b3a2d --- /dev/null +++
b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML