diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 9cb07172d2ae..1f257c343eb3 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1456,6 +1456,10 @@
{
"ImportPath": "speter.net/go/exp/math/dec/inf",
"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"
+ },
+ {
+ "ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
+ "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc"
}
]
}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore
new file mode 100644
index 000000000000..c4c1f537bbef
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.gitignore
@@ -0,0 +1,29 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# Editor swap files
+*.swp
+*~
+.DS_Store
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml
new file mode 100644
index 000000000000..e6fabccbc588
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/.travis.yml
@@ -0,0 +1,19 @@
+sudo: false
+
+language: go
+
+before_script:
+ - go get -u golang.org/x/tools/cmd/vet
+ - go get -u github.com/golang/lint/golint
+
+go: tip
+script:
+ - test -z "$(gofmt -s -l -w management | tee /dev/stderr)"
+ - test -z "$(gofmt -s -l -w storage | tee /dev/stderr)"
+ - go build -v ./...
+ - go test -v ./storage/... -check.v
+ - test -z "$(golint ./storage/... | tee /dev/stderr)"
+ - go vet ./storage/...
+ - go test -v ./management/...
+ - test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)"
+ - go vet ./management/...
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md
new file mode 100644
index 000000000000..13d548570e4a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/README.md
@@ -0,0 +1,88 @@
+# Microsoft Azure SDK for Go
+
+This project provides various Go packages to perform operations
+on Microsoft Azure REST APIs.
+
+[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
+
+See list of implemented API clients [here](http://godoc.org/github.com/Azure/azure-sdk-for-go).
+
+> **NOTE:** This repository is under heavy ongoing development and
+is likely to break over time. We currently do not have any releases
+yet. If you are planning to use the repository, please consider vendoring
+the packages in your project and update them when a stable tag is out.
+
+# Installation
+
+ go get -d github.com/Azure/azure-sdk-for-go/management
+
+# Usage
+
+Read Godoc of the repository at: http://godoc.org/github.com/Azure/azure-sdk-for-go/
+
+The client currently supports authentication to the Service Management
+API with certificates or Azure `.publishSettings` file. You can
+download the `.publishSettings` file for your subscriptions
+[here](https://manage.windowsazure.com/publishsettings).
+
+### Example: Creating a Linux Virtual Machine
+
+```go
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+
+ "github.com/Azure/azure-sdk-for-go/management"
+ "github.com/Azure/azure-sdk-for-go/management/hostedservice"
+ "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+ "github.com/Azure/azure-sdk-for-go/management/vmutils"
+)
+
+func main() {
+ dnsName := "test-vm-from-go"
+ storageAccount := "mystorageaccount"
+ location := "West US"
+ vmSize := "Small"
+ vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
+ userName := "testuser"
+ userPassword := "Test123"
+
+ client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "")
+ if err != nil {
+ panic(err)
+ }
+
+ // create hosted service
+ if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{
+ ServiceName: dnsName,
+ Location: location,
+ Label: base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil {
+ panic(err)
+ }
+
+ // create virtual machine
+ role := vmutils.NewVMConfiguration(dnsName, vmSize)
+ vmutils.ConfigureDeploymentFromPlatformImage(
+ &role,
+ vmImage,
+ fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName),
+ "")
+ vmutils.ConfigureForLinux(&role, dnsName, userName, userPassword)
+ vmutils.ConfigureWithPublicSSH(&role)
+
+ operationID, err := virtualmachine.NewClient(client).
+ CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
+ if err != nil {
+ panic(err)
+ }
+ if err := client.WaitForOperation(operationID, nil); err != nil {
+ panic(err)
+ }
+}
+```
+
+# License
+
+This project is published under [Apache 2.0 License](LICENSE).
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go
new file mode 100644
index 000000000000..a6e2eb8ab6df
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob.go
@@ -0,0 +1,858 @@
+package storage
+
+import (
+ "bytes"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// BlobStorageClient contains operations for Microsoft Azure Blob Storage
+// Service.
+type BlobStorageClient struct {
+ client Client
+}
+
+// A Container is an entry in ContainerListResponse.
+type Container struct {
+ Name string `xml:"Name"`
+ Properties ContainerProperties `xml:"Properties"`
+ // TODO (ahmetalpbalkan) Metadata
+}
+
+// ContainerProperties contains various properties of a container returned from
+// various endpoints like ListContainers.
+type ContainerProperties struct {
+ LastModified string `xml:"Last-Modified"`
+ Etag string `xml:"Etag"`
+ LeaseStatus string `xml:"LeaseStatus"`
+ LeaseState string `xml:"LeaseState"`
+ LeaseDuration string `xml:"LeaseDuration"`
+ // TODO (ahmetalpbalkan) remaining fields
+}
+
+// ContainerListResponse contains the response fields from
+// ListContainers call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+type ContainerListResponse struct {
+ XMLName xml.Name `xml:"EnumerationResults"`
+ Xmlns string `xml:"xmlns,attr"`
+ Prefix string `xml:"Prefix"`
+ Marker string `xml:"Marker"`
+ NextMarker string `xml:"NextMarker"`
+ MaxResults int64 `xml:"MaxResults"`
+ Containers []Container `xml:"Containers>Container"`
+}
+
+// A Blob is an entry in BlobListResponse.
+type Blob struct {
+ Name string `xml:"Name"`
+ Properties BlobProperties `xml:"Properties"`
+ // TODO (ahmetalpbalkan) Metadata
+}
+
+// BlobProperties contains various properties of a blob
+// returned in various endpoints like ListBlobs or GetBlobProperties.
+type BlobProperties struct {
+ LastModified string `xml:"Last-Modified"`
+ Etag string `xml:"Etag"`
+ ContentMD5 string `xml:"Content-MD5"`
+ ContentLength int64 `xml:"Content-Length"`
+ ContentType string `xml:"Content-Type"`
+ ContentEncoding string `xml:"Content-Encoding"`
+ BlobType BlobType `xml:"x-ms-blob-blob-type"`
+ SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
+ CopyID string `xml:"CopyId"`
+ CopyStatus string `xml:"CopyStatus"`
+ CopySource string `xml:"CopySource"`
+ CopyProgress string `xml:"CopyProgress"`
+ CopyCompletionTime string `xml:"CopyCompletionTime"`
+ CopyStatusDescription string `xml:"CopyStatusDescription"`
+}
+
+// BlobListResponse contains the response fields from ListBlobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+type BlobListResponse struct {
+ XMLName xml.Name `xml:"EnumerationResults"`
+ Xmlns string `xml:"xmlns,attr"`
+ Prefix string `xml:"Prefix"`
+ Marker string `xml:"Marker"`
+ NextMarker string `xml:"NextMarker"`
+ MaxResults int64 `xml:"MaxResults"`
+ Blobs []Blob `xml:"Blobs>Blob"`
+}
+
+// ListContainersParameters defines the set of customizable parameters to make a
+// List Containers call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+type ListContainersParameters struct {
+ Prefix string
+ Marker string
+ Include string
+ MaxResults uint
+ Timeout uint
+}
+
+func (p ListContainersParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Prefix != "" {
+ out.Set("prefix", p.Prefix)
+ }
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.Include != "" {
+ out.Set("include", p.Include)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
+// ListBlobsParameters defines the set of customizable
+// parameters to make a List Blobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+type ListBlobsParameters struct {
+ Prefix string
+ Delimiter string
+ Marker string
+ Include string
+ MaxResults uint
+ Timeout uint
+}
+
+func (p ListBlobsParameters) getParameters() url.Values {
+ out := url.Values{}
+
+ if p.Prefix != "" {
+ out.Set("prefix", p.Prefix)
+ }
+ if p.Delimiter != "" {
+ out.Set("delimiter", p.Delimiter)
+ }
+ if p.Marker != "" {
+ out.Set("marker", p.Marker)
+ }
+ if p.Include != "" {
+ out.Set("include", p.Include)
+ }
+ if p.MaxResults != 0 {
+ out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+ }
+ if p.Timeout != 0 {
+ out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+ }
+
+ return out
+}
+
+// BlobType defines the type of the Azure Blob.
+type BlobType string
+
+// Types of blobs (block and page)
+const (
+ BlobTypeBlock BlobType = "BlockBlob"
+ BlobTypePage BlobType = "PageBlob"
+)
+
+// PageWriteType defines the type updates that are going to be
+// done on the page blob.
+type PageWriteType string
+
+// Types of operations on page blobs
+const (
+ PageWriteTypeUpdate PageWriteType = "update"
+ PageWriteTypeClear PageWriteType = "clear"
+)
+
+const (
+ blobCopyStatusPending = "pending"
+ blobCopyStatusSuccess = "success"
+ blobCopyStatusAborted = "aborted"
+ blobCopyStatusFailed = "failed"
+)
+
+// BlockListType is used to filter out types of blocks in a Get Blocks List call
+// for a block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
+// block types.
+type BlockListType string
+
+// Filters for listing blocks in block blobs
+const (
+ BlockListTypeAll BlockListType = "all"
+ BlockListTypeCommitted BlockListType = "committed"
+ BlockListTypeUncommitted BlockListType = "uncommitted"
+)
+
+// ContainerAccessType defines the access level to the container from a public
+// request.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
+// blob-public-access" header.
+type ContainerAccessType string
+
+// Access options for containers
+const (
+ ContainerAccessTypePrivate ContainerAccessType = ""
+ ContainerAccessTypeBlob ContainerAccessType = "blob"
+ ContainerAccessTypeContainer ContainerAccessType = "container"
+)
+
+// Maximum sizes (per REST API) for various concepts
+const (
+ MaxBlobBlockSize = 4 * 1024 * 1024
+ MaxBlobPageSize = 4 * 1024 * 1024
+)
+
+// BlockStatus defines states a block for a block blob can
+// be in.
+type BlockStatus string
+
+// List of statuses that can be used to refer to a block in a block list
+const (
+ BlockStatusUncommitted BlockStatus = "Uncommitted"
+ BlockStatusCommitted BlockStatus = "Committed"
+ BlockStatusLatest BlockStatus = "Latest"
+)
+
+// Block is used to create Block entities for Put Block List
+// call.
+type Block struct {
+ ID string
+ Status BlockStatus
+}
+
+// BlockListResponse contains the response fields from Get Block List call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+type BlockListResponse struct {
+ XMLName xml.Name `xml:"BlockList"`
+ CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
+ UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
+}
+
+// BlockResponse contains the block information returned
+// in the GetBlockListCall.
+type BlockResponse struct {
+ Name string `xml:"Name"`
+ Size int64 `xml:"Size"`
+}
+
+// GetPageRangesResponse contains the response fields from
+// Get Page Ranges call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type GetPageRangesResponse struct {
+ XMLName xml.Name `xml:"PageList"`
+ PageList []PageRange `xml:"PageRange"`
+}
+
+// PageRange contains information about a page of a page blob from
+// Get Pages Range call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type PageRange struct {
+ Start int64 `xml:"Start"`
+ End int64 `xml:"End"`
+}
+
+var (
+ errBlobCopyAborted = errors.New("storage: blob copy is aborted")
+ errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
+)
+
+// ListContainers returns the list of containers in a storage account along with
+// pagination token and other response details.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
+ q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
+ uri := b.client.getEndpoint(blobServiceName, "", q)
+ headers := b.client.getStandardHeaders()
+
+ var out ContainerListResponse
+ resp, err := b.client.exec("GET", uri, headers, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+// CreateContainer creates a blob container within the storage account
+// with given name and access level. Returns error if container already exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
+func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
+ resp, err := b.createContainer(name, access)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// CreateContainerIfNotExists creates a blob container if it does not exist. Returns
+// true if container is newly created or false if container already exists.
+func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
+ resp, err := b.createContainer(name, access)
+ if resp != nil {
+ defer resp.body.Close()
+ if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
+ return resp.statusCode == http.StatusCreated, nil
+ }
+ }
+ return false, err
+}
+
+func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) {
+ verb := "PUT"
+ uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+
+ headers := b.client.getStandardHeaders()
+ headers["Content-Length"] = "0"
+ if access != "" {
+ headers["x-ms-blob-public-access"] = string(access)
+ }
+ return b.client.exec(verb, uri, headers, nil)
+}
+
+// ContainerExists returns true if a container with given name exists
+// on the storage account, otherwise returns false.
+func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
+ verb := "HEAD"
+ uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+ headers := b.client.getStandardHeaders()
+
+ resp, err := b.client.exec(verb, uri, headers, nil)
+ if resp != nil {
+ defer resp.body.Close()
+ if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusOK, nil
+ }
+ }
+ return false, err
+}
+
+// DeleteContainer deletes the container with given name on the storage
+// account. If the container does not exist returns error.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (b BlobStorageClient) DeleteContainer(name string) error {
+ resp, err := b.deleteContainer(name)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteContainerIfExists deletes the container with given name on the storage
+// account if it exists. Returns true if container is deleted with this call, or
+// false if the container did not exist at the time of the Delete Container
+// operation.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
+ resp, err := b.deleteContainer(name)
+ if resp != nil {
+ defer resp.body.Close()
+ if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ }
+ return false, err
+}
+
+func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) {
+ verb := "DELETE"
+ uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+
+ headers := b.client.getStandardHeaders()
+ return b.client.exec(verb, uri, headers, nil)
+}
+
+// ListBlobs returns an object that contains list of blobs in the container,
+// pagination token and other information in the response of List Blobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
+ q := mergeParams(params.getParameters(), url.Values{
+ "restype": {"container"},
+ "comp": {"list"}})
+ uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q)
+ headers := b.client.getStandardHeaders()
+
+ var out BlobListResponse
+ resp, err := b.client.exec("GET", uri, headers, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+// BlobExists returns true if a blob with given name exists on the specified
+// container of the storage account.
+func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
+ verb := "HEAD"
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ headers := b.client.getStandardHeaders()
+ resp, err := b.client.exec(verb, uri, headers, nil)
+ if resp != nil {
+ defer resp.body.Close()
+ if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+ return resp.statusCode == http.StatusOK, nil
+ }
+ }
+ return false, err
+}
+
+// GetBlobURL gets the canonical URL to the blob with the specified name in the
+// specified container. This method does not create a publicly accessible URL if
+// the blob or container is private and this method does not check if the blob
+// exists.
+func (b BlobStorageClient) GetBlobURL(container, name string) string {
+ if container == "" {
+ container = "$root"
+ }
+ return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+}
+
+// GetBlob returns a stream to read the blob. Caller must call Close() the
+// reader to close on the underlying connection.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
+ resp, err := b.getBlobRange(container, name, "")
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
+// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
+ resp, err := b.getBlobRange(container, name, bytesRange)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
+ return nil, err
+ }
+ return resp.body, nil
+}
+
+func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) {
+ verb := "GET"
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ headers := b.client.getStandardHeaders()
+ if bytesRange != "" {
+ headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
+ }
+ resp, err := b.client.exec(verb, uri, headers, nil)
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
+// GetBlobProperties provides various information about the specified
+// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
+func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
+ verb := "HEAD"
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ headers := b.client.getStandardHeaders()
+ resp, err := b.client.exec(verb, uri, headers, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return nil, err
+ }
+
+ var contentLength int64
+ contentLengthStr := resp.headers.Get("Content-Length")
+ if contentLengthStr != "" {
+ contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ var sequenceNum int64
+ sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number")
+ if sequenceNumStr != "" {
+ sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &BlobProperties{
+ LastModified: resp.headers.Get("Last-Modified"),
+ Etag: resp.headers.Get("Etag"),
+ ContentMD5: resp.headers.Get("Content-MD5"),
+ ContentLength: contentLength,
+ ContentEncoding: resp.headers.Get("Content-Encoding"),
+ SequenceNumber: sequenceNum,
+ CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"),
+ CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
+ CopyID: resp.headers.Get("x-ms-copy-id"),
+ CopyProgress: resp.headers.Get("x-ms-copy-progress"),
+ CopySource: resp.headers.Get("x-ms-copy-source"),
+ CopyStatus: resp.headers.Get("x-ms-copy-status"),
+ BlobType: BlobType(resp.headers.Get("x-ms-blob-type")),
+ }, nil
+}
+
+// CreateBlockBlob initializes an empty block blob with no blocks.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeBlock)
+ headers["Content-Length"] = fmt.Sprintf("%v", 0)
+
+ resp, err := b.client.exec("PUT", uri, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// PutBlock saves the given data chunk to the specified block blob with
+// given ID.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
+func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error {
+ return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk))
+}
+
+// PutBlockWithLength saves the given data stream of exactly specified size to
+// the block blob with given ID. It is an alternative to PutBlocks where data
+// comes as stream but the length is known in advance.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
+func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}})
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeBlock)
+ headers["Content-Length"] = fmt.Sprintf("%v", size)
+
+ resp, err := b.client.exec("PUT", uri, headers, blob)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// PutBlockList saves list of blocks to the specified block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx
+func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error {
+ blockListXML := prepareBlockListRequest(blocks)
+
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}})
+ headers := b.client.getStandardHeaders()
+ headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
+
+ resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML))
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetBlockList retrieves list of blocks in the specified block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) {
+ params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}}
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+ headers := b.client.getStandardHeaders()
+
+ var out BlockListResponse
+ resp, err := b.client.exec("GET", uri, headers, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+// PutPageBlob initializes an empty page blob with specified name and maximum
+// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
+// be created using this method before writing pages.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypePage)
+ headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size)
+ headers["Content-Length"] = fmt.Sprintf("%v", 0)
+
+ resp, err := b.client.exec("PUT", uri, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// PutPage writes a range of pages to a page blob or clears the given range.
+// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned
+// with 512-byte boundaries and chunk must be of size multiplies by 512.
+//
+// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
+func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypePage)
+ headers["x-ms-page-write"] = string(writeType)
+ headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)
+
+ var contentLength int64
+ var data io.Reader
+ if writeType == PageWriteTypeClear {
+ contentLength = 0
+ data = bytes.NewReader([]byte{})
+ } else {
+ contentLength = int64(len(chunk))
+ data = bytes.NewReader(chunk)
+ }
+ headers["Content-Length"] = fmt.Sprintf("%v", contentLength)
+
+ resp, err := b.client.exec("PUT", uri, headers, data)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetPageRanges returns the list of valid page ranges for a page blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) {
+ path := fmt.Sprintf("%s/%s", container, name)
+ uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}})
+ headers := b.client.getStandardHeaders()
+
+ var out GetPageRangesResponse
+ resp, err := b.client.exec("GET", uri, headers, nil)
+ if err != nil {
+ return out, err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+ return out, err
+ }
+ err = xmlUnmarshal(resp.body, &out)
+ return out, err
+}
+
+// CopyBlob starts a blob copy operation and waits for the operation to
+// complete. sourceBlob parameter must be a canonical URL to the blob (can be
+// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore
+// this helper method works faster on smaller files.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
+func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
+ copyID, err := b.startBlobCopy(container, name, sourceBlob)
+ if err != nil {
+ return err
+ }
+
+ return b.waitForBlobCopy(container, name, copyID)
+}
+
+func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) {
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+ headers := b.client.getStandardHeaders()
+ headers["Content-Length"] = "0"
+ headers["x-ms-copy-source"] = sourceBlob
+
+ resp, err := b.client.exec("PUT", uri, headers, nil)
+ if err != nil {
+ return "", err
+ }
+ defer resp.body.Close()
+
+ if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
+ return "", err
+ }
+
+ copyID := resp.headers.Get("x-ms-copy-id")
+ if copyID == "" {
+ return "", errors.New("Got empty copy id header")
+ }
+ return copyID, nil
+}
+
+func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error {
+ for {
+ props, err := b.GetBlobProperties(container, name)
+ if err != nil {
+ return err
+ }
+
+ if props.CopyID != copyID {
+ return errBlobCopyIDMismatch
+ }
+
+ switch props.CopyStatus {
+ case blobCopyStatusSuccess:
+ return nil
+ case blobCopyStatusPending:
+ continue
+ case blobCopyStatusAborted:
+ return errBlobCopyAborted
+ case blobCopyStatusFailed:
+ return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
+ default:
+ return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
+ }
+ }
+}
+
+// DeleteBlob deletes the given blob from the specified container.
+// If the blob does not exists at the time of the Delete Blob operation, it
+// returns error. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
+func (b BlobStorageClient) DeleteBlob(container, name string) error {
+ resp, err := b.deleteBlob(container, name)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteBlobIfExists deletes the given blob from the specified container If the
+// blob is deleted with this call, returns true. Otherwise returns false.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
+func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) {
+ resp, err := b.deleteBlob(container, name)
+ if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) {
+ return resp.statusCode == http.StatusAccepted, nil
+ }
+ defer resp.body.Close()
+ return false, err
+}
+
+func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) {
+ verb := "DELETE"
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+ headers := b.client.getStandardHeaders()
+
+ return b.client.exec(verb, uri, headers, nil)
+}
+
// pathForContainer constructs the URI path component addressing the named
// container.
func pathForContainer(name string) string {
	return "/" + name
}
+
// pathForBlob constructs the URI path component addressing a blob within its
// container.
func pathForBlob(container, name string) string {
	return "/" + container + "/" + name
}
+
// GetBlobSASURI creates an URL to the specified blob which contains the Shared
// Access Signature with specified permissions and expiration time.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
	var (
		signedPermissions = permissions
		blobURL = b.GetBlobURL(container, name)
	)
	// The canonicalized resource is the account/container/blob identifier
	// in the exact form the service signs against.
	canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL)
	if err != nil {
		return "", err
	}
	signedExpiry := expiry.Format(time.RFC3339)
	// "b" designates a blob resource (as opposed to "c" for a container).
	signedResource := "b"

	stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions)
	if err != nil {
		return "", err
	}

	// HMAC-SHA256 over the string-to-sign with the account key; carried in
	// the "sig" query parameter alongside the other SAS parameters.
	sig := b.client.computeHmac256(stringToSign)
	sasParams := url.Values{
		"sv": {b.client.apiVersion},
		"se": {signedExpiry},
		"sr": {signedResource},
		"sp": {signedPermissions},
		"sig": {sig},
	}

	sasURL, err := url.Parse(blobURL)
	if err != nil {
		return "", err
	}
	sasURL.RawQuery = sasParams.Encode()
	return sasURL.String(), nil
}
+
// blobSASStringToSign builds the string-to-sign payload for a blob Shared
// Access Signature. Only API versions from 2013-08-15 onward are supported.
//
// Reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) {
	if signedVersion < "2013-08-15" {
		return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
	}

	// The unused fields (start time, identifier and the rsc* response
	// header overrides) are left empty but still occupy a signed slot.
	fields := []string{
		signedPermissions,
		"", // signedStart
		signedExpiry,
		canonicalizedResource,
		"", // signedIdentifier
		signedVersion,
		"", // rscc
		"", // rscd
		"", // rsce
		"", // rscl
		"", // rsct
	}
	return strings.Join(fields, "\n"), nil
}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go
new file mode 100644
index 000000000000..14a2f6b2ce8f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go
@@ -0,0 +1,625 @@
+package storage
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ chk "gopkg.in/check.v1"
+)
+
// StorageBlobSuite groups the blob service tests. Most of them are
// integration tests that talk to a live storage account obtained via
// getBasicClient (defined elsewhere in the package — presumably from
// environment credentials; verify against the test setup).
type StorageBlobSuite struct{}

var _ = chk.Suite(&StorageBlobSuite{})

// testContainerPrefix namespaces containers created by these tests so that
// deleteTestContainers can find and remove leftovers.
const testContainerPrefix = "zzzztest-"

// getBlobClient returns a blob service client bound to the test account.
func getBlobClient(c *chk.C) BlobStorageClient {
	return getBasicClient(c).GetBlobService()
}

// Test_pathForContainer checks the container URI path helper.
func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) {
	c.Assert(pathForContainer("foo"), chk.Equals, "/foo")
}

// Test_pathForBlob checks the blob URI path helper.
func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) {
	c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob")
}

// Test_blobSASStringToSign verifies the string-to-sign layout and the
// rejection of API versions older than 2013-08-15.
func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) {
	_, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP")
	c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15

	out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP")
	c.Assert(err, chk.IsNil)
	c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n")
}
+
// TestGetBlobSASURI checks SAS URI composition offline, against a fixed
// account key ("YmFy" = base64 "bar") and a zero expiry time.
func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) {
	api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true)
	c.Assert(err, chk.IsNil)
	cli := api.GetBlobService()
	expiry := time.Time{}

	// Expected signature is precomputed for this exact key/resource/expiry.
	expectedParts := url.URL{
		Scheme: "https",
		Host: "foo.blob.core.windows.net",
		Path: "container/name",
		RawQuery: url.Values{
			"sv": {"2013-08-15"},
			"sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="},
			"sr": {"b"},
			"sp": {"r"},
			"se": {"0001-01-01T00:00:00Z"},
		}.Encode()}

	u, err := cli.GetBlobSASURI("container", "name", expiry, "r")
	c.Assert(err, chk.IsNil)
	sasParts, err := url.Parse(u)
	c.Assert(err, chk.IsNil)
	c.Assert(expectedParts.String(), chk.Equals, sasParts.String())
	c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query())
}

// TestBlobSASURICorrectness uploads a private blob and verifies that the
// generated SAS URI grants anonymous read access (integration test).
func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	blob := randString(20)
	body := []byte(randString(100))
	expiry := time.Now().UTC().Add(time.Hour)
	permissions := "r"

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil)

	sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions)
	c.Assert(err, chk.IsNil)

	// Plain http.Get: the SAS URI must work without account credentials.
	resp, err := http.Get(sasURI)
	c.Assert(err, chk.IsNil)

	blobResp, err := ioutil.ReadAll(resp.Body)
	defer resp.Body.Close()
	c.Assert(err, chk.IsNil)

	c.Assert(resp.StatusCode, chk.Equals, http.StatusOK)
	c.Assert(len(blobResp), chk.Equals, len(body))
}
+
// TestListContainersPagination creates n test containers and verifies that
// ListContainers pages through all of them via MaxResults/Marker.
func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) {
	cli := getBlobClient(c)
	c.Assert(deleteTestContainers(cli), chk.IsNil)

	const n = 5
	const pageSize = 2

	// Create test containers
	created := []string{}
	for i := 0; i < n; i++ {
		name := randContainer()
		c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil)
		created = append(created, name)
	}
	sort.Strings(created)

	// Defer test container deletions
	defer func() {
		var wg sync.WaitGroup
		for _, cnt := range created {
			wg.Add(1)
			go func(name string) {
				c.Assert(cli.DeleteContainer(name), chk.IsNil)
				wg.Done()
			}(cnt)
		}
		wg.Wait()
	}()

	// Paginate results
	seen := []string{}
	marker := ""
	for {
		resp, err := cli.ListContainers(ListContainersParameters{
			Prefix: testContainerPrefix,
			MaxResults: pageSize,
			Marker: marker})
		c.Assert(err, chk.IsNil)

		containers := resp.Containers
		if len(containers) > pageSize {
			c.Fatalf("Got a bigger page. Expected: %d, got: %d", pageSize, len(containers))
		}

		for _, c := range containers {
			seen = append(seen, c.Name)
		}

		marker = resp.NextMarker
		if marker == "" || len(containers) == 0 {
			break
		}
	}

	c.Assert(seen, chk.DeepEquals, created)
}

// TestContainerExists checks ContainerExists for both a missing and an
// existing container.
func (s *StorageBlobSuite) TestContainerExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)
	ok, err := cli.ContainerExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, false)

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	ok, err = cli.ContainerExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, true)
}

// TestCreateDeleteContainer checks the basic container create/delete cycle.
func (s *StorageBlobSuite) TestCreateDeleteContainer(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	c.Assert(cli.DeleteContainer(cnt), chk.IsNil)
}

// TestCreateContainerIfNotExists verifies that a second create reports
// "already existed" without an error.
func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)

	// First create
	ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, true)

	// Second create, should not give errors
	ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
	c.Assert(err, chk.IsNil)
	defer cli.DeleteContainer(cnt)
	c.Assert(ok, chk.Equals, false)
}

// TestDeleteContainerIfExists verifies deletion semantics for both missing
// and existing containers.
func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)

	// Nonexisting container
	c.Assert(cli.DeleteContainer(cnt), chk.NotNil)

	ok, err := cli.DeleteContainerIfExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, false)

	// Existing container
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	ok, err = cli.DeleteContainerIfExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, true)
}
+
// TestBlobExists checks BlobExists for both a missing and an existing blob.
func (s *StorageBlobSuite) TestBlobExists(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	cli := getBlobClient(c)

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)
	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil)
	defer cli.DeleteBlob(cnt, blob)

	ok, err := cli.BlobExists(cnt, blob+".foo")
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, false)

	ok, err = cli.BlobExists(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, true)
}

// TestGetBlobURL checks URL construction offline, including the $root
// container fallback for an empty container name.
func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) {
	api, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	cli := api.GetBlobService()

	c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob")
	c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob")
	c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob")
}

// TestBlobCopy performs a same-container copy and verifies the destination's
// contents. Skipped in -short mode since copy has no SLA.
func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) {
	if testing.Short() {
		c.Skip("skipping blob copy in short mode, no SLA on async operation")
	}

	cli := getBlobClient(c)
	cnt := randContainer()
	src := randString(20)
	dst := randString(20)
	body := []byte(randString(1024))

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil)
	defer cli.DeleteBlob(cnt, src)

	c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil)
	defer cli.DeleteBlob(cnt, dst)

	blobBody, err := cli.GetBlob(cnt, dst)
	c.Assert(err, chk.IsNil)

	b, err := ioutil.ReadAll(blobBody)
	defer blobBody.Close()
	c.Assert(err, chk.IsNil)
	c.Assert(b, chk.DeepEquals, body)
}

// TestDeleteBlobIfExists verifies that deleting a nonexistent blob errors via
// DeleteBlob but is reported as (false, nil) via DeleteBlobIfExists.
func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)

	cli := getBlobClient(c)
	c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil)

	ok, err := cli.DeleteBlobIfExists(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(ok, chk.Equals, false)
}
+
// TestGetBlobProperties verifies the error path for a missing blob and the
// ContentLength/BlobType fields for an uploaded block blob.
func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	contents := randString(64)

	cli := getBlobClient(c)
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	// Nonexisting blob
	_, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.NotNil)

	// Put the blob
	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil)

	// Get blob properties
	props, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.IsNil)

	c.Assert(props.ContentLength, chk.Equals, int64(len(contents)))
	c.Assert(props.BlobType, chk.Equals, BlobTypeBlock)
}
+
// TestListBlobsPagination uploads n blobs and verifies that ListBlobs pages
// through all of them via MaxResults/Marker.
func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	blobs := []string{}
	const n = 5
	const pageSize = 2
	for i := 0; i < n; i++ {
		name := randString(20)
		c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil)
		blobs = append(blobs, name)
	}
	sort.Strings(blobs)

	// Paginate
	seen := []string{}
	marker := ""
	for {
		resp, err := cli.ListBlobs(cnt, ListBlobsParameters{
			MaxResults: pageSize,
			Marker: marker})
		c.Assert(err, chk.IsNil)

		for _, v := range resp.Blobs {
			seen = append(seen, v.Name)
		}

		marker = resp.NextMarker
		if marker == "" || len(resp.Blobs) == 0 {
			break
		}
	}

	// Compare
	c.Assert(seen, chk.DeepEquals, blobs)
}
+
+func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) {
+ cli := getBlobClient(c)
+ cnt := randContainer()
+
+ c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
+ defer cli.deleteContainer(cnt)
+
+ blob := randString(20)
+ c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)
+
+ props, err := cli.GetBlobProperties(cnt, blob)
+ c.Assert(err, chk.IsNil)
+ c.Assert(props.ContentLength, chk.Not(chk.Equals), 0)
+}
+
// TestGetBlobRange checks partial reads with open-ended and bounded HTTP
// byte ranges.
func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	body := "0123456789"

	cli := getBlobClient(c)
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil)
	defer cli.DeleteBlob(cnt, blob)

	// Read 1-3
	for _, r := range []struct {
		rangeStr string
		expected string
	}{
		{"0-", body},
		{"1-3", body[1 : 3+1]},
		{"3-", body[3:]},
	} {
		resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr)
		c.Assert(err, chk.IsNil)
		blobBody, err := ioutil.ReadAll(resp)
		c.Assert(err, chk.IsNil)

		str := string(blobBody)
		c.Assert(str, chk.Equals, r.expected)
	}
}

// TestPutBlock uploads a single uncommitted block.
func (s *StorageBlobSuite) TestPutBlock(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	chunk := []byte(randString(1024))
	blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
	c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
}

// TestGetBlockList_PutBlockList walks a block through the
// uncommitted → committed lifecycle and checks GetBlockList at each stage.
func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	chunk := []byte(randString(1024))
	blockID := base64.StdEncoding.EncodeToString([]byte("foo"))

	// Put one block
	c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
	defer cli.deleteBlob(cnt, blob)

	// Get committed blocks
	committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted)
	c.Assert(err, chk.IsNil)

	if len(committed.CommittedBlocks) > 0 {
		c.Fatal("There are committed blocks")
	}

	// Get uncommitted blocks
	uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted)
	c.Assert(err, chk.IsNil)

	c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1)
	// Commit block list
	c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil)

	// Get all blocks
	all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
	c.Assert(err, chk.IsNil)
	c.Assert(len(all.CommittedBlocks), chk.Equals, 1)
	c.Assert(len(all.UncommittedBlocks), chk.Equals, 0)

	// Verify the block
	thatBlock := all.CommittedBlocks[0]
	c.Assert(thatBlock.Name, chk.Equals, blockID)
	c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk)))
}
+
// TestCreateBlockBlob verifies that a freshly initialized block blob has no
// committed or uncommitted blocks.
func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil)

	// Verify
	blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
	c.Assert(err, chk.IsNil)
	c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0)
	c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0)
}

// TestPutPageBlob verifies page blob creation and that its reported length
// equals the requested maximum size.
func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024)
	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Verify
	props, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(props.ContentLength, chk.Equals, size)
	c.Assert(props.BlobType, chk.Equals, BlobTypePage)
}

// TestPutPagesUpdate writes and then partially overwrites page ranges,
// verifying the blob contents after each update.
func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use
	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	chunk1 := []byte(randString(1024))
	chunk2 := []byte(randString(512))

	// Append chunks
	c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil)
	c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil)

	// Verify contents
	out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
	c.Assert(err, chk.IsNil)
	defer out.Close()
	blobContents, err := ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...))
	out.Close()

	// Overwrite first half of chunk1
	chunk0 := []byte(randString(512))
	c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil)

	// Verify contents
	out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
	c.Assert(err, chk.IsNil)
	defer out.Close()
	blobContents, err = ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...))
}
+
// TestPutPagesClear writes a page range, clears a 512-byte aligned slice of
// it, and verifies the cleared region reads back as zeros.
func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use
	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Put 0-2047
	chunk := []byte(randString(2048))
	c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil)

	// Clear 512-1023
	c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil)

	// Verify contents
	out, err := cli.GetBlobRange(cnt, blob, "0-2047")
	c.Assert(err, chk.IsNil)
	contents, err := ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	defer out.Close()
	c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...))
}

// TestGetPageRanges verifies that GetPageRanges reports one entry per
// discontiguous written range.
func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use
	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Get page ranges on empty blob
	out, err := cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 0)

	// Add 0-512 page
	c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil)

	out, err = cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 1)

	// Add 1024-2048
	c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil)

	out, err = cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 2)
}
+
+func deleteTestContainers(cli BlobStorageClient) error {
+ for {
+ resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix})
+ if err != nil {
+ return err
+ }
+ if len(resp.Containers) == 0 {
+ break
+ }
+ for _, c := range resp.Containers {
+ err = cli.DeleteContainer(c.Name)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error {
+ if len(chunk) > MaxBlobBlockSize {
+ return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize)
+ }
+
+ uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+ headers := b.client.getStandardHeaders()
+ headers["x-ms-blob-type"] = string(BlobTypeBlock)
+ headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
+
+ resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
+ if err != nil {
+ return err
+ }
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
// randContainer returns a random 32-character container name carrying the
// test prefix, so cleanup helpers can identify it.
func randContainer() string {
	return testContainerPrefix + randString(32-len(testContainerPrefix))
}
+
// randString returns a random string of length n drawn from the lowercase
// alphanumeric alphabet. It panics when n is not positive or when the system
// randomness source fails (acceptable for a test helper).
func randString(n int) string {
	if n <= 0 {
		panic("randString: n must be positive")
	}
	const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
	var bytes = make([]byte, n)
	if _, err := rand.Read(bytes); err != nil {
		// Previously ignored; a failed read would have yielded the
		// constant "0000…" string and silently broken tests that rely
		// on unique random names.
		panic(err)
	}
	for i, b := range bytes {
		bytes[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(bytes)
}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go
new file mode 100644
index 000000000000..6c171050a2f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client.go
@@ -0,0 +1,359 @@
+// Package storage provides clients for Microsoft Azure Storage Services.
+package storage
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+)
+
const (
	// DefaultBaseURL is the domain name used for storage requests when a
	// default client is created.
	DefaultBaseURL = "core.windows.net"

	// DefaultAPIVersion is the Azure Storage API version string used when a
	// basic client is created.
	DefaultAPIVersion = "2014-02-14"

	// defaultUseHTTPS makes NewBasicClient speak HTTPS by default.
	defaultUseHTTPS = true

	// Subdomain labels of the individual storage services; used when
	// composing per-service endpoint host names (see getBaseURL).
	blobServiceName = "blob"
	tableServiceName = "table"
	queueServiceName = "queue"
)
+
// Client is the object that needs to be constructed to perform
// operations on the storage account.
type Client struct {
	accountName string // storage account name, becomes the host-name prefix
	accountKey []byte // base64-decoded shared key used to sign requests
	useHTTPS bool // whether request URLs use the https scheme
	baseURL string // service domain suffix, e.g. "core.windows.net"
	apiVersion string // value sent as the x-ms-version header
}

// storageResponse is the minimal view of an HTTP response handed back by
// Client.exec. Callers own (and must close) body.
type storageResponse struct {
	statusCode int
	headers http.Header
	body io.ReadCloser
}

// AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls.
type AzureStorageServiceError struct {
	Code string `xml:"Code"`
	Message string `xml:"Message"`
	AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
	QueryParameterName string `xml:"QueryParameterName"`
	QueryParameterValue string `xml:"QueryParameterValue"`
	Reason string `xml:"Reason"`
	StatusCode int // HTTP status code, filled in from the transport, not the XML
	RequestID string // x-ms-request-id header, filled in from the transport
}

// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// nor with an HTTP status code indicating success.
type UnexpectedStatusCodeError struct {
	allowed []int // status codes the caller would have accepted
	got int // status code actually returned by the service
}
+
+func (e UnexpectedStatusCodeError) Error() string {
+ s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+ got := s(e.got)
+ expected := []string{}
+ for _, v := range e.allowed {
+ expected = append(expected, s(v))
+ }
+ return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
+}
+
+// NewBasicClient constructs a Client with given storage service name and
+// key.
+func NewBasicClient(accountName, accountKey string) (Client, error) {
+ return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewClient constructs a Client. This should be used if the caller wants
+// to specify whether to use HTTPS, a specific REST API version or a custom
+// storage endpoint than Azure Public Cloud.
+func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+ var c Client
+ if accountName == "" {
+ return c, fmt.Errorf("azure: account name required")
+ } else if accountKey == "" {
+ return c, fmt.Errorf("azure: account key required")
+ } else if blobServiceBaseURL == "" {
+ return c, fmt.Errorf("azure: base storage service url required")
+ }
+
+ key, err := base64.StdEncoding.DecodeString(accountKey)
+ if err != nil {
+ return c, err
+ }
+
+ return Client{
+ accountName: accountName,
+ accountKey: key,
+ useHTTPS: useHTTPS,
+ baseURL: blobServiceBaseURL,
+ apiVersion: apiVersion,
+ }, nil
+}
+
+func (c Client) getBaseURL(service string) string {
+ scheme := "http"
+ if c.useHTTPS {
+ scheme = "https"
+ }
+
+ host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
+
+ u := &url.URL{
+ Scheme: scheme,
+ Host: host}
+ return u.String()
+}
+
+func (c Client) getEndpoint(service, path string, params url.Values) string {
+ u, err := url.Parse(c.getBaseURL(service))
+ if err != nil {
+ // really should not be happening
+ panic(err)
+ }
+
+ if path == "" {
+ path = "/" // API doesn't accept path segments not starting with '/'
+ }
+
+ u.Path = path
+ u.RawQuery = params.Encode()
+ return u.String()
+}
+
+// GetBlobService returns a BlobStorageClient which can operate on the blob
+// service of the storage account.
+func (c Client) GetBlobService() BlobStorageClient {
+ return BlobStorageClient{c}
+}
+
+// GetQueueService returns a QueueServiceClient which can operate on the queue
+// service of the storage account.
+func (c Client) GetQueueService() QueueServiceClient {
+ return QueueServiceClient{c}
+}
+
+func (c Client) createAuthorizationHeader(canonicalizedString string) string {
+ signature := c.computeHmac256(canonicalizedString)
+ return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature)
+}
+
+func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
+ canonicalizedResource, err := c.buildCanonicalizedResource(url)
+ if err != nil {
+ return "", err
+ }
+
+ canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource)
+ return c.createAuthorizationHeader(canonicalizedString), nil
+}
+
+func (c Client) getStandardHeaders() map[string]string {
+ return map[string]string{
+ "x-ms-version": c.apiVersion,
+ "x-ms-date": currentTimeRfc1123Formatted(),
+ }
+}
+
+func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
+ cm := make(map[string]string)
+
+ for k, v := range headers {
+ headerName := strings.TrimSpace(strings.ToLower(k))
+ match, _ := regexp.MatchString("x-ms-", headerName)
+ if match {
+ cm[headerName] = v
+ }
+ }
+
+ if len(cm) == 0 {
+ return ""
+ }
+
+ keys := make([]string, 0, len(cm))
+ for key := range cm {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ ch := ""
+
+ for i, key := range keys {
+ if i == len(keys)-1 {
+ ch += fmt.Sprintf("%s:%s", key, cm[key])
+ } else {
+ ch += fmt.Sprintf("%s:%s\n", key, cm[key])
+ }
+ }
+ return ch
+}
+
+func (c Client) buildCanonicalizedResource(uri string) (string, error) {
+ errMsg := "buildCanonicalizedResource error: %s"
+ u, err := url.Parse(uri)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ cr := "/" + c.accountName
+ if len(u.Path) > 0 {
+ cr += u.Path
+ }
+
+ params, err := url.ParseQuery(u.RawQuery)
+ if err != nil {
+ return "", fmt.Errorf(errMsg, err.Error())
+ }
+
+ if len(params) > 0 {
+ cr += "\n"
+ keys := make([]string, 0, len(params))
+ for key := range params {
+ keys = append(keys, key)
+ }
+
+ sort.Strings(keys)
+
+ for i, key := range keys {
+ if len(params[key]) > 1 {
+ sort.Strings(params[key])
+ }
+
+ if i == len(keys)-1 {
+ cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))
+ } else {
+ cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ","))
+ }
+ }
+ }
+ return cr, nil
+}
+
+func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
+ canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
+ verb,
+ headers["Content-Encoding"],
+ headers["Content-Language"],
+ headers["Content-Length"],
+ headers["Content-MD5"],
+ headers["Content-Type"],
+ headers["Date"],
+ headers["If-Modified-Singe"],
+ headers["If-Match"],
+ headers["If-None-Match"],
+ headers["If-Unmodified-Singe"],
+ headers["Range"],
+ c.buildCanonicalizedHeader(headers),
+ canonicalizedResource)
+
+ return canonicalizedString
+}
+
+func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
+ authHeader, err := c.getAuthorizationHeader(verb, url, headers)
+ if err != nil {
+ return nil, err
+ }
+ headers["Authorization"] = authHeader
+
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest(verb, url, body)
+ for k, v := range headers {
+ req.Header.Add(k, v)
+ }
+ httpClient := http.Client{}
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ statusCode := resp.StatusCode
+ if statusCode >= 400 && statusCode <= 505 {
+ var respBody []byte
+ respBody, err = readResponseBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(respBody) == 0 {
+ // no error in response body
+ err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
+ } else {
+ // response contains storage service error object, unmarshal
+ storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
+ if err != nil { // error unmarshaling the error response
+ err = errIn
+ }
+ err = storageErr
+ }
+ return &storageResponse{
+ statusCode: resp.StatusCode,
+ headers: resp.Header,
+ body: ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
+ }, err
+ }
+
+ return &storageResponse{
+ statusCode: resp.StatusCode,
+ headers: resp.Header,
+ body: resp.Body}, nil
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+ defer resp.Body.Close()
+ out, err := ioutil.ReadAll(resp.Body)
+ if err == io.EOF {
+ err = nil
+ }
+ return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
+ var storageErr AzureStorageServiceError
+ if err := xml.Unmarshal(body, &storageErr); err != nil {
+ return storageErr, err
+ }
+ storageErr.StatusCode = statusCode
+ storageErr.RequestID = requestID
+ return storageErr, nil
+}
+
// Error implements the error interface, summarizing the status code, service
// error code and message, and the request ID for correlation with service logs.
func (e AzureStorageServiceError) Error() string {
	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
}
+
+// checkRespCode returns UnexpectedStatusError if the given response code is not
+// one of the allowed status codes; otherwise nil.
+func checkRespCode(respCode int, allowed []int) error {
+ for _, v := range allowed {
+ if respCode == v {
+ return nil
+ }
+ }
+ return UnexpectedStatusCodeError{allowed, respCode}
+}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go
new file mode 100644
index 000000000000..5bc52110055e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/client_test.go
@@ -0,0 +1,156 @@
+package storage
+
+import (
+ "encoding/base64"
+ "net/url"
+ "os"
+ "testing"
+
+ chk "gopkg.in/check.v1"
+)
+
// Test hooks the gocheck framework into the standard "go test" runner.
func Test(t *testing.T) { chk.TestingT(t) }

// StorageClientSuite is the gocheck suite holding the storage client tests.
type StorageClientSuite struct{}

// Register the suite with gocheck.
var _ = chk.Suite(&StorageClientSuite{})
+
+// getBasicClient returns a test client from storage credentials in the env
+func getBasicClient(c *chk.C) Client {
+ name := os.Getenv("ACCOUNT_NAME")
+ if name == "" {
+ c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test")
+ }
+ key := os.Getenv("ACCOUNT_KEY")
+ if key == "" {
+ c.Fatal("ACCOUNT_KEY not set")
+ }
+ cli, err := NewBasicClient(name, key)
+ c.Assert(err, chk.IsNil)
+ return cli
+}
+
+func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+ c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion)
+ c.Assert(err, chk.IsNil)
+ c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net")
+}
+
+func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) {
+ apiVersion := "2015-01-01" // a non existing one
+ cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false)
+ c.Assert(err, chk.IsNil)
+ c.Assert(cli.apiVersion, chk.Equals, apiVersion)
+ c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn")
+}
+
+func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+ output := cli.getEndpoint(blobServiceName, "", url.Values{})
+ c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/")
+}
+
+func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+ output := cli.getEndpoint(blobServiceName, "path", url.Values{})
+ c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path")
+}
+
+func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+ params := url.Values{}
+ params.Set("a", "b")
+ params.Set("c", "d")
+ output := cli.getEndpoint(blobServiceName, "", params)
+ c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d")
+}
+
+func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+ params := url.Values{}
+ params.Set("a", "b")
+ params.Set("c", "d")
+ output := cli.getEndpoint(blobServiceName, "path", params)
+ c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d")
+}
+
+func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+
+ headers := cli.getStandardHeaders()
+ c.Assert(len(headers), chk.Equals, 2)
+ c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion)
+ if _, ok := headers["x-ms-date"]; !ok {
+ c.Fatal("Missing date header")
+ }
+}
+
+func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+
+ type test struct{ url, expected string }
+ tests := []test{
+ {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"},
+ {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"},
+ {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"},
+ }
+
+ for _, i := range tests {
+ out, err := cli.buildCanonicalizedResource(i.url)
+ c.Assert(err, chk.IsNil)
+ c.Assert(out, chk.Equals, i.expected)
+ }
+}
+
+func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) {
+ cli, err := NewBasicClient("foo", "YmFy")
+ c.Assert(err, chk.IsNil)
+
+ type test struct {
+ headers map[string]string
+ expected string
+ }
+ tests := []test{
+ {map[string]string{}, ""},
+ {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"},
+ {map[string]string{"foo:": "bar"}, ""},
+ {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"},
+ {map[string]string{
+ "x-ms-version": "9999-99-99",
+ "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}}
+
+ for _, i := range tests {
+ c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected)
+ }
+}
+
+func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) {
+ // attempt to delete a nonexisting container
+ _, err := getBlobClient(c).deleteContainer(randContainer())
+ c.Assert(err, chk.NotNil)
+
+ v, ok := err.(AzureStorageServiceError)
+ c.Check(ok, chk.Equals, true)
+ c.Assert(v.StatusCode, chk.Equals, 404)
+ c.Assert(v.Code, chk.Equals, "ContainerNotFound")
+ c.Assert(v.Code, chk.Not(chk.Equals), "")
+}
+
+func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) {
+ key := base64.StdEncoding.EncodeToString([]byte("bar"))
+ cli, err := NewBasicClient("foo", key)
+ c.Assert(err, chk.IsNil)
+
+ canonicalizedString := `foobarzoo`
+ expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`
+ c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected)
+}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go
new file mode 100644
index 000000000000..fa017f4c83e3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue.go
@@ -0,0 +1,230 @@
+package storage
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+)
+
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service.
type QueueServiceClient struct {
	client Client // underlying account client used to sign and send requests
}
+
// pathForQueue returns the URI path of a queue resource.
func pathForQueue(queue string) string {
	return fmt.Sprintf("/%s", queue)
}

// pathForQueueMessages returns the URI path of a queue's message collection.
func pathForQueueMessages(queue string) string {
	return fmt.Sprintf("/%s/messages", queue)
}

// pathForMessage returns the URI path of one message within a queue.
func pathForMessage(queue, name string) string {
	return fmt.Sprintf("/%s/messages/%s", queue, name)
}
+
// putMessageRequest is the XML document sent as the Put Message request body.
type putMessageRequest struct {
	XMLName xml.Name `xml:"QueueMessage"`
	MessageText string `xml:"MessageText"`
}

// PutMessageParameters is the set of options can be specified for Put Message
// operation. A zero struct does not use any preferences for the request.
type PutMessageParameters struct {
	VisibilityTimeout int // seconds the message stays invisible after insert; 0 omits the parameter
	MessageTTL int // message time-to-live in seconds; 0 omits the parameter
}
+
+func (p PutMessageParameters) getParameters() url.Values {
+ out := url.Values{}
+ if p.VisibilityTimeout != 0 {
+ out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
+ }
+ if p.MessageTTL != 0 {
+ out.Set("messagettl", strconv.Itoa(p.MessageTTL))
+ }
+ return out
+}
+
// GetMessagesParameters is the set of options can be specified for Get
// Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesParameters struct {
	NumOfMessages int // how many messages to dequeue; 0 omits the parameter
	VisibilityTimeout int // seconds retrieved messages stay invisible; 0 omits the parameter
}
+
+func (p GetMessagesParameters) getParameters() url.Values {
+ out := url.Values{}
+ if p.NumOfMessages != 0 {
+ out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
+ }
+ if p.VisibilityTimeout != 0 {
+ out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
+ }
+ return out
+}
+
// PeekMessagesParameters is the set of options can be specified for Peek
// Message operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesParameters struct {
	NumOfMessages int // how many messages to peek at; 0 omits the parameter
}
+
+func (p PeekMessagesParameters) getParameters() url.Values {
+ out := url.Values{"peekonly": {"true"}} // Required for peek operation
+ if p.NumOfMessages != 0 {
+ out.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
+ }
+ return out
+}
+
// GetMessagesResponse represents a response returned from Get Messages
// operation.
type GetMessagesResponse struct {
	XMLName xml.Name `xml:"QueueMessagesList"`
	QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}

// GetMessageResponse represents a QueueMessage object returned from Get
// Messages operation response.
type GetMessageResponse struct {
	MessageID string `xml:"MessageId"`
	InsertionTime string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	PopReceipt string `xml:"PopReceipt"`
	TimeNextVisible string `xml:"TimeNextVisible"`
	DequeueCount int `xml:"DequeueCount"`
	MessageText string `xml:"MessageText"`
}

// PeekMessagesResponse represents a response returned from Peek Messages
// operation.
type PeekMessagesResponse struct {
	XMLName xml.Name `xml:"QueueMessagesList"`
	QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}

// PeekMessageResponse represents a QueueMessage object returned from Peek
// Messages operation response. Unlike GetMessageResponse it carries no
// PopReceipt/TimeNextVisible, since peeking does not alter visibility.
type PeekMessageResponse struct {
	MessageID string `xml:"MessageId"`
	InsertionTime string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	DequeueCount int `xml:"DequeueCount"`
	MessageText string `xml:"MessageText"`
}
+
+// CreateQueue operation creates a queue under the given account.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
+func (c QueueServiceClient) CreateQueue(name string) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
+ headers := c.client.getStandardHeaders()
+ headers["Content-Length"] = "0"
+ resp, err := c.client.exec("PUT", uri, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// DeleteQueue operation permanently deletes the specified queue.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
+func (c QueueServiceClient) DeleteQueue(name string) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
+ resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
+
+// QueueExists returns true if a queue with given name exists.
+func (c QueueServiceClient) QueueExists(name string) (bool, error) {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
+ resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
+ if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) {
+ return resp.statusCode == http.StatusOK, nil
+ }
+
+ return false, err
+}
+
+// PutMessage operation adds a new message to the back of the message queue.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
+func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
+ req := putMessageRequest{MessageText: message}
+ body, nn, err := xmlMarshal(req)
+ if err != nil {
+ return err
+ }
+ headers := c.client.getStandardHeaders()
+ headers["Content-Length"] = strconv.Itoa(nn)
+ resp, err := c.client.exec("POST", uri, headers, body)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// ClearMessages operation deletes all messages from the specified queue.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
+func (c QueueServiceClient) ClearMessages(queue string) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
+ resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
+
+// GetMessages operation retrieves one or more messages from the front of the
+// queue.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
+func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
+ var r GetMessagesResponse
+ uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
+ resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
+ if err != nil {
+ return r, err
+ }
+ defer resp.body.Close()
+ err = xmlUnmarshal(resp.body, &r)
+ return r, err
+}
+
+// PeekMessages retrieves one or more messages from the front of the queue, but
+// does not alter the visibility of the message.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
+func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
+ var r PeekMessagesResponse
+ uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
+ resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
+ if err != nil {
+ return r, err
+ }
+ defer resp.body.Close()
+ err = xmlUnmarshal(resp.body, &r)
+ return r, err
+}
+
+// DeleteMessage operation deletes the specified message.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
+func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
+ uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{
+ "popreceipt": {popReceipt}})
+ resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
+ if err != nil {
+ return err
+ }
+ defer resp.body.Close()
+ return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
+}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go
new file mode 100644
index 000000000000..5c7bad93c6b5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go
@@ -0,0 +1,91 @@
+package storage
+
+import (
+ chk "gopkg.in/check.v1"
+)
+
// StorageQueueSuite is the gocheck suite holding the queue service tests.
type StorageQueueSuite struct{}

var _ = chk.Suite(&StorageQueueSuite{})

// getQueueClient returns a queue service client backed by the credentials in
// the environment (see getBasicClient).
func getQueueClient(c *chk.C) QueueServiceClient {
	return getBasicClient(c).GetQueueService()
}
+
// Path-helper tests: pin the exact URI paths used for queue, message
// collection and single-message resources.
func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) {
	c.Assert(pathForQueue("q"), chk.Equals, "/q")
}

func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) {
	c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages")
}

func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) {
	c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m")
}
+
+func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) {
+ cli := getQueueClient(c)
+ name := randString(20)
+ c.Assert(cli.CreateQueue(name), chk.IsNil)
+ c.Assert(cli.DeleteQueue(name), chk.IsNil)
+}
+
+func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
+ cli := getQueueClient(c)
+ ok, err := cli.QueueExists("nonexistent-queue")
+ c.Assert(err, chk.IsNil)
+ c.Assert(ok, chk.Equals, false)
+
+ name := randString(20)
+ c.Assert(cli.CreateQueue(name), chk.IsNil)
+ defer cli.DeleteQueue(name)
+
+ ok, err = cli.QueueExists(name)
+ c.Assert(err, chk.IsNil)
+ c.Assert(ok, chk.Equals, true)
+}
+
+func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) {
+ q := randString(20)
+ cli := getQueueClient(c)
+ c.Assert(cli.CreateQueue(q), chk.IsNil)
+ defer cli.DeleteQueue(q)
+
+ msg := randString(64 * 1024) // exercise max length
+ c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil)
+ r, err := cli.PeekMessages(q, PeekMessagesParameters{})
+ c.Assert(err, chk.IsNil)
+ c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
+ c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg)
+}
+
+func (s *StorageQueueSuite) TestGetMessages(c *chk.C) {
+ q := randString(20)
+ cli := getQueueClient(c)
+ c.Assert(cli.CreateQueue(q), chk.IsNil)
+ defer cli.DeleteQueue(q)
+
+ n := 4
+ for i := 0; i < n; i++ {
+ c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil)
+ }
+
+ r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n})
+ c.Assert(err, chk.IsNil)
+ c.Assert(len(r.QueueMessagesList), chk.Equals, n)
+}
+
+func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) {
+ q := randString(20)
+ cli := getQueueClient(c)
+ c.Assert(cli.CreateQueue(q), chk.IsNil)
+ defer cli.DeleteQueue(q)
+
+ c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil)
+ r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1})
+ c.Assert(err, chk.IsNil)
+ c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
+ m := r.QueueMessagesList[0]
+ c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil)
+}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go
new file mode 100644
index 000000000000..33155af7f020
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util.go
@@ -0,0 +1,71 @@
+package storage
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+func (c Client) computeHmac256(message string) string {
+ h := hmac.New(sha256.New, c.accountKey)
+ h.Write([]byte(message))
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func currentTimeRfc1123Formatted() string {
+ return timeRfc1123Formatted(time.Now().UTC())
+}
+
+func timeRfc1123Formatted(t time.Time) string {
+ return t.Format(http.TimeFormat)
+}
+
+func mergeParams(v1, v2 url.Values) url.Values {
+ out := url.Values{}
+ for k, v := range v1 {
+ out[k] = v
+ }
+ for k, v := range v2 {
+ vals, ok := out[k]
+ if ok {
+ vals = append(vals, v...)
+ out[k] = vals
+ } else {
+ out[k] = v
+ }
+ }
+ return out
+}
+
+func prepareBlockListRequest(blocks []Block) string {
+ s := ``
+ for _, v := range blocks {
+ s += fmt.Sprintf("<%s>%s%s>", v.Status, v.ID, v.Status)
+ }
+ s += ``
+ return s
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+ data, err := ioutil.ReadAll(body)
+ if err != nil {
+ return err
+ }
+ return xml.Unmarshal(data, v)
+}
+
+func xmlMarshal(v interface{}) (io.Reader, int, error) {
+ b, err := xml.Marshal(v)
+ if err != nil {
+ return nil, 0, err
+ }
+ return bytes.NewReader(b), len(b), nil
+}
diff --git a/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go
new file mode 100644
index 000000000000..9bf82dcc3461
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/Azure/azure-sdk-for-go/storage/util_test.go
@@ -0,0 +1,69 @@
+package storage
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+ "net/url"
+ "strings"
+ "time"
+
+ chk "gopkg.in/check.v1"
+)
+
+func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) {
+ now := time.Now().UTC()
+ expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
+ c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout))
+}
+
+func (s *StorageClientSuite) Test_mergeParams(c *chk.C) {
+ v1 := url.Values{
+ "k1": {"v1"},
+ "k2": {"v2"}}
+ v2 := url.Values{
+ "k1": {"v11"},
+ "k3": {"v3"}}
+ out := mergeParams(v1, v2)
+ c.Assert(out.Get("k1"), chk.Equals, "v1")
+ c.Assert(out.Get("k2"), chk.Equals, "v2")
+ c.Assert(out.Get("k3"), chk.Equals, "v3")
+ c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"})
+}
+
+func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) {
+ empty := []Block{}
+ expected := ``
+ c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected)
+
+ blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
+ expected = `foobar`
+ c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected)
+}
+
+func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) {
+ xml := `
+
+ myblob
+ `
+ var blob Blob
+ body := ioutil.NopCloser(strings.NewReader(xml))
+ c.Assert(xmlUnmarshal(body, &blob), chk.IsNil)
+ c.Assert(blob.Name, chk.Equals, "myblob")
+}
+
+func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) {
+ type t struct {
+ XMLName xml.Name `xml:"S"`
+ Name string `xml:"Name"`
+ }
+
+ b := t{Name: "myblob"}
+ expected := `myblob`
+ r, i, err := xmlMarshal(b)
+ c.Assert(err, chk.IsNil)
+ o, err := ioutil.ReadAll(r)
+ c.Assert(err, chk.IsNil)
+ out := string(o)
+ c.Assert(out, chk.Equals, expected)
+ c.Assert(i, chk.Equals, len(expected))
+}
diff --git a/pkg/cmd/dockerregistry/dockerregistry.go b/pkg/cmd/dockerregistry/dockerregistry.go
index b12d9916c63d..3a791034accb 100644
--- a/pkg/cmd/dockerregistry/dockerregistry.go
+++ b/pkg/cmd/dockerregistry/dockerregistry.go
@@ -25,6 +25,7 @@ import (
_ "github.com/docker/distribution/registry/auth/htpasswd"
_ "github.com/docker/distribution/registry/auth/token"
+ _ "github.com/docker/distribution/registry/storage/driver/azure"
_ "github.com/docker/distribution/registry/storage/driver/filesystem"
_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"
_ "github.com/docker/distribution/registry/storage/driver/s3"