From d0da13d5605ecd54bf1ef8cdae40ade7f9dc8fbf Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Mon, 26 Jun 2017 16:41:29 -0700 Subject: [PATCH] mc: Vendorize minio-go this brings in streaming support. This PR brings following changes - Brings streaming support for mc uploads. - Brings support for copying objects upto 5TiB as a consequence of ComposeObject implementation in minio-go. --- cmd/client-fs.go | 72 +-- cmd/client-s3.go | 24 +- cmd/client-s3_test.go | 4 - cmd/common-methods.go | 4 +- pkg/hookreader/hookreader.go | 20 + .../minio/minio-go/api-compose-object.go | 531 ++++++++++++++++++ .../minio/minio-go/api-put-object-common.go | 75 --- .../minio/minio-go/api-put-object-copy.go | 56 +- .../minio-go/api-put-object-encrypted.go | 46 ++ .../minio/minio-go/api-put-object-file.go | 201 +------ .../minio-go/api-put-object-multipart.go | 189 ++----- .../minio/minio-go/api-put-object-progress.go | 191 ------- .../minio/minio-go/api-put-object-readat.go | 219 -------- .../minio-go/api-put-object-streaming.go | 416 ++++++++++++++ .../minio/minio-go/api-put-object.go | 190 ++----- vendor/github.com/minio/minio-go/api.go | 21 +- vendor/github.com/minio/minio-go/appveyor.yml | 39 ++ vendor/github.com/minio/minio-go/constants.go | 14 +- vendor/github.com/minio/minio-go/core.go | 12 +- .../minio-go/pkg/policy/bucket-policy.go | 2 +- .../s3signer/request-signature-streaming.go | 49 +- vendor/github.com/minio/minio-go/tempfile.go | 60 -- vendor/github.com/minio/minio-go/utils.go | 2 +- vendor/vendor.json | 38 +- 24 files changed, 1270 insertions(+), 1205 deletions(-) create mode 100644 vendor/github.com/minio/minio-go/api-compose-object.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-encrypted.go delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-progress.go delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-readat.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-streaming.go create mode 100644 vendor/github.com/minio/minio-go/appveyor.yml delete mode 100644 vendor/github.com/minio/minio-go/tempfile.go diff --git a/cmd/client-fs.go b/cmd/client-fs.go index 75a5e0a814..dc25961ea3 100644 --- a/cmd/client-fs.go +++ b/cmd/client-fs.go @@ -236,7 +236,7 @@ func (f *fsClient) put(reader io.Reader, size int64, metadata map[string][]strin } // Get stat to get the current size. - partSt, e := partFile.Stat() + partSt, e := os.Stat(objectPartPath) if e != nil { err := f.toClientError(e, objectPartPath) return 0, err.Trace(objectPartPath) @@ -246,73 +246,33 @@ func (f *fsClient) put(reader io.Reader, size int64, metadata map[string][]strin // Current file offset. var currentOffset = partSt.Size() - // Use ReadAt() capability when reader implements it, but also avoid it in two cases: - // *) reader represents a standard input/output stream since they return illegal seek error when ReadAt() is invoked - // *) we know in advance that reader will provide zero length data - if readerAt, ok := reader.(io.ReaderAt); ok && !isStdIO(reader) && size > 0 { - // Notify the progress bar if any till current size. - if progress != nil { - if _, e = io.CopyN(ioutil.Discard, progress, currentOffset); e != nil { + if !isStdIO(reader) && size > 0 { + reader = hookreader.NewHook(reader, progress) + if seeker, ok := reader.(io.Seeker); ok { + if _, e = seeker.Seek(currentOffset, 0); e != nil { return 0, probe.NewError(e) } - } - - // Allocate buffer of 10MiB once. 
- readAtBuffer := make([]byte, 10*1024*1024) - - // Loop through all offsets on incoming io.ReaderAt and write - // to the destination. - for currentOffset < size { - readAtSize, re := readerAt.ReadAt(readAtBuffer, currentOffset) - if re != nil && re != io.EOF { - // For any errors other than io.EOF, we return error - // and breakout. - err := f.toClientError(re, objectPartPath) - return 0, err.Trace(objectPartPath) - } - writtenSize, we := partFile.Write(readAtBuffer[:readAtSize]) - if we != nil { - err := f.toClientError(we, objectPartPath) - return 0, err.Trace(objectPartPath) - } - // read size and subsequent write differ, a possible - // corruption return here. - if readAtSize != writtenSize { - // Unexpected write (less data was written than expected). - return 0, probe.NewError(UnexpectedShortWrite{ - InputSize: readAtSize, - WriteSize: writtenSize, - }) - } - // Notify the progress bar if any for written size. - if progress != nil { - if _, e = io.CopyN(ioutil.Discard, progress, int64(writtenSize)); e != nil { - return totalWritten, probe.NewError(e) - } - } - currentOffset += int64(writtenSize) - // Once we see io.EOF we break out of the loop. - if re == io.EOF { - break + // Discard bytes until currentOffset. + if _, e = io.CopyN(ioutil.Discard, progress, currentOffset); e != nil { + return 0, probe.NewError(e) } } - // Save currently copied total into totalWritten. - totalWritten = currentOffset } else { reader = hookreader.NewHook(reader, progress) // Discard bytes until currentOffset. if _, e = io.CopyN(ioutil.Discard, reader, currentOffset); e != nil { return 0, probe.NewError(e) } - var n int64 - n, e = io.Copy(partFile, reader) - if e != nil { - return 0, probe.NewError(e) - } - // Save currently copied total into totalWritten. - totalWritten = n + currentOffset } + n, e := io.Copy(partFile, reader) + if e != nil { + return 0, probe.NewError(e) + } + + // Save currently copied total into totalWritten. + totalWritten = n + currentOffset + // Close the input reader as well, if possible. 
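
The rewritten put() above collapses the old ReadAt() resume loop into one path: stat the part file for the bytes already on disk, Seek() the source past them (draining the progress reader by the same amount so the bar stays accurate), then append the rest with io.Copy. A minimal sketch of that pattern, using a hypothetical resumeAppend helper that is not part of mc (assumes imports "io", "io/ioutil", "os"):

// resumeAppend appends the unread tail of src to dst, assuming the first
// offset bytes were persisted by an earlier, interrupted run.
func resumeAppend(dst *os.File, src io.ReadSeeker, progress io.Reader, offset int64) (int64, error) {
	// Skip the prefix that is already on disk.
	if _, err := src.Seek(offset, 0); err != nil {
		return 0, err
	}
	// Keep the progress bar honest about the skipped prefix.
	if progress != nil {
		if _, err := io.CopyN(ioutil.Discard, progress, offset); err != nil {
			return 0, err
		}
	}
	// Copy the remainder; the returned total includes the resumed prefix.
	n, err := io.Copy(dst, src)
	return offset + n, err
}
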
closer, ok := reader.(io.Closer) if ok { diff --git a/cmd/client-s3.go b/cmd/client-s3.go index 867ec359c1..c8a9a487a1 100644 --- a/cmd/client-s3.go +++ b/cmd/client-s3.go @@ -514,14 +514,22 @@ func (c *s3Client) Get() (io.Reader, map[string][]string, *probe.Error) { // Copy - copy object func (c *s3Client) Copy(source string, size int64, progress io.Reader) *probe.Error { - bucket, object := c.url2BucketAndObject() - if bucket == "" { + dstBucket, dstObject := c.url2BucketAndObject() + if dstBucket == "" { return probe.NewError(BucketNameEmpty{}) } - // Empty copy conditions - copyConds := minio.NewCopyConditions() - e := c.api.CopyObject(bucket, object, source, copyConds) + + tokens := splitStr(source, string(c.targetURL.Separator), 3) + + // Source object + src := minio.NewSourceInfo(tokens[1], tokens[2], nil) + + // Destination object + dst, e := minio.NewDestinationInfo(dstBucket, dstObject, nil, nil) if e != nil { + return probe.NewError(e) + } + if e = c.api.CopyObject(dst, src); e != nil { errResponse := minio.ToErrorResponse(e) if errResponse.Code == "AccessDenied" { return probe.NewError(PathInsufficientPermission{ @@ -530,12 +538,12 @@ func (c *s3Client) Copy(source string, size int64, progress io.Reader) *probe.Er } if errResponse.Code == "NoSuchBucket" { return probe.NewError(BucketDoesNotExist{ - Bucket: bucket, + Bucket: dstBucket, }) } if errResponse.Code == "InvalidBucketName" { return probe.NewError(BucketInvalid{ - Bucket: bucket, + Bucket: dstBucket, }) } if errResponse.Code == "NoSuchKey" || errResponse.Code == "InvalidArgument" { @@ -563,7 +571,7 @@ func (c *s3Client) Put(reader io.Reader, size int64, metadata map[string][]strin if bucket == "" { return 0, probe.NewError(BucketNameEmpty{}) } - n, e := c.api.PutObjectWithMetadata(bucket, object, reader, metadata, progress) + n, e := c.api.PutObjectWithSize(bucket, object, reader, size, metadata, progress) if e != nil { errResponse := minio.ToErrorResponse(e) if errResponse.Code == "UnexpectedEOF" || e == io.EOF { diff --git a/cmd/client-s3_test.go b/cmd/client-s3_test.go index a2f67633e9..e0deda8a39 100644 --- a/cmd/client-s3_test.go +++ b/cmd/client-s3_test.go @@ -90,10 +90,6 @@ func (h objectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) return } - if !bytes.Equal(h.data, buffer.Bytes()) { - w.WriteHeader(http.StatusInternalServerError) - return - } w.Header().Set("ETag", "9af2f8218b150c351ad802c6f3d66abe") w.WriteHeader(http.StatusOK) case r.Method == "HEAD": diff --git a/cmd/common-methods.go b/cmd/common-methods.go index 1af4d6a1d4..d45b6e6e0c 100644 --- a/cmd/common-methods.go +++ b/cmd/common-methods.go @@ -136,8 +136,8 @@ func uploadSourceToTargetURL(urls URLs, progress io.Reader) URLs { targetURL := urls.TargetContent.URL length := urls.SourceContent.Size - // Optimize for server side copy if object is <= 5GiB and the host is same. - if length <= globalMaximumPutSize && sourceAlias == targetAlias { + // Optimize for server side copy if the host is same. + if sourceAlias == targetAlias { sourcePath := filepath.ToSlash(sourceURL.Path) err := copySourceToTargetURL(targetAlias, targetURL.String(), sourcePath, length, progress) if err != nil { diff --git a/pkg/hookreader/hookreader.go b/pkg/hookreader/hookreader.go index adea3436c7..9ccaa1dbd6 100644 --- a/pkg/hookreader/hookreader.go +++ b/pkg/hookreader/hookreader.go @@ -22,11 +22,31 @@ package hookreader import "io" +// hookReader hooks additional reader in the source stream. 
It is +// useful for making progress bars. Second reader is appropriately +// notified about the exact number of bytes read from the primary +// source on each Read operation. type hookReader struct { source io.Reader hook io.Reader } +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. +func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + return sourceSeeker.Seek(offset, whence) + } + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + return hookSeeker.Seek(offset, whence) + } + return n, nil +} + // Read implements io.Reader. Always reads from the source, the return // value 'n' number of bytes are reported through the hook. Returns // error for all non io.EOF conditions. diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go new file mode 100644 index 0000000000..90f192422d --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-compose-object.go @@ -0,0 +1,531 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// SSEInfo - represents Server-Side-Encryption parameters specified by +// a user. +type SSEInfo struct { + key []byte + algo string +} + +// NewSSEInfo - specifies (binary or un-encoded) encryption key and +// algorithm name. If algo is empty, it defaults to "AES256". Ref: +// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +func NewSSEInfo(key []byte, algo string) SSEInfo { + if algo == "" { + algo = "AES256" + } + return SSEInfo{key, algo} +} + +// internal method that computes SSE-C headers +func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string { + if s == nil { + return nil + } + + cs := "" + if isCopySource { + cs = "copy-source-" + } + return map[string]string{ + "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo, + "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key), + "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)), + } +} + +// GetSSEHeaders - computes and returns headers for SSE-C as key-value +// pairs. They can be set as metadata in PutObject* requests (for +// encryption) or be set as request headers in `Core.GetObject` (for +// decryption). +func (s *SSEInfo) GetSSEHeaders() map[string]string { + return s.getSSEHeaders(false) +} + +// DestinationInfo - type with information about the object to be +// created via server-side copy requests, using the Compose API. 
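
NewSSEInfo and GetSSEHeaders (above) spare callers from hand-rolling SSE-C headers, including the base64-encoded key and its MD5. An illustrative sketch (the key bytes are placeholders; assumes a "fmt" import):

// A 32-byte customer key; the algorithm defaults to "AES256" when empty.
key := []byte("32byteslongsecretkeymustbegiven!")
sse := minio.NewSSEInfo(key, "")

// Three x-amz-server-side-encryption-customer-* headers come back, ready to
// be set as PutObject metadata (encrypt) or Core.GetObject headers (decrypt).
for k, v := range sse.GetSSEHeaders() {
	fmt.Println(k, "=", v)
}
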
+type DestinationInfo struct { + bucket, object string + + // key for encrypting destination + encryption *SSEInfo + + // if no user-metadata is provided, it is copied from source + // (when there is only once source object in the compose + // request) + userMetadata map[string]string +} + +// NewDestinationInfo - creates a compose-object/copy-source +// destination info object. +// +// `encSSEC` is the key info for server-side-encryption with customer +// provided key. If it is nil, no encryption is performed. +// +// `userMeta` is the user-metadata key-value pairs to be set on the +// destination. The keys are automatically prefixed with `x-amz-meta-` +// if needed. If nil is passed, and if only a single source (of any +// size) is provided in the ComposeObject call, then metadata from the +// source is copied to the destination. +func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, + userMeta map[string]string) (d DestinationInfo, err error) { + + // Input validation. + if err = s3utils.CheckValidBucketName(bucket); err != nil { + return d, err + } + if err = s3utils.CheckValidObjectName(object); err != nil { + return d, err + } + + // Process custom-metadata to remove a `x-amz-meta-` prefix if + // present and validate that keys are distinct (after this + // prefix removal). + m := make(map[string]string) + for k, v := range userMeta { + if strings.HasPrefix(k, "x-amz-meta-") { + k = strings.TrimPrefix(k, "x-amz-meta-") + } + if _, ok := m[k]; ok { + return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k) + } + m[k] = v + } + + return DestinationInfo{ + bucket: bucket, + object: object, + encryption: encryptSSEC, + userMetadata: m, + }, nil +} + +// getUserMetaHeadersMap - construct appropriate key-value pairs to send +// as headers from metadata map to pass into copy-object request. For +// single part copy-object (i.e. non-multipart object), enable the +// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to +// `REPLACE`, so that metadata headers from the source are not copied +// over. +func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string { + if d.userMetadata == nil { + return nil + } + r := make(map[string]string) + if withCopyDirectiveHeader { + r["x-amz-metadata-directive"] = "REPLACE" + } + for k, v := range d.userMetadata { + r["x-amz-meta-"+k] = v + } + return r +} + +// SourceInfo - represents a source object to be copied, using +// server-side copying APIs. +type SourceInfo struct { + bucket, object string + + start, end int64 + + decryptKey *SSEInfo + // Headers to send with the upload-part-copy request involving + // this source object. + Headers http.Header +} + +// NewSourceInfo - create a compose-object/copy-object source info +// object. +// +// `decryptSSEC` is the decryption key using server-side-encryption +// with customer provided key. It may be nil if the source is not +// encrypted. 
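
Note how NewDestinationInfo (above) normalizes user metadata: an x-amz-meta- prefix is stripped before the duplicate check, so two spellings of the same key are rejected rather than silently merged. A sketch with placeholder names:

// Stored as x-amz-meta-origin; passing both "origin" and
// "x-amz-meta-origin" here would return an error instead.
dst, err := minio.NewDestinationInfo("my-bucket", "my-object", nil,
	map[string]string{"origin": "composed"})
if err != nil {
	log.Fatalln(err)
}
_ = dst
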
+func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo { + r := SourceInfo{ + bucket: bucket, + object: object, + start: -1, // range is unspecified by default + decryptKey: decryptSSEC, + Headers: make(http.Header), + } + + // Set the source header + r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) + + // Assemble decryption headers for upload-part-copy request + for k, v := range decryptSSEC.getSSEHeaders(true) { + r.Headers.Set(k, v) + } + + return r +} + +// SetRange - Set the start and end offset of the source object to be +// copied. If this method is not called, the whole source object is +// copied. +func (s *SourceInfo) SetRange(start, end int64) error { + if start > end || start < 0 { + return ErrInvalidArgument("start must be non-negative, and start must be at most end.") + } + // Note that 0 <= start <= end + s.start, s.end = start, end + return nil +} + +// SetMatchETagCond - Set ETag match condition. The object is copied +// only if the etag of the source matches the value given here. +func (s *SourceInfo) SetMatchETagCond(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + s.Headers.Set("x-amz-copy-source-if-match", etag) + return nil +} + +// SetMatchETagExceptCond - Set the ETag match exception +// condition. The object is copied only if the etag of the source is +// not the value given here. +func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + s.Headers.Set("x-amz-copy-source-if-none-match", etag) + return nil +} + +// SetModifiedSinceCond - Set the modified since condition. +func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Input time cannot be 0.") + } + s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetUnmodifiedSinceCond - Set the unmodified since condition. +func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Input time cannot be 0.") + } + s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) + return nil +} + +// Helper to fetch size and etag of an object using a StatObject call. +func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { + // Get object info - need size and etag here. Also, decryption + // headers are added to the stat request if given. 
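
The setters above let a source be trimmed to a byte range and guarded with copy conditions; ComposeObject also adds an ETag guard of its own when the caller sets none. An illustrative sketch (the ETag is the sample value used in cmd/client-s3_test.go):

src := minio.NewSourceInfo("srcbucket", "srcobject", nil)

// Copy only the first 10 MiB (the range is inclusive on both ends).
if err := src.SetRange(0, 10*1024*1024-1); err != nil {
	log.Fatalln(err)
}

// Fail the copy if the source changed since we last observed it.
if err := src.SetMatchETagCond("9af2f8218b150c351ad802c6f3d66abe"); err != nil {
	log.Fatalln(err)
}
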
+ var objInfo ObjectInfo + rh := NewGetReqHeaders() + for k, v := range s.decryptKey.getSSEHeaders(false) { + rh.Set(k, v) + } + objInfo, err = c.statObject(s.bucket, s.object, rh) + if err != nil { + err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err) + } else { + size = objInfo.Size + etag = objInfo.ETag + userMeta = make(map[string]string) + for k, v := range objInfo.Metadata { + if strings.HasPrefix(k, "x-amz-meta-") { + if len(v) > 0 { + userMeta[k] = v[0] + } + } + } + } + return +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int, + headers http.Header) (p CompletePart, err error) { + + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod("PUT", requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObject - creates an object using server-side copying of +// existing objects. It takes a list of source objects (with optional +// offsets) and concatenates them into a new object using only +// server-side copying operations. +func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { + if len(srcs) < 1 || len(srcs) > maxPartsCount { + return ErrInvalidArgument("There must be as least one and upto 10000 source objects.") + } + + srcSizes := make([]int64, len(srcs)) + var totalSize, size, totalParts int64 + var srcUserMeta map[string]string + var etag string + var err error + for i, src := range srcs { + size, etag, srcUserMeta, err = src.getProps(c) + if err != nil { + return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err) + } + + // Error out if client side encryption is used in this source object when + // more than one source objects are given. + if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { + return ErrInvalidArgument( + fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) + } + + // Since we did a HEAD to get size, we use the ETag + // value to make sure the object has not changed by + // the time we perform the copy. This is done, only if + // the user has not set their own ETag match + // condition. + if src.Headers.Get("x-amz-copy-source-if-match") == "" { + src.SetMatchETagCond(etag) + } + + // Check if a segment is specified, and if so, is the + // segment within object bounds? 
+ if src.start != -1 { + // Since range is specified, + // 0 <= src.start <= src.end + // so only invalid case to check is: + if src.end >= size { + return ErrInvalidArgument( + fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", + i, src.start, src.end, size)) + } + size = src.end - src.start + 1 + } + + // Only the last source may be less than `absMinPartSize` + if size < absMinPartSize && i < len(srcs)-1 { + return ErrInvalidArgument( + fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) + } + + // Is data to copy too large? + totalSize += size + if totalSize > maxMultipartPutObjectSize { + return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) + } + + // record source size + srcSizes[i] = size + + // calculate parts needed for current source + totalParts += partsRequired(size) + // Do we need more parts than we are allowed? + if totalParts > maxPartsCount { + return ErrInvalidArgument(fmt.Sprintf( + "Your proposed compose object requires more than %d parts", maxPartsCount)) + } + } + + // Single source object case (i.e. when only one source is + // involved, it is being copied wholly and at most 5GiB in + // size). + if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize { + h := srcs[0].Headers + // Add destination encryption headers + for k, v := range dst.encryption.getSSEHeaders(false) { + h.Set(k, v) + } + // Add user metadata headers from source object + metaMap := srcUserMeta + if dst.userMetadata != nil { + metaMap = dst.getUserMetaHeadersMap(true) + } + for k, v := range metaMap { + h.Set(k, v) + } + + // Send copy request + resp, err := c.executeMethod("PUT", requestMetadata{ + bucketName: dst.bucket, + objectName: dst.object, + customHeader: h, + }) + defer closeResponse(resp) + if err != nil { + return err + } + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, dst.bucket, dst.object) + } + + // Return nil on success. + return nil + } + + // Now, handle multipart-copy cases. + + // 1. Initiate a new multipart upload. + + // Set user-metadata on the destination object. + userMeta := dst.getUserMetaHeadersMap(false) + metaMap := srcUserMeta + if len(srcs) > 1 || userMeta != nil { + // cannot copy metadata if there is more than 1 source + metaMap = userMeta + } + metaHeaders := make(map[string][]string) + for k, v := range metaMap { + metaHeaders[k] = []string{v} + } + uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders) + if err != nil { + return fmt.Errorf("Error creating new upload: %v", err) + } + + // 2. Perform copy part uploads + objParts := []CompletePart{} + partIndex := 1 + for i, src := range srcs { + h := src.Headers + // Add destination encryption headers + for k, v := range dst.encryption.getSSEHeaders(false) { + h.Set(k, v) + } + + // calculate start/end indices of parts after + // splitting. + startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) + for j, start := range startIdx { + end := endIdx[j] + + // Add (or reset) source range header for + // upload part copy request. + h.Set("x-amz-copy-source-range", + fmt.Sprintf("bytes=%d-%d", start, end)) + + // make upload-part-copy request + complPart, err := c.uploadPartCopy(dst.bucket, + dst.object, uploadID, partIndex, h) + if err != nil { + return fmt.Errorf("Error in upload-part-copy - %v", err) + } + objParts = append(objParts, complPart) + partIndex++ + } + } + + // 3. Make final complete-multipart request. 
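
Taken together, this is the machinery that lifts mc's server-side copy ceiling from 5GiB to 5TiB: each source is divided by calculateEvenSplits and copied with upload-part-copy requests, with no data flowing through the client. A usage sketch with placeholder bucket/object names (client is a previously constructed *minio.Client):

// Concatenate three objects into one, entirely server side.
srcs := []minio.SourceInfo{
	minio.NewSourceInfo("bkt", "part-1", nil),
	minio.NewSourceInfo("bkt", "part-2", nil),
	minio.NewSourceInfo("bkt", "part-3", nil),
}
dst, err := minio.NewDestinationInfo("bkt", "joined", nil, nil)
if err != nil {
	log.Fatalln(err)
}
if err := client.ComposeObject(dst, srcs); err != nil {
	log.Fatalln(err)
}
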
+ _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID, + completeMultipartUpload{Parts: objParts}) + if err != nil { + err = fmt.Errorf("Error in complete-multipart request - %v", err) + } + return err +} + +// partsRequired is ceiling(size / copyPartSize) +func partsRequired(size int64) int64 { + r := size / copyPartSize + if size%copyPartSize > 0 { + r++ + } + return r +} + +// calculateEvenSplits - computes splits for a source and returns +// start and end index slices. Splits happen evenly to be sure that no +// part is less than 5MiB, as that could fail the multipart request if +// it is not the last part. +func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { + if size == 0 { + return + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 213fc21f42..833f1fe8f1 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -17,10 +17,8 @@ package minio import ( - "fmt" "hash" "io" - "io/ioutil" "math" "os" @@ -78,55 +76,6 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las return totalPartsCount, partSize, lastPartSize, nil } -// hashCopyBuffer is identical to hashCopyN except that it doesn't take -// any size argument but takes a buffer argument and reader should be -// of io.ReaderAt interface. -// -// Stages reads from offsets into the buffer, if buffer is nil it is -// initialized to optimalBufferSize. -func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) { - hashWriter := writer - for _, v := range hashAlgorithms { - hashWriter = io.MultiWriter(hashWriter, v) - } - - // Buffer is nil, initialize. - if buf == nil { - buf = make([]byte, optimalReadBufferSize) - } - - // Offset to start reading from. - var readAtOffset int64 - - // Following block reads data at an offset from the input - // reader and copies data to into local temporary file. - for { - readAtSize, rerr := reader.ReadAt(buf, readAtOffset) - if rerr != nil { - if rerr != io.EOF { - return 0, rerr - } - } - writeSize, werr := hashWriter.Write(buf[:readAtSize]) - if werr != nil { - return 0, werr - } - if readAtSize != writeSize { - return 0, fmt.Errorf("Read size was not completely written to writer. 
wanted %d, got %d - %s", readAtSize, writeSize, reportIssue) - } - readAtOffset += int64(writeSize) - size += int64(writeSize) - if rerr == io.EOF { - break - } - } - - for k, v := range hashAlgorithms { - hashSums[k] = v.Sum(nil) - } - return size, err -} - // hashCopyN - Calculates chosen hashes up to partSize amount of bytes. func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) { hashWriter := writer @@ -167,27 +116,3 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][ } return initMultipartUploadResult.UploadID, nil } - -// computeHash - Calculates hashes for an input read Seeker. -func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) { - hashWriter := ioutil.Discard - for _, v := range hashAlgorithms { - hashWriter = io.MultiWriter(hashWriter, v) - } - - // If no buffer is provided, no need to allocate just use io.Copy. - size, err = io.Copy(hashWriter, reader) - if err != nil { - return 0, err - } - - // Seek back reader to the beginning location. - if _, err := reader.Seek(0, 0); err != nil { - return 0, err - } - - for k, v := range hashAlgorithms { - hashSums[k] = v.Sum(nil) - } - return size, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go index d9e2f1b570..32fa873d87 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -16,57 +16,7 @@ package minio -import ( - "net/http" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// CopyObject - copy a source object into a new object with the provided name in the provided bucket -func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - if objectSource == "" { - return ErrInvalidArgument("Object source cannot be empty.") - } - - // customHeaders apply headers. - customHeaders := make(http.Header) - for _, cond := range cpCond.conditions { - customHeaders.Set(cond.key, cond.value) - } - - // Set copy source. - customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource)) - - // Execute PUT on objectName. - resp, err := c.executeMethod("PUT", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeaders, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Decode copy response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return err - } - - // Return nil on success. 
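
Back in api-compose-object.go, the even-split arithmetic deserves a worked example: with k = partsRequired(size), writing size = q*k + r yields r parts of q+1 bytes and k-r parts of q bytes, so part sizes differ by at most one byte and stay comfortably above the 5MiB floor. A toy mirror of calculateEvenSplits with made-up numbers (the real divisor is copyPartSize):

// evenSplits(7, 3): q, r = 2, 1  =>  part sizes [3, 2, 2], summing to 7.
func evenSplits(size, k int64) []int64 {
	q, r := size/k, size%k
	sizes := make([]int64, k)
	for j := int64(0); j < k; j++ {
		sizes[j] = q
		if j < r {
			sizes[j]++
		}
	}
	return sizes
}
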
- return nil +// CopyObject - copy a source object into a new object +func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { + return c.ComposeObject(dst, []SourceInfo{src}) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go new file mode 100644 index 0000000000..141b3e91cb --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go @@ -0,0 +1,46 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io" + + "github.com/minio/minio-go/pkg/encrypt" +) + +// PutEncryptedObject - Encrypt and store object. +func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) { + + if encryptMaterials == nil { + return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") + } + + if err := encryptMaterials.SetupEncryptMode(reader); err != nil { + return 0, err + } + + if metadata == nil { + metadata = make(map[string][]string) + } + + // Set the necessary encryption headers, for future decryption. + metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()} + metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()} + metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} + + return c.putObjectMultipart(bucketName, objectName, encryptMaterials, -1, metadata, progress) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index 0dc355ecf9..81cdf5c2c0 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -17,13 +17,9 @@ package minio import ( - "fmt" - "io" - "io/ioutil" "mime" "os" "path/filepath" - "sort" "github.com/minio/minio-go/pkg/s3utils" ) @@ -55,11 +51,6 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // Save the file size. fileSize := fileStat.Size() - // Check for largest object size allowed. - if fileSize > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName) - } - objMetadata := make(map[string][]string) // Set contentType based on filepath extension if not given or default @@ -71,195 +62,5 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) } objMetadata["Content-Type"] = []string{contentType} - - // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. - if s3utils.IsGoogleEndpoint(c.endpointURL) { - // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) - } - - // Small object upload is initiated for uploads for input data size smaller than 5MiB. 
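
Two caller-facing entry points from the hunks just above, sketched with placeholder names: CopyObject is now a single-source ComposeObject, and the relocated PutEncryptedObject stores client-side-encrypted data (the encrypt.NewSymmetricKey and encrypt.NewCBCSecureMaterials constructors are assumed from minio-go's pkg/encrypt of this vintage):

// Server-side copy via the new SourceInfo/DestinationInfo pair.
src := minio.NewSourceInfo("bkt", "obj", nil)
dst, err := minio.NewDestinationInfo("bkt", "obj-copy", nil, nil)
if err != nil {
	log.Fatalln(err)
}
if err := client.CopyObject(dst, src); err != nil {
	log.Fatalln(err)
}

// Client-side encryption: a symmetric key wrapped in CBC materials.
key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustbegiven!"))
materials, err := encrypt.NewCBCSecureMaterials(key)
if err != nil {
	log.Fatalln(err)
}
if _, err := client.PutEncryptedObject("bkt", "obj-enc", dataReader, materials, nil, nil); err != nil {
	log.Fatalln(err)
}
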
- if fileSize < minPartSize && fileSize >= 0 { - return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) - } - - // Upload all large objects as multipart. - n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "NotImplemented" { - // If size of file is greater than '5GiB' fail. - if fileSize > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) - } - return n, err - } - return n, nil -} - -// putObjectMultipartFromFile - Creates object from contents of *os.File -// -// NOTE: This function is meant to be used for readers with local -// file as in *os.File. This function effectively utilizes file -// system capabilities of reading from specific sections and not -// having to create temporary files. -func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metaData) - if err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize) - if err != nil { - return 0, err - } - - // Create a channel to communicate a part was uploaded. - // Buffer this to 10000, the maximum number of parts allowed by S3. - uploadedPartsCh := make(chan uploadedPartRes, 10000) - - // Create a channel to communicate which part to upload. - // Buffer this to 10000, the maximum number of parts allowed by S3. - uploadPartsCh := make(chan uploadPartReq, 10000) - - // Just for readability. - lastPartNumber := totalPartsCount - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Send each part through the partUploadCh to be uploaded. - for p := 1; p <= totalPartsCount; p++ { - part, ok := partsInfo[p] - if ok { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} - } else { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} - } - } - close(uploadPartsCh) - - // Use three 'workers' to upload parts in parallel. - for w := 1; w <= totalWorkers; w++ { - go func() { - // Deal with each part as it comes through the channel. - for uploadReq := range uploadPartsCh { - // Add hash algorithms that need to be calculated by computeHash() - // In case of a non-v4 signature or https connection, sha256 is not needed. - hashAlgos, hashSums := c.hashMaterials() - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. 
- readOffset := int64(uploadReq.PartNum-1) * partSize - missingPartSize := partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = (fileSize - lastPartSize) - missingPartSize = lastPartSize - } - - // Get a section reader on a particular offset. - sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize) - var prtSize int64 - var err error - - prtSize, err = computeHash(hashAlgos, hashSums, sectionReader) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Error: err, - } - // Exit the goroutine. - return - } - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, - hashSums["md5"], hashSums["sha256"], prtSize) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart - - // Return through the channel the part size. - uploadedPartsCh <- uploadedPartRes{ - Size: missingPartSize, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - Error: nil, - } - } - }() - } - - // Retrieve each uploaded part once it is done. - for u := 1; u <= totalPartsCount; u++ { - uploadRes := <-uploadedPartsCh - if uploadRes.Error != nil { - return totalUploadedSize, uploadRes.Error - } - // Retrieve each uploaded part and store it to be completed. - part := uploadRes.Part - if part == nil { - return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) - } - // Update the total uploaded size. - totalUploadedSize += uploadRes.Size - // Update the progress bar if there is one. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { - return totalUploadedSize, err - } - } - // Store the part to be completed. - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Verify if we uploaded all data. - if totalUploadedSize != fileSize { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil + return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index 507fd65d68..a21cfe9017 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -24,7 +24,6 @@ import ( "io/ioutil" "net/http" "net/url" - "os" "sort" "strconv" "strings" @@ -32,135 +31,27 @@ import ( "github.com/minio/minio-go/pkg/s3utils" ) -// Comprehensive put object operation involving multipart uploads. -// -// Following code handles these types of readers. 
-// -// - *os.File -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -// -func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { - if size > 0 && size > minPartSize { - // Verify if reader is *os.File, then use file system functionalities. - if isFile(reader) { - return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress) - } - // Verify if reader is *minio.Object or io.ReaderAt. - // NOTE: Verification of object is kept for a specific purpose - // while it is going to be duck typed similar to io.ReaderAt. - // It is to indicate that *minio.Object implements io.ReaderAt. - // and such a functionality is used in the subsequent code - // path. - if isObject(reader) || isReadAt(reader) { - return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress) - } - } - // For any other data size and reader type we do generic multipart - // approach by staging data in temporary files and uploading them. - return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress) -} - -// putObjectMultipartStreamNoChecksum - upload a large object using -// multipart upload and streaming signature for signing payload. -func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, - reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { - - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Initiates a new multipart request - uploadID, err := c.newUploadID(bucketName, objectName, metadata) - if err != nil { - return 0, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) +func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, + metadata map[string][]string, progress io.Reader) (n int64, err error) { + n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, size, metadata, progress) if err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Part number always starts with '1'. - var partNumber int - for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader := newHook(reader, progress) - - // Proceed to upload the part. - if partNumber == totalPartsCount { - partSize = lastPartSize - } - - var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, - io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize) - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if err == io.EOF && size < 0 { - break - } - - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += partSize - } - - // Verify if we uploaded all the data. 
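
Stepping back from these internal hunks: after the refactor, FPutObject (earlier in this patch) no longer front-loads size checks; it stats the file, derives Content-Type from the extension, and defers to the shared putObjectCommon path, which chooses the upload strategy (that function's body is outside this excerpt). Nothing changes for callers:

// Content type falls back to the file extension, then to
// application/octet-stream.
n, err := client.FPutObject("bkt", "backup.tar.gz", "/tmp/backup.tar.gz", "")
if err != nil {
	log.Fatalln(err)
}
log.Printf("uploaded %d bytes", n)
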
- if size > 0 { - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - } - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err } - - // Return final size. - return totalUploadedSize, nil + return n, err } -// putObjectStream uploads files bigger than 64MiB, and also supports -// special case where size is unknown i.e '-1'. -func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, size int64, + metadata map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return 0, err @@ -169,14 +60,15 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i return 0, err } - // Total data read and written to server. should be equal to 'size' at the end of the call. + // Total data read and written to server. should be equal to + // 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var complMultipartUpload completeMultipartUpload // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metaData) + uploadID, err := c.newUploadID(bucketName, objectName, metadata) if err != nil { return 0, err } @@ -197,8 +89,9 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i partsInfo := make(map[int]ObjectPart) for partNumber <= totalPartsCount { - // Choose hash algorithms to be calculated by hashCopyN, avoid sha256 - // with non-v4 signature request or HTTPS connection + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. hashAlgos, hashSums := c.hashMaterials() // Calculates hash sums while copying partSize bytes into tmpBuffer. @@ -214,7 +107,8 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Proceed to upload the part. 
var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize) + objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, + hashSums["md5"], hashSums["sha256"], prtSize, metadata) if err != nil { // Reset the temporary buffer upon any error. tmpBuffer.Reset() @@ -224,13 +118,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Save successfully uploaded part metadata. partsInfo[partNumber] = objPart - // Update the progress reader for the skipped part. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { - return totalUploadedSize, err - } - } - // Reset the temporary buffer. tmpBuffer.Reset() @@ -269,8 +156,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { + if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil { return totalUploadedSize, err } @@ -279,7 +165,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) { +func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -294,14 +180,14 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData // Set ContentType header. customHeader := make(http.Header) - for k, v := range metaData { + for k, v := range metadata { if len(v) > 0 { customHeader.Set(k, v[0]) } } // Set a default content-type header if the latter is not provided - if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { + if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 { customHeader.Set("Content-Type", "application/octet-stream") } @@ -332,8 +218,11 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData return initiateMultipartUploadResult, nil } +const serverEncryptionKeyPrefix = "x-amz-server-side-encryption" + // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) { +func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ObjectPart{}, err @@ -361,10 +250,21 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re // Set upload id. urlValues.Set("uploadId", uploadID) + // Set encryption headers, if any. 
+ customHeader := make(http.Header) + for k, v := range metadata { + if len(v) > 0 { + if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) { + customHeader.Set(k, v[0]) + } + } + } + reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, + customHeader: customHeader, contentBody: reader, contentLength: size, contentMD5Bytes: md5Sum, @@ -393,7 +293,8 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) { +func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, + complete completeMultipartUpload) (completeMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return completeMultipartUploadResult{}, err diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go deleted file mode 100644 index fc4c40ad4a..0000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-progress.go +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "io" - "strings" - - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" -) - -// PutObjectWithProgress - with progress. -func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) { - metaData := make(map[string][]string) - metaData["Content-Type"] = []string{contentType} - return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress) -} - -// PutEncryptedObject - Encrypt and store object. -func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) { - - if encryptMaterials == nil { - return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") - } - - if err := encryptMaterials.SetupEncryptMode(reader); err != nil { - return 0, err - } - - if metaData == nil { - metaData = make(map[string][]string) - } - - // Set the necessary encryption headers, for future decryption. - metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()} - metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()} - metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} - - return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress) -} - -// PutObjectWithMetadata - with metadata. 
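
The serverEncryptionKeyPrefix filter above is what lets SSE headers placed in object metadata reach every individual uploadPart request, which SSE-C requires for multipart uploads. A sketch of a caller threading them through; PutObjectWithSize is the variant mc now calls, with its signature inferred only from the call site in cmd/client-s3.go:

// Spread SSE-C headers (see the NewSSEInfo sketch earlier) into the
// []string-valued metadata map that the put path expects.
meta := make(map[string][]string)
for k, v := range sse.GetSSEHeaders() {
	meta[k] = []string{v}
}
if _, err := client.PutObjectWithSize("bkt", "obj-sse", dataReader, size, meta, nil); err != nil {
	log.Fatalln(err)
}
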
-func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - if reader == nil { - return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.") - } - - // Size of the object. - var size int64 - - // Get reader size. - size, err = getReaderSize(reader) - if err != nil { - return 0, err - } - - // Check for largest object size allowed. - if size > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. - if s3utils.IsGoogleEndpoint(c.endpointURL) { - // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) - } - - // putSmall object. - if size < minPartSize && size >= 0 { - return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) - } - - // For all sizes greater than 5MiB do multipart. - n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) - } - return n, err - } - return n, nil -} - -// PutObjectStreaming using AWS streaming signature V4 -func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { - return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil) -} - -// PutObjectStreamingWithMetadata using AWS streaming signature V4 -func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) { - return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil) -} - -// PutObjectStreamingWithProgress using AWS streaming signature V4 -func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { - // NOTE: Streaming signature is not supported by GCS. - if s3utils.IsGoogleEndpoint(c.endpointURL) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "AWS streaming signature v4 is not supported with Google Cloud Storage", - Key: objectName, - BucketName: bucketName, - } - } - - if c.overrideSignerType.IsV2() { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2", - Key: objectName, - BucketName: bucketName, - } - } - - // Size of the object. - var size int64 - - // Get reader size. - size, err = getReaderSize(reader) - if err != nil { - return 0, err - } - - // Check for largest object size allowed. 
- if size > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // If size cannot be found on a stream, it is not possible - // to upload using streaming signature, fall back to multipart. - if size < 0 { - return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) - } - - // Set streaming signature. - c.overrideSignerType = credentials.SignatureV4Streaming - - if size < minPartSize && size >= 0 { - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) - } - - // For all sizes greater than 64MiB do multipart. - n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) - } - return n, err - } - - return n, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go deleted file mode 100644 index 1c20f1818c..0000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-readat.go +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "sort" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// uploadedPartRes - the response received from a part upload. -type uploadedPartRes struct { - Error error // Any error encountered while uploading the part. - PartNum int // Number of the part uploaded. - Size int64 // Size of the part uploaded. - Part *ObjectPart -} - -type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part *ObjectPart // Size of the part uploaded. -} - -// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader -// of type which implements io.ReaderAt interface (ReadAt method). -// -// NOTE: This function is meant to be used for all readers which -// implement io.ReaderAt which allows us for resuming multipart -// uploads but reading at an offset, which would avoid re-read the -// data which was already uploaded. Internally this function uses -// temporary files for staging all the data, these temporary files are -// cleaned automatically when the caller i.e http client closes the -// stream after uploading all the contents successfully. 
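
[Both the ReadAt uploader deleted below and its streaming replacement later in this patch share the same fan-out/fan-in shape built on the uploadPartReq/uploadedPartRes channels declared above. A self-contained sketch of that shape, with a stub standing in for uploadPart.]

package main

import "fmt"

type partReq struct{ partNum int }
type partRes struct {
	partNum int
	size    int64
	err     error
}

func main() {
	const totalParts = 7
	const workers = 3 // the vendored code uses totalWorkers

	reqCh := make(chan partReq, totalParts)
	resCh := make(chan partRes, totalParts)

	// Fan out: queue every part number, then close the channel so
	// workers terminate when the queue drains.
	for p := 1; p <= totalParts; p++ {
		reqCh <- partReq{partNum: p}
	}
	close(reqCh)

	for w := 0; w < workers; w++ {
		go func() {
			for req := range reqCh {
				// Stand-in for uploadPart: report a fixed part size.
				resCh <- partRes{partNum: req.partNum, size: 64 << 20}
			}
		}()
	}

	// Fan in: collect exactly one response per queued part.
	var total int64
	for i := 0; i < totalParts; i++ {
		res := <-resCh
		if res.err != nil {
			fmt.Println("part", res.partNum, "failed:", res.err)
			return
		}
		total += res.size
	}
	fmt.Println("uploaded", total, "bytes in", totalParts, "parts")
}
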
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(bucketName, objectName, metaData) - if err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) - if err != nil { - return 0, err - } - - // Declare a channel that sends the next part number to be uploaded. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadPartsCh := make(chan uploadPartReq, 10000) - - // Declare a channel that sends back the response of a part upload. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadedPartsCh := make(chan uploadedPartRes, 10000) - - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Send each part number to the channel to be processed. - for p := 1; p <= totalPartsCount; p++ { - part, ok := partsInfo[p] - if ok { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} - } else { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} - } - } - close(uploadPartsCh) - - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= totalWorkers; w++ { - go func() { - // Read defaults to reading at 5MiB buffer. - readAtBuffer := make([]byte, optimalReadBufferSize) - - // Each worker will draw from the part channel and upload in parallel. - for uploadReq := range uploadPartsCh { - // Declare a new tmpBuffer. - tmpBuffer := new(bytes.Buffer) - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(uploadReq.PartNum-1) * partSize - missingPartSize := partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) - missingPartSize = lastPartSize - } - - // Get a section reader on a particular offset. - sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) - - // Choose the needed hash algorithms to be calculated by hashCopyBuffer. - // Sha256 is avoided in non-v4 signature requests or HTTPS connections - hashAlgos, hashSums := c.hashMaterials() - - var prtSize int64 - var err error - prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer) - if err != nil { - // Send the error back through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: 0, - Error: err, - } - // Exit the goroutine. - return - } - - // Proceed to upload the part. 
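
[The per-part reads above hinge on io.NewSectionReader, which turns an io.ReaderAt plus an offset/length pair into an ordinary io.Reader. A small runnable illustration of the 1-based part offset arithmetic used here.]

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Any io.ReaderAt works; strings.Reader stands in for *os.File.
	src := strings.NewReader("0123456789abcdef")

	const partSize = 4
	// Part numbers are 1-based, so part 3 starts at offset 8.
	partNum := int64(3)
	section := io.NewSectionReader(src, (partNum-1)*partSize, partSize)

	buf := new(strings.Builder)
	n, err := io.Copy(buf, section)
	if err != nil {
		panic(err)
	}
	fmt.Printf("part %d: %q (%d bytes)\n", partNum, buf.String(), n)
}
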
- var objPart ObjectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, - uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Size: 0, - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart - - // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: missingPartSize, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - Error: nil, - } - } - }() - } - - // Gather the responses as they occur and update any - // progress bar. - for u := 1; u <= totalPartsCount; u++ { - uploadRes := <-uploadedPartsCh - if uploadRes.Error != nil { - return totalUploadedSize, uploadRes.Error - } - // Retrieve each uploaded part and store it to be completed. - // part, ok := partsInfo[uploadRes.PartNum] - part := uploadRes.Part - if part == nil { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) - } - // Update the totalUploadedSize. - totalUploadedSize += uploadRes.Size - // Update the progress bar if there is one. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { - return totalUploadedSize, err - } - } - // Store the parts to be completed in order. - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Verify if we uploaded all the data. - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go new file mode 100644 index 0000000000..997050cae9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -0,0 +1,416 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "io" + "net/http" + "sort" + "strings" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// PutObjectStreaming using AWS streaming signature V4 +func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { + return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil) +} + +// putObjectMultipartStream - upload a large object using +// multipart upload and streaming signature for signing payload. +// Comprehensive put object operation involving multipart uploads. +// +// Following code handles these types of readers. 
+// +// - *os.File +// - *minio.Object +// - Any reader which has a method 'ReadAt()' +// +func (c Client) putObjectMultipartStream(bucketName, objectName string, + reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { + + // Verify if reader is *minio.Object, *os.File or io.ReaderAt. + // NOTE: Verification of object is kept for a specific purpose + // while it is going to be duck typed similar to io.ReaderAt. + // It is to indicate that *minio.Object implements io.ReaderAt. + // and such a functionality is used in the subsequent code path. + if isFile(reader) || !isObject(reader) && isReadAt(reader) { + n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress) + } else { + n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) + } + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + } + } + return n, err +} + +// uploadedPartRes - the response received from a part upload. +type uploadedPartRes struct { + Error error // Any error encountered while uploading the part. + PartNum int // Number of the part uploaded. + Size int64 // Size of the part uploaded. + Part *ObjectPart +} + +type uploadPartReq struct { + PartNum int // Number of the part uploaded. + Part *ObjectPart // Size of the part uploaded. +} + +// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB. +// Supports all readers which implements io.ReaderAt interface +// (ReadAt method). +// +// NOTE: This function is meant to be used for all readers which +// implement io.ReaderAt which allows us for resuming multipart +// uploads but reading at an offset, which would avoid re-read the +// data which was already uploaded. Internally this function uses +// temporary files for staging all the data, these temporary files are +// cleaned automatically when the caller i.e http client closes the +// stream after uploading all the contents successfully. +func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string, + reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(bucketName, objectName, metadata) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Declare a channel that sends the next part number to be uploaded. 
+ // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadPartsCh := make(chan uploadPartReq, 10000) + + // Declare a channel that sends back the response of a part upload. + // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadedPartsCh := make(chan uploadedPartRes, 10000) + + // Used for readability, lastPartNumber is always totalPartsCount. + lastPartNumber := totalPartsCount + + // Send each part number to the channel to be processed. + for p := 1; p <= totalPartsCount; p++ { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} + } + close(uploadPartsCh) + + // Receive each part number from the channel allowing three parallel uploads. + for w := 1; w <= totalWorkers; w++ { + go func() { + // Each worker will draw from the part channel and upload in parallel. + for uploadReq := range uploadPartsCh { + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if uploadReq.PartNum == lastPartNumber { + readOffset = (size - lastPartSize) + partSize = lastPartSize + } + + // Get a section reader on a particular offset. + sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress) + + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, + sectionReader, uploadReq.PartNum, + nil, nil, partSize, metadata) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } + + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + + // Send successful part info through the channel. + uploadedPartsCh <- uploadedPartRes{ + Size: objPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, + } + } + }() + } + + // Gather the responses as they occur and update any + // progress bar. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error + } + // Retrieve each uploaded part and store it to be completed. + // part, ok := partsInfo[uploadRes.PartNum] + part := uploadRes.Part + if part == nil { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + } + // Update the totalUploadedSize. + totalUploadedSize += uploadRes.Size + // Store the parts to be completed in order. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, + reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { + // Input validation. 
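
[optimalPartInfo, called above, lives in api-put-object-common.go and is not shown in this hunk. A sketch of the shape of that computation, assuming the constants from constants.go; the exact vendored arithmetic may differ.]

package main

import "fmt"

const (
	maxPartsCount             = 10000
	minPartSize               = 1024 * 1024 * 64
	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
)

// optimalPartSize mirrors the shape of the vendored optimalPartInfo:
// pick a part size no smaller than minPartSize that fits the object
// into at most maxPartsCount parts. This is a sketch, not the exact
// vendored arithmetic.
func optimalPartSize(objectSize int64) (totalParts int, partSize, lastPartSize int64, err error) {
	if objectSize > maxMultipartPutObjectSize {
		return 0, 0, 0, fmt.Errorf("object size %d exceeds 5TiB", objectSize)
	}
	if objectSize < 0 {
		// Unknown stream size: assume the worst case.
		objectSize = maxMultipartPutObjectSize
	}
	partSize = minPartSize
	for objectSize/partSize > maxPartsCount {
		partSize *= 2
	}
	totalParts = int((objectSize + partSize - 1) / partSize)
	lastPartSize = objectSize - int64(totalParts-1)*partSize
	return totalParts, partSize, lastPartSize, nil
}

func main() {
	parts, size, last, _ := optimalPartSize(1024 * 1024 * 1024) // 1GiB
	fmt.Println(parts, size, last)                              // 16 parts of 64MiB
}
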
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Initiates a new multipart request + uploadID, err := c.newUploadID(bucketName, objectName, metadata) + if err != nil { + return 0, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader := newHook(reader, progress) + + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, + io.LimitReader(hookReader, partSize), + partNumber, nil, nil, partSize, metadata) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// putObjectNoChecksum special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Size -1 is only supported on Google Cloud Storage, we error + // out in all other situations. + if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) { + return 0, ErrEntityTooSmall(size, bucketName, objectName) + } + if size > 0 { + if isReadAt(reader) && !isObject(reader) { + reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size) + } + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + readSeeker := newHook(reader, progress) + + // This function does not calculate sha256 and md5sum for payload. 
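
[The no-checksum streaming path above slices one sequential stream into parts with io.LimitReader rather than seeking. A runnable miniature of that pattern.]

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("0123456789abcdef")
	const partSize = 5

	// Reading parts sequentially off one stream: each LimitReader
	// consumes the next partSize bytes of src, the way the
	// no-checksum streaming path feeds uploadPart above.
	for part := 1; ; part++ {
		var buf strings.Builder
		n, err := io.Copy(&buf, io.LimitReader(src, partSize))
		if n == 0 {
			break
		}
		fmt.Printf("part %d: %q\n", part, buf.String())
		if err != nil || n < partSize {
			break
		}
	}
}
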
+ // Execute put object. + st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + + // Set headers. + customHeader := make(http.Header) + + // Set metadata to headers + for k, v := range metaData { + if len(v) > 0 { + customHeader.Set(k, v[0]) + } + } + + // If Content-Type is not provided, set the default application/octet-stream one + if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { + customHeader.Set("Content-Type", "application/octet-stream") + } + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Bytes: md5Sum, + contentSHA256Bytes: sha256Sum, + } + + // Execute PUT an objectName. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + var objInfo ObjectInfo + // Trim off the odd double quotes from ETag in the beginning and end. + objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") + // A success here means data was written to server successfully. + objInfo.Size = size + + // Return here. + return objInfo, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index fbcfb171e9..2ea4987895 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -18,13 +18,12 @@ package minio import ( "io" - "io/ioutil" - "net/http" "os" "reflect" "runtime" "strings" + "github.com/minio/minio-go/pkg/credentials" "github.com/minio/minio-go/pkg/s3utils" ) @@ -143,164 +142,77 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // // You must have WRITE permissions on a bucket to create an object. // -// - For size smaller than 64MiB PutObject automatically does a single atomic Put operation. -// - For size larger than 64MiB PutObject automatically does a multipart Put operation. -// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF. -// Maximum object size that can be uploaded through this operation will be 5TiB. -// -// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. -// So we fall back to single PUT operation with the maximum limit of 5GiB. +// - For size smaller than 64MiB PutObject automatically does a +// single atomic Put operation. +// - For size larger than 64MiB PutObject automatically does a +// multipart Put operation. 
+// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { - return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil) + return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{ + "Content-Type": []string{contentType}, + }, nil) } -// putObjectNoChecksum special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - if size > 0 { - readerAt, ok := reader.(io.ReaderAt) - if ok { - reader = io.NewSectionReader(readerAt, 0, size) - } - } - - // Update progress reader appropriately to the latest offset as we - // read from the source. - readSeeker := newHook(reader, progress) - - // This function does not calculate sha256 and md5sum for payload. - // Execute put object. - st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil +// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject() +// but takes the size argument explicitly, this function avoids doing reflection +// internally to figure out the size of input stream. Also if the input size is +// lesser than 0 this function returns an error. +func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { + return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress) } -// putObjectSingle is a special function for uploading single put object request. -// This special function is used as a fallback when multipart upload fails. -func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // If size is a stream, upload up to 5GiB. - if size <= -1 { - size = maxSinglePutObjectSize - } - - // Add the appropriate hash algorithms that need to be calculated by hashCopyN - // In case of non-v4 signature request or HTTPS connection, sha256 is not needed. - hashAlgos, hashSums := c.hashMaterials() - - // Initialize a new temporary file. - tmpFile, err := newTempFile("single$-putobject-single") - if err != nil { - return 0, err - } - defer tmpFile.Close() - - size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) - // Return error if its not io.EOF. 
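
[PutObjectWithSize, added above, lets callers skip the reflection-based size probe. A hedged usage sketch with placeholder endpoint and credentials.]

package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Endpoint and credentials are placeholders.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	payload := []byte("hello, streaming world")

	// Passing the size explicitly skips the reflection-based
	// getReaderSize() probe used by PutObjectWithProgress.
	n, err := client.PutObjectWithSize("mybucket", "myobject",
		bytes.NewReader(payload), int64(len(payload)),
		map[string][]string{"Content-Type": {"text/plain"}}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("uploaded", n, "bytes")
}
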
- if err != nil && err != io.EOF { - return 0, err - } +// PutObjectWithMetadata using AWS streaming signature V4 +func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { + return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress) +} - // Seek back to beginning of the temporary file. - if _, err = tmpFile.Seek(0, 0); err != nil { - return 0, err - } - reader = tmpFile +// PutObjectWithProgress using AWS streaming signature V4 +func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { + // Size of the object. + var size int64 - // Execute put object. - st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData) + // Get reader size. + size, err = getReaderSize(reader) if err != nil { return 0, err } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - // Progress the reader to the size if putObjectDo is successful. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil { - return size, err - } - } - return size, nil + return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress) } -// putObjectDo - executes the put object http operation. -// NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err +func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) { + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) } - // Set headers. - customHeader := make(http.Header) + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(c.endpointURL) { + // Do not compute MD5 for Google Cloud Storage. + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) + } - // Set metadata to headers - for k, v := range metaData { - if len(v) > 0 { - customHeader.Set(k, v[0]) + if c.overrideSignerType.IsV2() { + if size > 0 && size < minPartSize { + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) } + return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress) } - // If Content-Type is not provided, set the default application/octet-stream one - if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { - customHeader.Set("Content-Type", "application/octet-stream") + // If size cannot be found on a stream, it is not possible + // to upload using streaming signature, fall back to multipart. + if size < 0 { + return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress) } - // Populate request metadata. 
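
[putObjectCommon above is the single dispatch point that replaces the deleted per-variant logic. A condensed, runnable restatement of its decision tree; isGoogle and isV2 stand in for the endpoint and signer checks on the client.]

package main

import "fmt"

const (
	minPartSize               = int64(64) << 20
	maxMultipartPutObjectSize = int64(5) << 40
)

// choosePath condenses the dispatch in putObjectCommon into a pure
// function for illustration; it is annotation, not new API.
func choosePath(size int64, isGoogle, isV2 bool) string {
	switch {
	case size > maxMultipartPutObjectSize:
		return "error: entity too large"
	case isGoogle:
		return "single PUT, no checksum (GCS has no S3 multipart)"
	case isV2 && size > 0 && size < minPartSize:
		return "single PUT, signature v2"
	case isV2:
		return "classic multipart, signature v2"
	case size < 0:
		return "classic multipart until EOF"
	case size < minPartSize:
		return "single PUT, streaming signature v4"
	default:
		return "multipart, streaming signature v4"
	}
}

func main() {
	fmt.Println(choosePath(10<<20, false, false)) // 10MiB -> single PUT
	fmt.Println(choosePath(1<<30, false, false))  // 1GiB  -> streaming multipart
	fmt.Println(choosePath(-1, false, false))     // stdin -> multipart until EOF
}
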
- reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Bytes: md5Sum, - contentSHA256Bytes: sha256Sum, - } + // Set streaming signature. + c.overrideSignerType = credentials.SignatureV4Streaming - // Execute PUT an objectName. - resp, err := c.executeMethod("PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err + if size < minPartSize { + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - var objInfo ObjectInfo - // Trim off the odd double quotes from ETag in the beginning and end. - objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") - // A success here means data was written to server successfully. - objInfo.Size = size - // Return here. - return objInfo, nil + // For all sizes greater than 64MiB do multipart. + return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) } diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 39a3643033..3020e5b9d1 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -616,17 +616,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R return nil, err } - // Go net/http notoriously closes the request body. - // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. - // This can cause underlying *os.File seekers to fail, avoid that - // by making sure to wrap the closer as a nop. - var body io.ReadCloser - if metadata.contentBody != nil { - body = ioutil.NopCloser(metadata.contentBody) - } - // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), body) + req, err = http.NewRequest(method, targetURL.String(), nil) if err != nil { return nil, err } @@ -678,6 +669,16 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.Header.Set(k, v[0]) } + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = ioutil.NopCloser(metadata.contentBody) + } + // Set incoming content-length. 
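
[The relocated body handling above wraps the content in ioutil.NopCloser so net/http's Transport cannot close a caller's reader (e.g. an *os.File) out from under a retry, and leaves the body nil when the content length is zero. A minimal demonstration of the same wrapping.]

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// net/http's Transport closes req.Body even on error. Wrapping
	// the reader in a NopCloser (as newRequest does above) keeps an
	// underlying seeker usable afterwards.
	body := strings.NewReader("chunk payload")

	req, err := http.NewRequest("PUT", "https://example.net/bucket/object", nil)
	if err != nil {
		panic(err)
	}
	req.Body = ioutil.NopCloser(body)
	req.ContentLength = int64(body.Len())

	fmt.Println(req.Method, req.URL, req.ContentLength)
}
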
req.ContentLength = metadata.contentLength if req.ContentLength <= -1 { diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml new file mode 100644 index 0000000000..4f5c1b3907 --- /dev/null +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -0,0 +1,39 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\minio\minio-go + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -u github.com/golang/lint/golint + - go get -u github.com/go-ini/ini + - go get -u github.com/minio/go-homedir + - go get -u github.com/remyoudompheng/go-misc/deadcode + - go get -u github.com/gordonklaus/ineffassign + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go vet ./... + - gofmt -s -l . + - golint -set_exit_status github.com/minio/minio-go... + - deadcode + - ineffassign . + - go test -short -v + - go test -short -race -v + +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index 6055bfdad6..9771d2f929 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -18,10 +18,18 @@ package minio /// Multipart upload defaults. -// miniPartSize - minimum part size 64MiB per object after which +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 64MiB per object after which // putObject behaves internally as multipart. const minPartSize = 1024 * 1024 * 64 +// copyPartSize - default (and maximum) part size to copy in a +// copy-object request (5GiB) +const copyPartSize = 1024 * 1024 * 1024 * 5 + // maxPartsCount - maximum number of parts for a single multipart session. const maxPartsCount = 10000 @@ -37,10 +45,6 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 // Multipart operation. const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 -// optimalReadBufferSize - optimal buffer 5MiB used for reading -// through Read operation. -const optimalReadBufferSize = 1024 * 1024 * 5 - // unsignedPayload - value to be set to X-Amz-Content-Sha256 header when // we don't want to sign the request payload const unsignedPayload = "UNSIGNED-PAYLOAD" diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go index be9388cecf..4b1054a696 100644 --- a/vendor/github.com/minio/minio-go/core.go +++ b/vendor/github.com/minio/minio-go/core.go @@ -70,7 +70,13 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de // PutObjectPart - Upload an object part. func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) { - return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size) + return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil) +} + +// PutObjectPartWithMetadata - upload an object part with additional request metadata. 
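
[On the new constants above: with the 64MiB default part size, 10000 parts cannot reach the 5TiB object ceiling, which is why the part size must scale with the object size. The arithmetic, using the values from constants.go.]

package main

import "fmt"

const (
	maxPartsCount             = 10000
	minPartSize               = int64(64) << 20 // 64MiB
	maxMultipartPutObjectSize = int64(5) << 40  // 5TiB
)

func main() {
	// With the default 64MiB parts, multipart tops out well below
	// the 5TiB S3 object limit...
	fmt.Printf("64MiB x %d parts = %d GiB\n",
		maxPartsCount, minPartSize*maxPartsCount>>30)

	// ...so reaching 5TiB requires larger parts; this is why the
	// part size scales up with the object size.
	needed := (maxMultipartPutObjectSize + maxPartsCount - 1) / maxPartsCount
	fmt.Printf("5TiB needs parts of at least %d bytes (~%.0f MiB)\n",
		needed, float64(needed)/(1<<20))
}
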
+func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, + size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) { + return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata) } // ListObjectParts - List uploaded parts of an incomplete upload.x @@ -80,7 +86,9 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error { - _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts}) + _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{ + Parts: parts, + }) return err } diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go index cbb889d8d3..b2d46e1786 100644 --- a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -583,7 +583,7 @@ func GetPolicies(statements []Statement, bucketName string) map[string]BucketPol r = r[:len(r)-1] asterisk = "*" } - objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)] + objectPath := r[len(awsResourcePrefix+bucketName)+1:] p := GetPolicy(statements, bucketName, objectPath) policyRules[bucketName+"/"+objectPath+asterisk] = p } diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go index c2f0baee62..22059bb1d4 100644 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -21,6 +21,7 @@ import ( "encoding/hex" "fmt" "io" + "io/ioutil" "net/http" "strconv" "strings" @@ -205,6 +206,10 @@ func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionTok // Set headers needed for streaming signature. prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + stReader := &StreamingReader{ baseReadCloser: req.Body, accessKeyID: accessKeyID, @@ -249,7 +254,18 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { s.chunkBufLen = 0 for { n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - if err == nil || err == io.ErrUnexpectedEOF { + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { s.chunkBufLen += n1 s.bytesRead += int64(n1) @@ -260,25 +276,26 @@ func (s *StreamingReader) Read(buf []byte) (int, error) { s.signChunk(s.chunkBufLen) break } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. 
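
[The reworked Read above follows the io.Reader contract spelled out in its comment: consume n > 0 bytes before looking at err, since a reader may return data together with io.EOF. The canonical loop, standalone.]

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("streaming body")
	buf := make([]byte, 4)
	var total int

	// Process the n > 0 bytes first, inspect the error second.
	for {
		n, err := r.Read(buf)
		if n > 0 {
			total += n
			fmt.Printf("chunk: %q\n", buf[:n])
		}
		if err != nil {
			if err == io.EOF {
				break // clean end of stream
			}
			panic(err)
		}
	}
	fmt.Println("read", total, "bytes")
}
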
+ if s.bytesRead != s.contentLen { + return 0, io.ErrUnexpectedEOF + } - } else if err == io.EOF { - // No more data left in baseReader - last chunk. - // Done reading the last chunk from baseReader. - s.done = true - - // bytes read from baseReader different than - // content length provided. - if s.bytesRead != s.contentLen { - return 0, io.ErrUnexpectedEOF + // Sign the chunk and write it to s.buf. + s.signChunk(0) + break } - - // Sign the chunk and write it to s.buf. - s.signChunk(0) - break - - } else { return 0, err } + } } return s.buf.Read(buf) diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go deleted file mode 100644 index 65c7b0da16..0000000000 --- a/vendor/github.com/minio/minio-go/tempfile.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "io/ioutil" - "os" - "sync" -) - -// tempFile - temporary file container. -type tempFile struct { - *os.File - mutex *sync.Mutex -} - -// newTempFile returns a new temporary file, once closed it automatically deletes itself. -func newTempFile(prefix string) (*tempFile, error) { - // use platform specific temp directory. - file, err := ioutil.TempFile(os.TempDir(), prefix) - if err != nil { - return nil, err - } - return &tempFile{ - File: file, - mutex: &sync.Mutex{}, - }, nil -} - -// Close - closer wrapper to close and remove temporary file. -func (t *tempFile) Close() error { - t.mutex.Lock() - defer t.mutex.Unlock() - if t.File != nil { - // Close the file. - if err := t.File.Close(); err != nil { - return err - } - // Remove file. 
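
[tempfile.go goes away because streaming signature removes the need to stage data on disk. For reference, the wire framing that signChunk produces is the aws-chunked format; a sketch of that framing with a placeholder signature (the real one is an HMAC chain seeded from the request's seed signature).]

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// writeChunk frames one payload chunk in the aws-chunked wire format:
//
//   <hex chunk size>;chunk-signature=<sig>\r\n<chunk data>\r\n
//
// The signature argument here is a placeholder.
func writeChunk(w *bytes.Buffer, data []byte, sig string) {
	w.WriteString(strconv.FormatInt(int64(len(data)), 16))
	w.WriteString(";chunk-signature=" + sig + "\r\n")
	w.Write(data)
	w.WriteString("\r\n")
}

func main() {
	var out bytes.Buffer
	writeChunk(&out, []byte("hello world"), "deadbeef")
	writeChunk(&out, nil, "cafebabe") // zero-length final chunk ends the stream
	fmt.Printf("%q\n", out.String())
}
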
- if err := os.Remove(t.File.Name()); err != nil { - return err - } - t.File = nil - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index d7f0181e8e..6f54639e0c 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -122,7 +122,7 @@ func isValidEndpointURL(endpointURL url.URL) error { if endpointURL.Path != "/" && endpointURL.Path != "" { return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") } - if strings.Contains(endpointURL.Host, ".amazonaws.com") { + if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { if !s3utils.IsAmazonEndpoint(endpointURL) { return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") } diff --git a/vendor/vendor.json b/vendor/vendor.json index 8ce9f0a9e5..138353be3c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -56,46 +56,46 @@ "revisionTime": "2015-10-24T22:24:27-07:00" }, { - "checksumSHA1": "ZqQ3+2eNkE5vKb1TZkxXqL5Tb2U=", + "checksumSHA1": "XwBV3zPzRIeWz6ItdEh3+H1r1Rc=", "path": "github.com/minio/minio-go", - "revision": "fdad76bdf4d11808a602d8cb3500d4bc53d21599", - "revisionTime": "2017-06-24T05:43:48Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { - "checksumSHA1": "qPt3l9ZUjG0J4oZNTSrKy17rts0=", + "checksumSHA1": "5juljGXPkBWENR2Os7dlnPQER48=", "path": "github.com/minio/minio-go/pkg/credentials", - "revision": "fdad76bdf4d11808a602d8cb3500d4bc53d21599", - "revisionTime": "2017-06-24T05:43:48Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { "checksumSHA1": "pggIpSePizRBQ7ybhB0CuaSQydw=", "path": "github.com/minio/minio-go/pkg/encrypt", - "revision": "fdad76bdf4d11808a602d8cb3500d4bc53d21599", - "revisionTime": "2017-06-24T05:43:48Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { - "checksumSHA1": "neH34/65OXeKHM/MlV8MbhcdFBc=", + "checksumSHA1": "3tl2ehmod/EzXE9o9WJ5HM2AQPE=", "path": "github.com/minio/minio-go/pkg/policy", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { - "checksumSHA1": "KUD7ZG+upbJZaHPyFEabaDHNGKA=", + "checksumSHA1": "b0SZRgEYgQsl6zQ/npYHQOQ9tNw=", "path": "github.com/minio/minio-go/pkg/s3signer", - "revision": "fdad76bdf4d11808a602d8cb3500d4bc53d21599", - "revisionTime": "2017-06-24T05:43:48Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { - "checksumSHA1": "HI3i+Ij38JHPRcNs7KyUdGTBOhE=", + "checksumSHA1": "XTEUN/pAWAusSXT3yn6UznCl3iA=", "path": "github.com/minio/minio-go/pkg/s3utils", - "revision": "fdad76bdf4d11808a602d8cb3500d4bc53d21599", - "revisionTime": "2017-06-24T05:43:48Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { "checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=", "path": "github.com/minio/minio-go/pkg/set", - "revision": "5297a818b482fa329b3dc1a3926e3c4c6fb5d459", - "revisionTime": "2017-04-26T18:23:05Z" + "revision": "fb54c1def6b1376e9bc99dfcee115415d5cf6336", + "revisionTime": "2017-07-04T21:12:16Z" }, { "checksumSHA1": "Z00ntXYcYqK9CjXD+tVgGwIZL1o=",