diff --git a/api-put-object-progress.go b/api-put-object-progress.go
index e5b24ad2a1..b61950d934 100644
--- a/api-put-object-progress.go
+++ b/api-put-object-progress.go
@@ -17,6 +17,7 @@
 package minio
 
 import (
+	"fmt"
 	"io"
 	"strings"
 
@@ -85,17 +86,14 @@ func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.R
 	// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
 	// So we fall back to single PUT operation with the maximum limit of 5GiB.
 	if s3utils.IsGoogleEndpoint(c.endpointURL) {
-		if size <= -1 {
+		if size > maxSinglePutObjectSize {
 			return 0, ErrorResponse{
 				Code:       "NotImplemented",
-				Message:    "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
+				Message:    fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", size),
 				Key:        objectName,
 				BucketName: bucketName,
 			}
 		}
-		if size > maxSinglePutObjectSize {
-			return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
-		}
 		// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
 		return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
 	}
diff --git a/api-put-object.go b/api-put-object.go
index e61f305cb2..d371168907 100644
--- a/api-put-object.go
+++ b/api-put-object.go
@@ -170,6 +170,30 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
 		return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
 	}
 
+	if size <= -1 {
+		// Initialize a new temporary file.
+		var tmpFile *tempFile
+		tmpFile, err = newTempFile("single$-putobject-single")
+		if err != nil {
+			return 0, err
+		}
+		defer tmpFile.Close()
+		size, err = io.Copy(tmpFile, io.LimitReader(reader, maxSinglePutObjectSize))
+		if err != nil {
+			return 0, err
+		}
+		// Seek back to beginning of the temporary file.
+		if _, err = tmpFile.Seek(0, 0); err != nil {
+			return 0, err
+		}
+		reader = tmpFile
+	} else {
+		readerAt, ok := reader.(io.ReaderAt)
+		if ok {
+			reader = io.NewSectionReader(readerAt, 0, size)
+		}
+	}
+
 	// Update progress reader appropriately to the latest offset as we
 	// read from the source.
 	readSeeker := newHook(reader, progress)
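For context on the new negative-size branch in putObjectNoChecksum: a reader whose length is unknown (size <= -1) is spooled into a temporary file, capped at maxSinglePutObjectSize, so the upload can be sent with an exact Content-Length. Below is a minimal standalone sketch of that spooling pattern using only the Go standard library; the helper name bufferUnknownSize and the hard-coded 5 GiB cap are illustrative and not part of this change.

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"os"
    	"strings"
    )

    // Illustrative stand-in for the library's 5 GiB single-PUT limit.
    const maxSinglePutObjectSize = 5 * 1024 * 1024 * 1024

    // bufferUnknownSize spools a reader of unknown length into a temporary file,
    // capped at maxSinglePutObjectSize, and returns the file (rewound to offset 0)
    // together with the exact number of bytes buffered. The caller closes and
    // removes the file.
    func bufferUnknownSize(reader io.Reader) (*os.File, int64, error) {
    	tmpFile, err := ioutil.TempFile("", "putobject-")
    	if err != nil {
    		return nil, 0, err
    	}
    	// Copy at most maxSinglePutObjectSize bytes so an oversized stream
    	// cannot fill the disk.
    	size, err := io.Copy(tmpFile, io.LimitReader(reader, maxSinglePutObjectSize))
    	if err != nil {
    		tmpFile.Close()
    		os.Remove(tmpFile.Name())
    		return nil, 0, err
    	}
    	// Seek back to the beginning so the upload reads from the start.
    	if _, err := tmpFile.Seek(0, 0); err != nil {
    		tmpFile.Close()
    		os.Remove(tmpFile.Name())
    		return nil, 0, err
    	}
    	return tmpFile, size, nil
    }

    func main() {
    	// Stands in for a stream whose length is not known up front (size == -1).
    	src := strings.NewReader("hello, object storage")

    	tmpFile, size, err := bufferUnknownSize(src)
    	if err != nil {
    		panic(err)
    	}
    	defer func() {
    		tmpFile.Close()
    		os.Remove(tmpFile.Name())
    	}()

    	fmt.Printf("buffered %d bytes into %s, ready for a single PUT\n", size, tmpFile.Name())
    }

The else branch in the diff covers the known-size case: readers that also implement io.ReaderAt are wrapped in io.NewSectionReader bounded to size, giving the upload a seekable view over exactly size bytes.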