diff --git a/api-bucket-notification.go b/api-bucket-notification.go
index 1e6f3da1c..dc37b0c07 100644
--- a/api-bucket-notification.go
+++ b/api-bucket-notification.go
@@ -103,7 +103,6 @@ func (c *Client) getBucketNotification(ctx context.Context, bucketName string) (
 		return notification.Configuration{}, err
 	}
 	return processBucketNotificationResponse(bucketName, resp)
-
 }
 
 // processes the GetNotification http response from the server.
@@ -207,7 +206,7 @@ func (c *Client) ListenBucketNotification(ctx context.Context, bucketName, prefi
 			// Use a higher buffer to support unexpected
 			// caching done by proxies
 			bio.Buffer(notificationEventBuffer, notificationCapacity)
-			var json = jsoniter.ConfigCompatibleWithStandardLibrary
+			json := jsoniter.ConfigCompatibleWithStandardLibrary
 
 			// Unmarshal each line, returns marshaled values.
 			for bio.Scan() {
diff --git a/api-compose-object.go b/api-compose-object.go
index f349f99ed..b59924a3d 100644
--- a/api-compose-object.go
+++ b/api-compose-object.go
@@ -202,8 +202,8 @@ func (opts CopySrcOptions) validate() (err error) {
 
 // Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
 func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
-	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) {
-
+	metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions,
+) (ObjectInfo, error) {
 	// Build headers.
 	headers := make(http.Header)
 
@@ -285,8 +285,8 @@ func (c *Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBuc
 }
 
 func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
-
+	partID int, startOffset int64, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
 	headers := make(http.Header)
 
 	// Set source
@@ -338,8 +338,8 @@ func (c *Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, des
 // upload via an upload-part-copy request
 // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
 func (c *Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
-	headers http.Header) (p CompletePart, err error) {
-
+	headers http.Header,
+) (p CompletePart, err error) {
 	// Build query parameters
 	urlValues := make(url.Values)
 	urlValues.Set("partNumber", strconv.Itoa(partNumber))
@@ -492,7 +492,7 @@ func (c *Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ..
 	objParts := []CompletePart{}
 	partIndex := 1
 	for i, src := range srcs {
-		var h = make(http.Header)
+		h := make(http.Header)
 		src.Marshal(h)
 		if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC {
 			dst.Encryption.Marshal(h)
diff --git a/api-compose-object_test.go b/api-compose-object_test.go
index fbf8f66a7..f573057b7 100644
--- a/api-compose-object_test.go
+++ b/api-compose-object_test.go
@@ -55,7 +55,6 @@ func TestPartsRequired(t *testing.T) {
 }
 
 func TestCalculateEvenSplits(t *testing.T) {
-
 	testCases := []struct {
 		// input size and source object
 		size int64
@@ -69,47 +68,77 @@
 		{1, CopySrcOptions{Start: 0}, []int64{0}, []int64{0}},
 
 		{gb1, CopySrcOptions{Start: -1}, []int64{0, 536870912}, []int64{536870911, 1073741823}},
-		{gb5, CopySrcOptions{Start: -1},
-			[]int64{0, 536870912, 1073741824, 1610612736, 2147483648, 2684354560,
-				3221225472, 3758096384, 4294967296, 4831838208},
-			[]int64{536870911, 1073741823, 1610612735, 2147483647, 2684354559, 3221225471,
-				3758096383, 4294967295, 4831838207, 5368709119},
+		{
+			gb5,
+			CopySrcOptions{Start: -1},
+			[]int64{
+				0, 536870912, 1073741824, 1610612736, 2147483648, 2684354560,
+				3221225472, 3758096384, 4294967296, 4831838208,
+			},
+			[]int64{
+				536870911, 1073741823, 1610612735, 2147483647, 2684354559, 3221225471,
+				3758096383, 4294967295, 4831838207, 5368709119,
+			},
 		},
 
 		// 2 part splits
-		{gb5p1, CopySrcOptions{Start: -1},
-			[]int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
-				3221225473, 3758096385, 4294967297, 4831838209},
-			[]int64{536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472,
-				3758096384, 4294967296, 4831838208, 5368709120},
+		{
+			gb5p1,
+			CopySrcOptions{Start: -1},
+			[]int64{
+				0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
+				3221225473, 3758096385, 4294967297, 4831838209,
+			},
+			[]int64{
+				536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472,
+				3758096384, 4294967296, 4831838208, 5368709120,
+			},
 		},
-		{gb5p1, CopySrcOptions{Start: -1},
-			[]int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
-				3221225473, 3758096385, 4294967297, 4831838209},
-			[]int64{536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472,
-				3758096384, 4294967296, 4831838208, 5368709120},
+		{
+			gb5p1,
+			CopySrcOptions{Start: -1},
+			[]int64{
+				0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
+				3221225473, 3758096385, 4294967297, 4831838209,
+			},
+			[]int64{
+				536870912, 1073741824, 1610612736, 2147483648, 2684354560, 3221225472,
+				3758096384, 4294967296, 4831838208, 5368709120,
+			},
 		},
 
 		// 3 part splits
-		{gb10p1, CopySrcOptions{Start: -1},
-			[]int64{0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
+		{
+			gb10p1,
+			CopySrcOptions{Start: -1},
+			[]int64{
+				0, 536870913, 1073741825, 1610612737, 2147483649, 2684354561,
 				3221225473, 3758096385, 4294967297, 4831838209, 5368709121,
 				5905580033, 6442450945, 6979321857, 7516192769, 8053063681,
-				8589934593, 9126805505, 9663676417, 10200547329},
-			[]int64{536870912, 1073741824, 1610612736, 2147483648, 2684354560,
+				8589934593, 9126805505, 9663676417, 10200547329,
+			},
+			[]int64{
+				536870912, 1073741824, 1610612736, 2147483648, 2684354560,
 				3221225472, 3758096384, 4294967296, 4831838208, 5368709120,
 				5905580032, 6442450944, 6979321856, 7516192768, 8053063680,
-				8589934592, 9126805504, 9663676416, 10200547328, 10737418240},
+				8589934592, 9126805504, 9663676416, 10200547328, 10737418240,
+			},
 		},
-		{gb10p2, CopySrcOptions{Start: -1},
-			[]int64{0, 536870913, 1073741826, 1610612738, 2147483650, 2684354562,
+		{
+			gb10p2,
+			CopySrcOptions{Start: -1},
+			[]int64{
+				0, 536870913, 1073741826, 1610612738, 2147483650, 2684354562,
 				3221225474, 3758096386, 4294967298, 4831838210, 5368709122,
 				5905580034, 6442450946, 6979321858, 7516192770, 8053063682,
-				8589934594, 9126805506, 9663676418, 10200547330},
-			[]int64{536870912, 1073741825, 1610612737, 2147483649, 2684354561,
+				8589934594, 9126805506, 9663676418, 10200547330,
+			},
+			[]int64{
+				536870912, 1073741825, 1610612737, 2147483649, 2684354561,
 				3221225473, 3758096385, 4294967297, 4831838209, 5368709121,
 				5905580033, 6442450945, 6979321857, 7516192769, 8053063681,
-				8589934593, 9126805505, 9663676417, 10200547329, 10737418241},
+				8589934593, 9126805505, 9663676417, 10200547329, 10737418241,
+			},
 		},
 	}
 
diff --git a/api-error-response_test.go b/api-error-response_test.go
index 687b1df49..57905fc00 100644
--- a/api-error-response_test.go
+++ b/api-error-response_test.go
@@ -174,7 +174,6 @@ func TestHttpRespToErrorResponse(t *testing.T) {
 		// expected results.
 		expectedResult error
 		// flag indicating whether tests should pass.
-
 	}{
 		{"minio-bucket", "", inputResponses[0], expectedErrResponse[0]},
 		{"minio-bucket", "", inputResponses[1], expectedErrResponse[1]},
diff --git a/api-get-object-file.go b/api-get-object-file.go
index 98f5acf6e..2332dbf10 100644
--- a/api-get-object-file.go
+++ b/api-get-object-file.go
@@ -57,7 +57,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
 	objectDir, _ := filepath.Split(filePath)
 	if objectDir != "" {
 		// Create any missing top level directories.
-		if err := os.MkdirAll(objectDir, 0700); err != nil {
+		if err := os.MkdirAll(objectDir, 0o700); err != nil {
 			return err
 		}
 	}
@@ -72,7 +72,7 @@ func (c *Client) FGetObject(ctx context.Context, bucketName, objectName, filePat
 	filePartPath := filePath + objectStat.ETag + ".part.minio"
 
 	// If exists, open in append mode. If not create it as a part file.
-	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
+	filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
 	if err != nil {
 		return err
 	}
diff --git a/api-list.go b/api-list.go
index 85209b5d2..9b2b00ae3 100644
--- a/api-list.go
+++ b/api-list.go
@@ -774,7 +774,6 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
 	}(objectMultipartStatCh)
 	// return.
 	return objectMultipartStatCh
-
 }
 
 // listMultipartUploadsQuery - (List Multipart Uploads).
diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go
index 333321aa3..342a8dc2b 100644
--- a/api-put-object-multipart.go
+++ b/api-put-object-multipart.go
@@ -38,7 +38,8 @@ import (
 )
 
 func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
-	opts PutObjectOptions) (info UploadInfo, err error) {
+	opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
 	if err != nil {
 		errResp := ToErrorResponse(err)
@@ -240,7 +241,8 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, object
 
 // uploadPart - Uploads a part in a multipart upload.
 func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
-	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) {
+	partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide,
+) (ObjectPart, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return ObjectPart{}, err
@@ -311,7 +313,8 @@ func (c *Client) uploadPart(ctx context.Context, bucketName, objectName, uploadI
 
 // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
 func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
-	complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) {
+	complete completeMultipartUpload, opts PutObjectOptions,
+) (UploadInfo, error) {
 	// Input validation.
 	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
@@ -392,5 +395,4 @@ func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, object
 		Expiration:       expTime,
 		ExpirationRuleID: ruleID,
 	}, nil
-
 }
diff --git a/api-put-object-streaming.go b/api-put-object-streaming.go
index 8ca81edef..2497aecf3 100644
--- a/api-put-object-streaming.go
+++ b/api-put-object-streaming.go
@@ -42,8 +42,8 @@ import (
 // - Any reader which has a method 'ReadAt()'
 //
 func (c *Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
-
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 {
 		// Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
 		info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
@@ -91,7 +91,8 @@ type uploadPartReq struct {
 // cleaned automatically when the caller i.e http client closes the
 // stream after uploading all the contents successfully.
 func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
-	reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	reader io.ReaderAt, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	// Input validation.
 	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
@@ -147,7 +148,7 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
 	}
 	close(uploadPartsCh)
 
-	var partsBuf = make([][]byte, opts.getNumThreads())
+	partsBuf := make([][]byte, opts.getNumThreads())
 	for i := range partsBuf {
 		partsBuf[i] = make([]byte, 0, partSize)
 	}
@@ -241,7 +242,8 @@
 func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string,
-	reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) {
+	reader io.Reader, size int64, opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	// Input validation.
 	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
 		return UploadInfo{}, err
diff --git a/api-put-object.go b/api-put-object.go
index e8a964e2a..0dc77e6c3 100644
--- a/api-put-object.go
+++ b/api-put-object.go
@@ -229,7 +229,8 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
 //
 // NOTE: Upon errors during upload multipart operation is entirely aborted.
 func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
-	opts PutObjectOptions) (info UploadInfo, err error) {
+	opts PutObjectOptions,
+) (info UploadInfo, err error) {
 	if objectSize < 0 && opts.DisableMultipart {
 		return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
 	}
diff --git a/api-putobject-snowball.go b/api-putobject-snowball.go
index f9feda30f..b7502e2d9 100644
--- a/api-putobject-snowball.go
+++ b/api-putobject-snowball.go
@@ -133,7 +133,7 @@ func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts
 			return f, st.Size(), nil
 		}
 	}
-	var flush = func() error { return nil }
+	flush := func() error { return nil }
 	if !opts.Compress {
 		if !opts.InMemory {
 			// Insert buffer for writes.
diff --git a/api-select.go b/api-select.go
index 74c1df5f1..5d47d7ec5 100644
--- a/api-select.go
+++ b/api-select.go
@@ -519,7 +519,7 @@ func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
 	go func() {
 		for {
 			var prelude preludeInfo
-			var headers = make(http.Header)
+			headers := make(http.Header)
 			var err error
 
 			// Create CRC code
@@ -624,7 +624,7 @@ func (p preludeInfo) PayloadLen() int64 {
 // the struct,
 func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) {
 	var err error
-	var pInfo = preludeInfo{}
+	pInfo := preludeInfo{}
 
 	// reads total length of the message (first 4 bytes)
 	pInfo.totalLen, err = extractUint32(prelude)
@@ -752,7 +752,6 @@ func checkCRC(r io.Reader, expect uint32) error {
 	if msgCRC != expect {
 		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X",
 			msgCRC, expect)
-
 	}
 	return nil
 }
diff --git a/api.go b/api.go
index fef0e8b2a..ee637bd07 100644
--- a/api.go
+++ b/api.go
@@ -537,7 +537,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
 
 	var retryable bool       // Indicates if request can be retried.
 	var bodySeeker io.Seeker // Extracted seeker from io.Reader.
-	var reqRetry = MaxRetry  // Indicates how many times we can retry the request
+	reqRetry := MaxRetry     // Indicates how many times we can retry the request
 
 	if metadata.contentBody != nil {
 		// Check if body is seekable then it is retryable.
diff --git a/bucket-cache_test.go b/bucket-cache_test.go
index 1f1a4421f..7c163e67d 100644
--- a/bucket-cache_test.go
+++ b/bucket-cache_test.go
@@ -126,7 +126,6 @@ func TestGetBucketLocationRequest(t *testing.T) {
 		}
 
 		return req, nil
-
 	}
 	// Info for 'Client' creation.
 	// Will be used as arguments for 'NewClient'.
diff --git a/core.go b/core.go
index fdba52744..c2a90239f 100644
--- a/core.go
+++ b/core.go
@@ -63,8 +63,8 @@ func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBu
 // CopyObjectPart - creates a part in a multipart upload by copying (a
 // part of) an existing object.
 func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
-	partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
-
+	partID int, startOffset, length int64, metadata map[string]string,
+) (p CompletePart, err error) {
 	return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID,
 		partID, startOffset, length, metadata)
 }
diff --git a/core_test.go b/core_test.go
index cb512fdc9..ff0256293 100644
--- a/core_test.go
+++ b/core_test.go
@@ -20,14 +20,13 @@ package minio
 
 import (
 	"bytes"
 	"context"
+	"math/rand"
 	"net/http"
 	"os"
 	"strconv"
 	"testing"
 	"time"
 
-	"math/rand"
-
 	"github.com/minio/minio-go/v7/pkg/credentials"
 )
diff --git a/examples/minio/getbucketreplicationmetrics.go b/examples/minio/getbucketreplicationmetrics.go
index 1f8b47ed6..db534b118 100644
--- a/examples/minio/getbucketreplicationmetrics.go
+++ b/examples/minio/getbucketreplicationmetrics.go
@@ -43,7 +43,6 @@ func main() {
 		Creds:  credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ""),
 		Secure: true,
 	})
-
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/examples/s3/getobjectacl.go b/examples/s3/getobjectacl.go
index 77c4f0b24..03b2e5223 100644
--- a/examples/s3/getobjectacl.go
+++ b/examples/s3/getobjectacl.go
@@ -48,13 +48,13 @@ func main() {
 		log.Fatalln(err)
 	}
 
-	//print object owner informations
+	// print object owner informations
 	fmt.Printf(`Object owner:
 Display name: %q
 ID: %q
 `, objectInfo.Owner.DisplayName, objectInfo.Owner.ID)
 
-	//print object grant informations
+	// print object grant informations
 	for _, g := range objectInfo.Grant {
 		fmt.Printf(`Object grant:
 - Display name: %q
@@ -64,7 +64,7 @@ ID: %q
 `, g.Grantee.DisplayName, g.Grantee.ID, g.Grantee.URI, g.Permission)
 	}
 
-	//print all value header (acl, metadata, standard header value...)
+	// print all value header (acl, metadata, standard header value...)
 	for k, v := range objectInfo.Metadata {
 		fmt.Println("key:", k)
 		fmt.Printf(" - value: %v\n", v)
diff --git a/examples/s3/putobject-progress.go b/examples/s3/putobject-progress.go
index d3e6aa0d2..1a9583ea7 100644
--- a/examples/s3/putobject-progress.go
+++ b/examples/s3/putobject-progress.go
@@ -62,7 +62,6 @@ func main() {
 	progress := pb.New64(objectInfo.Size)
 	progress.Start()
 	n, err := s3Client.PutObject(context.Background(), "my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress})
-
 	if err != nil {
 		log.Fatalln(err)
 	}
diff --git a/examples/s3/removebucket.go b/examples/s3/removebucket.go
index de1bfa344..4f0041478 100644
--- a/examples/s3/removebucket.go
+++ b/examples/s3/removebucket.go
@@ -51,5 +51,4 @@ func main() {
 		log.Fatalln(err)
 	}
 	log.Println("Success")
-
 }
diff --git a/functional_tests.go b/functional_tests.go
index e43e612b1..59f347eff 100644
--- a/functional_tests.go
+++ b/functional_tests.go
@@ -61,6 +61,7 @@ const (
 	letterIdxMask = 1<