diff --git a/CHANGELOG.md b/CHANGELOG.md index 0716130f6..6cd4d69a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - Added `--show-fullpath` flag to `ls`. ([#596](https://github.com/peak/s5cmd/issues/596)) - Added `pipe` command. ([#182](https://github.com/peak/s5cmd/issues/182)) - Added `--show-progress` flag to `cp` to show a progress bar. ([#51](https://github.com/peak/s5cmd/issues/51)) +- Added `--metadata` flag to `cp` and `pipe` to set arbitrary metadata for the objects. ([#537](https://github.com/peak/s5cmd/issues/537)) - Added `--include` flag to `cp`, `rm` and `sync` commands. ([#516](https://github.com/peak/s5cmd/issues/516)) #### Improvements diff --git a/command/cp.go b/command/cp.go index 36f71a3fe..3e608daf4 100644 --- a/command/cp.go +++ b/command/cp.go @@ -29,6 +29,7 @@ const ( defaultCopyConcurrency = 5 defaultPartSize = 50 // MiB megabytes = 1024 * 1024 + kilobytes = 1024 ) var copyHelpTemplate = `Name: @@ -109,6 +110,9 @@ Examples: 23. Download the specific version of a remote object to working directory > s5cmd {{.HelpName}} --version-id VERSION_ID s3://bucket/prefix/object . + + 24. Pass arbitrary metadata to the object during upload or copy + > s5cmd {{.HelpName}} --metadata "camera=Nixon D750" --metadata "imageSize=6032x4032" flowers.png s3://bucket/prefix/flowers.png ` func NewSharedFlags() []cli.Flag { @@ -133,6 +137,10 @@ func NewSharedFlags() []cli.Flag { Value: defaultPartSize, Usage: "size of each part transferred between host and remote server, in MiB", }, + &MapFlag{ + Name: "metadata", + Usage: "set arbitrary metadata for the object, e.g. --metadata 'foo=bar' --metadata 'fizz=buzz'", + }, &cli.StringFlag{ Name: "sse", Usage: "perform server side encryption of the data at its destination, e.g. 
aws:kms", @@ -296,6 +304,7 @@ type Copy struct { contentType string contentEncoding string contentDisposition string + metadata map[string]string showProgress bool progressbar progressbar.ProgressBar @@ -338,6 +347,13 @@ func NewCopy(c *cli.Context, deleteSource bool) (*Copy, error) { commandProgressBar = &progressbar.NoOp{} } + metadata, ok := c.Value("metadata").(MapValue) + if !ok { + err := errors.New("metadata flag is not a map") + printError(fullCommand, c.Command.Name, err) + return nil, err + } + return &Copy{ src: src, dst: dst, @@ -365,6 +381,7 @@ func NewCopy(c *cli.Context, deleteSource bool) (*Copy, error) { contentType: c.String("content-type"), contentEncoding: c.String("content-encoding"), contentDisposition: c.String("content-disposition"), + metadata: metadata, showProgress: c.Bool("show-progress"), progressbar: commandProgressBar, @@ -497,11 +514,11 @@ func (c Copy) Run(ctx context.Context) error { switch { case srcurl.Type == c.dst.Type: // local->local or remote->remote - task = c.prepareCopyTask(ctx, srcurl, c.dst, isBatch) + task = c.prepareCopyTask(ctx, srcurl, c.dst, isBatch, c.metadata) case srcurl.IsRemote(): // remote->local task = c.prepareDownloadTask(ctx, srcurl, c.dst, isBatch) case c.dst.IsRemote(): // local->remote - task = c.prepareUploadTask(ctx, srcurl, c.dst, isBatch) + task = c.prepareUploadTask(ctx, srcurl, c.dst, isBatch, c.metadata) default: panic("unexpected src-dst pair") } @@ -518,10 +535,11 @@ func (c Copy) prepareCopyTask( srcurl *url.URL, dsturl *url.URL, isBatch bool, + metadata map[string]string, ) func() error { return func() error { dsturl = prepareRemoteDestination(srcurl, dsturl, c.flatten, isBatch) - err := c.doCopy(ctx, srcurl, dsturl) + err := c.doCopy(ctx, srcurl, dsturl, metadata) if err != nil { return &errorpkg.Error{ Op: c.op, @@ -565,10 +583,11 @@ func (c Copy) prepareUploadTask( srcurl *url.URL, dsturl *url.URL, isBatch bool, + metadata map[string]string, ) func() error { return func() error { dsturl = prepareRemoteDestination(srcurl, dsturl, c.flatten, isBatch) - err := c.doUpload(ctx, srcurl, dsturl) + err := c.doUpload(ctx, srcurl, dsturl, metadata) if err != nil { return &errorpkg.Error{ Op: c.op, @@ -644,7 +663,7 @@ func (c Copy) doDownload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) return nil } -func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) error { +func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL, extradata map[string]string) error { srcClient := storage.NewLocalClient(c.storageOpts) file, err := srcClient.Open(srcurl.Absolute()) @@ -670,29 +689,29 @@ func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) er if err != nil { return err } + metadata := storage.Metadata{UserDefined: extradata} - metadata := storage.NewMetadata(). - SetStorageClass(string(c.storageClass)). - SetSSE(c.encryptionMethod). - SetSSEKeyID(c.encryptionKeyID). - SetACL(c.acl). - SetCacheControl(c.cacheControl). 
- SetExpires(c.expires) + if c.storageClass != "" { + metadata.StorageClass = string(c.storageClass) + } if c.contentType != "" { - metadata.SetContentType(c.contentType) + metadata.ContentType = c.contentType } else { - metadata.SetContentType(guessContentType(file)) + metadata.ContentType = guessContentType(file) } if c.contentEncoding != "" { - metadata.SetContentEncoding(c.contentEncoding) + metadata.ContentEncoding = c.contentEncoding } + if c.contentDisposition != "" { - metadata.SetContentDisposition(c.contentDisposition) + metadata.ContentDisposition = c.contentDisposition } + reader := newCountingReaderWriter(file, c.progressbar) err = dstClient.Put(ctx, reader, dsturl, metadata, c.concurrency, c.partSize) + if err != nil { return err } @@ -726,7 +745,7 @@ func (c Copy) doUpload(ctx context.Context, srcurl *url.URL, dsturl *url.URL) er return nil } -func (c Copy) doCopy(ctx context.Context, srcurl, dsturl *url.URL) error { +func (c Copy) doCopy(ctx context.Context, srcurl, dsturl *url.URL, extradata map[string]string) error { // override destination region if set if c.dstRegion != "" { c.storageOpts.SetRegion(c.dstRegion) @@ -736,22 +755,20 @@ func (c Copy) doCopy(ctx context.Context, srcurl, dsturl *url.URL) error { return err } - metadata := storage.NewMetadata(). - SetStorageClass(string(c.storageClass)). - SetSSE(c.encryptionMethod). - SetSSEKeyID(c.encryptionKeyID). - SetACL(c.acl). - SetCacheControl(c.cacheControl). - SetExpires(c.expires) + metadata := storage.Metadata{UserDefined: extradata} + if c.storageClass != "" { + metadata.StorageClass = string(c.storageClass) + } if c.contentType != "" { - metadata.SetContentType(c.contentType) + metadata.ContentType = c.contentType } + if c.contentEncoding != "" { - metadata.SetContentEncoding(c.contentEncoding) + metadata.ContentEncoding = c.contentEncoding } if c.contentDisposition != "" { - metadata.SetContentDisposition(c.contentDisposition) + metadata.ContentDisposition = c.contentDisposition } err = c.shouldOverride(ctx, srcurl, dsturl) diff --git a/command/flag.go b/command/flag.go index f29cdb08b..256b546f7 100644 --- a/command/flag.go +++ b/command/flag.go @@ -1,8 +1,11 @@ package command import ( + "flag" "fmt" "strings" + + "github.com/urfave/cli/v2" ) type EnumValue struct { @@ -41,3 +44,127 @@ func (e EnumValue) String() string { func (e EnumValue) Get() interface{} { return e } + +type MapValue map[string]string + +func (m MapValue) String() string { + if m == nil { + m = make(map[string]string) + } + + var s strings.Builder + for key, value := range m { + s.WriteString(fmt.Sprintf("%s=%s ", key, value)) + } + + return s.String() +} + +func (m MapValue) Set(s string) error { + if m == nil { + m = make(map[string]string) + } + + if len(s) == 0 { + return fmt.Errorf("flag can't be passed empty. 
Format: key=value") + } + + tokens := strings.Split(s, "=") + if len(tokens) <= 1 { + return fmt.Errorf("the key value pair(%s) has invalid format", tokens) + } + + key := tokens[0] + value := strings.Join(tokens[1:], "=") + + _, ok := m[key] + if ok { + return fmt.Errorf("key %q is already defined", key) + } + + m[key] = value + return nil +} + +func (m MapValue) Get() interface{} { + if m == nil { + m = make(map[string]string) + } + return m +} + +type MapFlag struct { + Name string + + Category string + DefaultText string + FilePath string + Usage string + + HasBeenSet bool + Required bool + Hidden bool + + Value MapValue +} + +var ( + _ cli.Flag = (*MapFlag)(nil) + _ cli.RequiredFlag = (*MapFlag)(nil) + _ cli.VisibleFlag = (*MapFlag)(nil) + _ cli.DocGenerationFlag = (*MapFlag)(nil) +) + +func (f *MapFlag) Apply(set *flag.FlagSet) error { + if f.Value == nil { + f.Value = make(map[string]string) + } + for _, name := range f.Names() { + set.Var(f.Value, name, f.Usage) + if len(f.Value) > 0 { + f.HasBeenSet = true + } + } + + return nil +} + +func (f *MapFlag) GetUsage() string { + return f.Usage +} + +func (f *MapFlag) Names() []string { + return []string{f.Name} +} + +func (f *MapFlag) IsSet() bool { + return f.HasBeenSet +} + +func (f *MapFlag) IsVisible() bool { + return true +} + +func (f *MapFlag) String() string { + return cli.FlagStringer(f) +} + +func (f *MapFlag) TakesValue() bool { + return true +} + +func (f *MapFlag) GetValue() string { + return f.Value.String() +} + +func (f *MapFlag) GetDefaultText() string { + return "" +} + +func (f *MapFlag) GetEnvVars() []string { + return []string{} +} + +func (f *MapFlag) IsRequired() bool { + return f.Required +} diff --git a/command/pipe.go b/command/pipe.go index 77370a18e..dfbaeb01e 100644 --- a/command/pipe.go +++ b/command/pipe.go @@ -2,6 +2,7 @@ package command import ( "context" + "errors" "fmt" "mime" "os" @@ -28,9 +29,11 @@ Options: Examples: 01. Stream stdin to an object > echo "content" | gzip | s5cmd {{.HelpName}} s3://bucket/prefix/object.gz - 02. Download an object and stream it to a bucket + 02. Pass arbitrary metadata to an object + > cat "flowers.png" | gzip | s5cmd {{.HelpName}} --metadata "imageSize=6032x4032" s3://bucket/prefix/flowers.gz + 03. Download an object and stream it to a bucket > curl https://github.com/peak/s5cmd/ | s5cmd {{.HelpName}} s3://bucket/s5cmd.html - 03. Compress an object and stream it to a bucket + 04. Compress an object and stream it to a bucket > tar -cf - file.bin | s5cmd {{.HelpName}} s3://bucket/file.bin.tar ` @@ -52,6 +55,10 @@ func NewPipeCommandFlags() []cli.Flag { Value: defaultPartSize, Usage: "size of each part transferred between host and remote server, in MiB", }, + &MapFlag{ + Name: "metadata", + Usage: "set arbitrary metadata for the object", + }, &cli.StringFlag{ Name: "sse", Usage: "perform server side encryption of the data at its destination, e.g. 
aws:kms", @@ -145,6 +152,7 @@ type Pipe struct { contentType string contentEncoding string contentDisposition string + metadata map[string]string // s3 options concurrency int @@ -162,6 +170,13 @@ func NewPipe(c *cli.Context, deleteSource bool) (*Pipe, error) { return nil, err } + metadata, ok := c.Value("metadata").(MapValue) + if !ok { + err := errors.New("metadata flag is not a map") + printError(fullCommand, c.Command.Name, err) + return nil, err + } + return &Pipe{ dst: dst, op: c.Command.Name, @@ -180,7 +195,7 @@ func NewPipe(c *cli.Context, deleteSource bool) (*Pipe, error) { contentType: c.String("content-type"), contentEncoding: c.String("content-encoding"), contentDisposition: c.String("content-disposition"), - + metadata: metadata, // s3 options storageOpts: NewStorageOpts(c), }, nil @@ -206,26 +221,22 @@ func (c Pipe) Run(ctx context.Context) error { return err } - metadata := storage.NewMetadata(). - SetStorageClass(string(c.storageClass)). - SetSSE(c.encryptionMethod). - SetSSEKeyID(c.encryptionKeyID). - SetACL(c.acl). - SetCacheControl(c.cacheControl). - SetExpires(c.expires) + metadata := storage.Metadata{UserDefined: c.metadata} + if c.storageClass != "" { + metadata.StorageClass = string(c.storageClass) + } if c.contentType != "" { - metadata.SetContentType(c.contentType) + metadata.ContentType = c.contentType } else { - metadata.SetContentType(guessContentTypeByExtension(c.dst)) + metadata.ContentType = guessContentTypeByExtension(c.dst) } if c.contentEncoding != "" { - metadata.SetContentEncoding(c.contentEncoding) + metadata.ContentEncoding = c.contentEncoding } - if c.contentDisposition != "" { - metadata.SetContentDisposition(c.contentDisposition) + metadata.ContentDisposition = c.contentDisposition } err = client.Put(ctx, &stdin{file: os.Stdin}, c.dst, metadata, c.concurrency, c.partSize) diff --git a/e2e/cp_test.go b/e2e/cp_test.go index 92773ede3..ed5a6f491 100644 --- a/e2e/cp_test.go +++ b/e2e/cp_test.go @@ -33,6 +33,7 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws" "gotest.tools/v3/assert" "gotest.tools/v3/fs" "gotest.tools/v3/icmd" @@ -719,6 +720,98 @@ func TestCopySingleFileToS3(t *testing.T) { assert.Assert(t, ensureS3Object(s3client, bucket, filename, content, ensureContentType(expectedContentType), ensureContentDisposition(expectedContentDisposition))) } +// cp dir/file s3://bucket/ --metadata key1=val1 --metadata key2=val2 ... +func TestCopySingleFileToS3WithArbitraryMetadata(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + const ( + // make sure that Put reads the file header and guess Content-Type correctly. + filename = "index" + content = ` + + + + +
+ + +` + foo = "Key1=foo" + bar = "Key2=bar" + ) + + // build assert map + metadata := map[string]*string{ + "Key1": aws.String("foo"), + "Key2": aws.String("bar"), + } + workdir := fs.NewDir(t, bucket, fs.WithFile(filename, content)) + defer workdir.Remove() + + srcpath := workdir.Join(filename) + dstpath := fmt.Sprintf("s3://%v/", bucket) + + srcpath = filepath.ToSlash(srcpath) + cmd := s5cmd("cp", "--metadata", foo, "--metadata", bar, srcpath, dstpath) + result := icmd.RunCmd(cmd) + result.Assert(t, icmd.Success) + + // assert local filesystem + expected := fs.Expected(t, fs.WithFile(filename, content)) + assert.Assert(t, fs.Equal(workdir.Path(), expected)) + + // assert S3 + assert.Assert(t, ensureS3Object(s3client, bucket, filename, content, ensureArbitraryMetadata(metadata))) +} + +// cp s3://bucket2/obj2 s3://bucket1/obj1 --metadata key1=val1 --metadata key2=val2 ... +func TestCopyS3ToS3WithArbitraryMetadata(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + const ( + filename = "index" + content = "things" + foo = "Key1=foo" + bar = "Key2=bar" + ) + + // build assert map + srcmetadata := map[string]*string{ + "Key1": aws.String("value1"), + "Key2": aws.String("value2"), + } + + dstmetadata := map[string]*string{ + "Key1": aws.String("foo"), + "Key2": aws.String("bar"), + } + srcpath := fmt.Sprintf("s3://%v/%v", bucket, filename) + dstpath := fmt.Sprintf("s3://%v/%v_cp", bucket, filename) + + putFileWithMetadata(t, s3client, bucket, filename, content, srcmetadata) + cmd := s5cmd("cp", "--metadata", foo, "--metadata", bar, srcpath, dstpath) + result := icmd.RunCmd(cmd) + result.Assert(t, icmd.Success) + + // assert S3 + assert.Assert(t, ensureS3Object(s3client, bucket, fmt.Sprintf("%s_cp", filename), content, ensureArbitraryMetadata(dstmetadata))) +} + func TestCopySingleFileToS3WithAdjacentSlashes(t *testing.T) { t.Parallel() diff --git a/e2e/pipe_test.go b/e2e/pipe_test.go index 04dd1596a..62527634c 100644 --- a/e2e/pipe_test.go +++ b/e2e/pipe_test.go @@ -6,6 +6,7 @@ import ( "runtime" "testing" + "github.com/aws/aws-sdk-go/aws" "gotest.tools/v3/assert" "gotest.tools/v3/icmd" ) @@ -370,6 +371,59 @@ func TestUploadStdinToS3JSON(t *testing.T) { assert.Assert(t, ensureS3Object(s3client, bucket, filename, content)) } +// cp dir/file s3://bucket/ --metadata key1=val1 --metadata key2=val2 ... +func TestPipeToS3WithArbitraryMetadata(t *testing.T) { + t.Parallel() + + s3client, s5cmd := setup(t) + + bucket := s3BucketFromTestName(t) + createBucket(t, s3client, bucket) + + const ( + // make sure that Put reads the file header and guess Content-Type correctly. + filename = "index" + content = ` + + + + +
+ + +` + foo = "Key1=foo" + bar = "Key2=bar" + ) + + // build assert map + metadata := map[string]*string{ + "Key1": aws.String("foo"), + "Key2": aws.String("bar"), + } + + reader := bytes.NewBufferString(content) + + dstpath := fmt.Sprintf("s3://%v/%v", bucket, filename) + + cmd := s5cmd("pipe", "--metadata", foo, "--metadata", bar, dstpath) + result := icmd.RunCmd(cmd, icmd.WithStdin(reader)) + result.Assert(t, icmd.Success) + + // assert local filesystem + assertLines(t, result.Stdout(), map[int]compareFunc{ + 0: suffix(`pipe %v`, dstpath), + }) + + // assert S3 + assert.Assert(t, ensureS3Object(s3client, bucket, filename, content, ensureArbitraryMetadata(metadata))) +} + // pipe --storage-class=GLACIER s3://bucket/object func TestUploadStdinToS3WithStorageClassGlacier(t *testing.T) { t.Parallel() diff --git a/e2e/util_test.go b/e2e/util_test.go index ba609f0ab..ac096827d 100644 --- a/e2e/util_test.go +++ b/e2e/util_test.go @@ -513,6 +513,7 @@ var errS3NoSuchKey = fmt.Errorf("s3: no such key") type ensureOpts struct { contentType *string contentDisposition *string + metadata map[string]*string storageClass *string } @@ -536,6 +537,12 @@ func ensureStorageClass(expected string) ensureOption { } } +func ensureArbitraryMetadata(metadata map[string]*string) ensureOption { + return func(opts *ensureOpts) { + opts.metadata = metadata + } +} + func ensureS3Object( client *s3.S3, bucket string, @@ -593,6 +600,16 @@ func ensureS3Object( } } + if opts.metadata != nil { + for mkey := range opts.metadata { + if opts.metadata[mkey] == nil || output.Metadata[mkey] == nil { + return fmt.Errorf("check the assertion keys of %v/%v key:%v\n", bucket, key, mkey) + } + if diff := cmp.Diff(*opts.metadata[mkey], *output.Metadata[mkey]); diff != "" { + return fmt.Errorf("arbitrary metadata of %v/%v: (-want +got):\n%v", bucket, key, diff) + } + } + } return nil } @@ -609,6 +626,20 @@ func putFile(t *testing.T, client *s3.S3, bucket string, filename string, conten } } +func putFileWithMetadata(t *testing.T, client *s3.S3, bucket string, filename string, content string, metadata map[string]*string) { + t.Helper() + + _, err := client.PutObject(&s3.PutObjectInput{ + Body: strings.NewReader(content), + Bucket: aws.String(bucket), + Key: aws.String(filename), + Metadata: metadata, + }) + if err != nil { + t.Fatal(err) + } +} + func replaceMatchWithSpace(input string, match ...string) string { for _, m := range match { if m == "" { diff --git a/storage/s3.go b/storage/s3.go index 91eb77b89..890d5ed69 100644 --- a/storage/s3.go +++ b/storage/s3.go @@ -502,31 +502,22 @@ func (s *S3) Copy(ctx context.Context, from, to *url.URL, metadata Metadata) err input.CopySource = aws.String(copySource + "?versionId=" + from.VersionID) } - storageClass := metadata.StorageClass() + storageClass := metadata.StorageClass if storageClass != "" { input.StorageClass = aws.String(storageClass) } - sseEncryption := metadata.SSE() - if sseEncryption != "" { - input.ServerSideEncryption = aws.String(sseEncryption) - sseKmsKeyID := metadata.SSEKeyID() - if sseKmsKeyID != "" { - input.SSEKMSKeyId = aws.String(sseKmsKeyID) - } - } - - acl := metadata.ACL() + acl := metadata.ACL if acl != "" { input.ACL = aws.String(acl) } - cacheControl := metadata.CacheControl() + cacheControl := metadata.CacheControl if cacheControl != "" { input.CacheControl = aws.String(cacheControl) } - expires := metadata.Expires() + expires := metadata.Expires if expires != "" { t, err := time.Parse(time.RFC3339, expires) if err != nil { @@ -535,6 +526,38 @@ func (s *S3) 
Copy(ctx context.Context, from, to *url.URL, metadata Metadata) err input.Expires = aws.Time(t) } + sseEncryption := metadata.EncryptionMethod + if sseEncryption != "" { + input.ServerSideEncryption = aws.String(sseEncryption) + sseKmsKeyID := metadata.EncryptionKeyID + if sseKmsKeyID != "" { + input.SSEKMSKeyId = aws.String(sseKmsKeyID) + } + } + + contentEncoding := metadata.ContentEncoding + if contentEncoding != "" { + input.ContentEncoding = aws.String(contentEncoding) + } + + contentDisposition := metadata.ContentDisposition + if contentDisposition != "" { + input.ContentDisposition = aws.String(contentDisposition) + } + + // add retry ID to the object metadata + if s.noSuchUploadRetryCount > 0 { + input.Metadata[metadataKeyRetryID] = generateRetryID() + } + + if len(metadata.UserDefined) != 0 { + m := make(map[string]*string) + for k, v := range metadata.UserDefined { + m[k] = aws.String(v) + } + input.Metadata = m + } + _, err := s.api.CopyObject(input) return err } @@ -687,7 +710,7 @@ func (s *S3) Put( return nil } - contentType := metadata.ContentType() + contentType := metadata.ContentType if contentType == "" { contentType = "application/octet-stream" } @@ -701,21 +724,21 @@ func (s *S3) Put( RequestPayer: s.RequestPayer(), } - storageClass := metadata.StorageClass() + storageClass := metadata.StorageClass if storageClass != "" { input.StorageClass = aws.String(storageClass) } - acl := metadata.ACL() + acl := metadata.ACL if acl != "" { input.ACL = aws.String(acl) } - cacheControl := metadata.CacheControl() + cacheControl := metadata.CacheControl if cacheControl != "" { input.CacheControl = aws.String(cacheControl) } - expires := metadata.Expires() + expires := metadata.Expires if expires != "" { t, err := time.Parse(time.RFC3339, expires) if err != nil { @@ -724,21 +747,21 @@ func (s *S3) Put( input.Expires = aws.Time(t) } - sseEncryption := metadata.SSE() + sseEncryption := metadata.EncryptionMethod if sseEncryption != "" { input.ServerSideEncryption = aws.String(sseEncryption) - sseKmsKeyID := metadata.SSEKeyID() + sseKmsKeyID := metadata.EncryptionKeyID if sseKmsKeyID != "" { input.SSEKMSKeyId = aws.String(sseKmsKeyID) } } - contentEncoding := metadata.ContentEncoding() + contentEncoding := metadata.ContentEncoding if contentEncoding != "" { input.ContentEncoding = aws.String(contentEncoding) } - contentDisposition := metadata.ContentDisposition() + contentDisposition := metadata.ContentDisposition if contentDisposition != "" { input.ContentDisposition = aws.String(contentDisposition) } @@ -748,6 +771,14 @@ func (s *S3) Put( input.Metadata[metadataKeyRetryID] = generateRetryID() } + if len(metadata.UserDefined) != 0 { + m := make(map[string]*string) + for k, v := range metadata.UserDefined { + m[k] = aws.String(v) + } + input.Metadata = m + } + uploaderOptsFn := func(u *s3manager.Uploader) { u.PartSize = partSize u.Concurrency = concurrency diff --git a/storage/s3_test.go b/storage/s3_test.go index 73e72813b..aee355b1c 100644 --- a/storage/s3_test.go +++ b/storage/s3_test.go @@ -638,7 +638,7 @@ func TestS3RetryOnNoSuchUpload(t *testing.T) { atomic.AddInt32(atomicCounter, 1) }) - mockS3.Put(ctx, strings.NewReader(""), url, NewMetadata(), s3manager.DefaultUploadConcurrency, s3manager.DefaultUploadPartSize) + mockS3.Put(ctx, strings.NewReader(""), url, Metadata{}, s3manager.DefaultUploadConcurrency, s3manager.DefaultUploadPartSize) // +1 is for the original request // *2 is to account for the "Stat" requests that are made to obtain @@ -745,7 +745,10 @@ func 
TestS3CopyEncryptionRequest(t *testing.T) { api: mockAPI, } - metadata := NewMetadata().SetSSE(tc.sse).SetSSEKeyID(tc.sseKeyID).SetACL(tc.acl) + metadata := Metadata{} + metadata.EncryptionMethod = tc.sse + metadata.EncryptionKeyID = tc.sseKeyID + metadata.ACL = tc.acl err = mockS3.Copy(context.Background(), u, u, metadata) @@ -838,7 +841,10 @@ func TestS3PutEncryptionRequest(t *testing.T) { uploader: s3manager.NewUploaderWithClient(mockAPI), } - metadata := NewMetadata().SetSSE(tc.sse).SetSSEKeyID(tc.sseKeyID).SetACL(tc.acl) + metadata := Metadata{} + metadata.EncryptionMethod = tc.sse + metadata.EncryptionKeyID = tc.sseKeyID + metadata.ACL = tc.acl err = mockS3.Put(context.Background(), bytes.NewReader([]byte("")), u, metadata, 1, 5242880) diff --git a/storage/storage.go b/storage/storage.go index e20332636..8de7d7c33 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -219,92 +219,18 @@ func (s StorageClass) IsGlacier() bool { return s == "GLACIER" } -type Metadata map[string]string - -// NewMetadata will return an empty metadata object. -func NewMetadata() Metadata { - return Metadata{} -} - -func (m Metadata) ACL() string { - return m["ACL"] -} - -func (m Metadata) SetACL(acl string) Metadata { - m["ACL"] = acl - return m -} - -func (m Metadata) CacheControl() string { - return m["CacheControl"] -} - -func (m Metadata) SetCacheControl(cacheControl string) Metadata { - m["CacheControl"] = cacheControl - return m -} - -func (m Metadata) Expires() string { - return m["Expires"] -} - -func (m Metadata) SetExpires(expires string) Metadata { - m["Expires"] = expires - return m -} - -func (m Metadata) StorageClass() string { - return m["StorageClass"] -} - -func (m Metadata) SetStorageClass(class string) Metadata { - m["StorageClass"] = class - return m -} - -func (m Metadata) ContentType() string { - return m["ContentType"] -} - -func (m Metadata) SetContentType(contentType string) Metadata { - m["ContentType"] = contentType - return m -} - -func (m Metadata) SetContentDisposition(contentDisposition string) Metadata { - m["ContentDisposition"] = contentDisposition - return m -} - -func (m Metadata) ContentDisposition() string { - return m["ContentDisposition"] -} - -func (m Metadata) SSE() string { - return m["EncryptionMethod"] -} - -func (m Metadata) SetSSE(sse string) Metadata { - m["EncryptionMethod"] = sse - return m -} - -func (m Metadata) SSEKeyID() string { - return m["EncryptionKeyID"] -} - -func (m Metadata) SetSSEKeyID(kid string) Metadata { - m["EncryptionKeyID"] = kid - return m -} - -func (m Metadata) ContentEncoding() string { - return m["ContentEncoding"] -} - -func (m Metadata) SetContentEncoding(contentEncoding string) Metadata { - m["ContentEncoding"] = contentEncoding - return m +type Metadata struct { + ACL string + CacheControl string + Expires string + StorageClass string + ContentType string + ContentDisposition string + EncryptionMethod string + EncryptionKeyID string + ContentEncoding string + + UserDefined map[string]string } func (o Object) ToBytes() []byte {
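
For readers skimming the patch, the non-obvious part is how the new `--metadata` flag parses its input. Below is a minimal, self-contained sketch that mirrors the `MapValue.Set` rules added in `command/flag.go` above: empty input is rejected, the pair is split on `=`, only the first token is treated as the key (so values may themselves contain `=`), and duplicate keys are rejected. The `parseMetadata` helper is hypothetical, written only for illustration; it is not part of the patch.

```go
package main

import (
	"fmt"
	"strings"
)

// parseMetadata mirrors the MapValue.Set rules in command/flag.go:
// reject empty input, split on "=", treat the first token as the key,
// rejoin the rest so values may contain "=", and reject duplicate keys.
func parseMetadata(dst map[string]string, s string) error {
	if len(s) == 0 {
		return fmt.Errorf("flag can't be passed empty. Format: key=value")
	}

	tokens := strings.Split(s, "=")
	if len(tokens) <= 1 {
		return fmt.Errorf("the key value pair(%s) has invalid format", tokens)
	}

	key := tokens[0]
	value := strings.Join(tokens[1:], "=")

	if _, ok := dst[key]; ok {
		return fmt.Errorf("key %q is already defined", key)
	}

	dst[key] = value
	return nil
}

func main() {
	m := make(map[string]string)

	// Roughly what `--metadata "imageSize=6032x4032" --metadata "query=a=b"` feeds in.
	for _, pair := range []string{"imageSize=6032x4032", "query=a=b", "imageSize=800x600"} {
		if err := parseMetadata(m, pair); err != nil {
			// the duplicate "imageSize" key is rejected
			fmt.Println("rejected:", err)
		}
	}

	fmt.Println(m) // map[imageSize:6032x4032 query:a=b]
}
```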
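A second sketch shows how the reworked `storage.Metadata` value is consumed: well-known headers become plain struct fields that are only set when non-empty, while the `--metadata` pairs travel in `UserDefined` and are copied into the request's user metadata map, as `S3.Put` and `S3.Copy` now do in `storage/s3.go`. The `Metadata` struct here is a trimmed, hypothetical copy for illustration; field names follow `storage/storage.go`, but only a subset is shown.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// Metadata is a trimmed, illustrative copy of the struct in storage/storage.go:
// well-known headers are plain fields, --metadata pairs live in UserDefined.
type Metadata struct {
	ContentType  string
	StorageClass string
	UserDefined  map[string]string
}

func main() {
	metadata := Metadata{
		ContentType:  "image/png",
		StorageClass: "STANDARD",
		UserDefined:  map[string]string{"imageSize": "6032x4032"},
	}

	input := &s3.PutObjectInput{
		Bucket:      aws.String("bucket"),
		Key:         aws.String("prefix/flowers.png"),
		ContentType: aws.String(metadata.ContentType),
	}

	// Optional headers are only set when non-empty, as in S3.Put.
	if metadata.StorageClass != "" {
		input.StorageClass = aws.String(metadata.StorageClass)
	}

	// User-defined pairs become the request's x-amz-meta-* entries.
	if len(metadata.UserDefined) != 0 {
		m := make(map[string]*string)
		for k, v := range metadata.UserDefined {
			m[k] = aws.String(v)
		}
		input.Metadata = m
	}

	fmt.Println(input)
}
```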