// Package s3store provides a storage backend using AWS S3 or compatible servers.
//
// # Configuration
//
// In order to allow this backend to function properly, the user accessing the
// bucket must have at least the following AWS IAM policy permissions for the
// bucket and all of its subresources:
//
// s3:AbortMultipartUpload
// s3:DeleteObject
// s3:GetObject
// s3:ListMultipartUploadParts
// s3:PutObject
//
// While this package uses the official AWS SDK for Go, S3Store is able
// to work with any S3-compatible service such as MinIO. To change the
// HTTP endpoint used for sending requests, adjust the `BaseEndpoint`
// option in the AWS SDK For Go V2 (https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3#Options).
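//
// As a rough sketch (the endpoint URL and bucket name below are placeholders,
// and an already loaded aws.Config named cfg is assumed), a client for an
// S3-compatible service could be constructed like this:
//
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.BaseEndpoint = aws.String("https://minio.example.com")
//		o.UsePathStyle = true // many S3-compatible services expect path-style addressing
//	})
//	store := s3store.New("my-bucket", client)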
//
// # Implementation
//
// Once a new tus upload is initiated, multiple objects in S3 are created:
//
// First of all, a new info object is stored which contains a JSON-encoded blob
// of general information about the upload, including its size and meta data.
// These objects have the suffix ".info" in their key.
//
// In addition, a new multipart upload
// (http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) is
// created. Whenever a new chunk is uploaded to tusd using a PATCH request, a
// new part is pushed to the multipart upload on S3.
//
// If meta data is associated with the upload during creation, it will be added
// to the multipart upload and after finishing it, the meta data will be passed
// to the final object. However, the metadata which will be attached to the
// final object can only contain ASCII characters and every non-ASCII character
// will be replaced by a question mark (for example, "Menü" will be "Men?").
// This restriction does not apply to the metadata returned by the GetInfo
// function since it relies on the info object for reading the metadata.
// Therefore, HEAD responses will always contain the unchanged metadata, Base64-
// encoded, even if it contains non-ASCII characters.
//
// Once the upload is finished, the multipart upload is completed, resulting in
// the entire file being stored in the bucket. The info object, containing
// meta data, is not deleted. It is recommended to copy the finished upload to
// another bucket to avoid it being deleted by the Termination extension.
//
// If an upload is about to be terminated, the multipart upload is aborted
// which removes all of the uploaded parts from the bucket. In addition, the
// info object is also deleted. If the upload has been finished already, the
// finished object containing the entire upload is also removed.
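//
// For illustration (the IDs below are made up), an upload whose object ID is
// "abc" and whose multipart upload ID is "def" is exposed to tus clients under
// the upload ID "abc+def" and uses roughly the following object keys:
//
//	abc       holds the finished file (prefixed with ObjectPrefix)
//	abc.info  holds the JSON-encoded upload information (prefixed with MetadataObjectPrefix, falling back to ObjectPrefix)
//	abc.part  optionally holds an incomplete part (prefixed with MetadataObjectPrefix, falling back to ObjectPrefix)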
//
// # Considerations
//
// In order to support tus' principle of resumable upload, S3's Multipart-Uploads
// are internally used.
//
// When receiving a PATCH request, its body will be temporarily stored on disk.
// This is necessary to ensure that a single part meets the minimum part size
// and to allow the AWS SDK to calculate a checksum. Once the part has been uploaded
// to S3, the temporary file will be removed immediately. Therefore, please
// ensure that the server running this storage backend has enough disk space
// available to hold these caches.
//
// In addition, it must be mentioned that AWS S3 only offers eventual
// consistency (https://docs.aws.amazon.com/AmazonS3/latest/dev/Introduction.html#ConsistencyModel).
// Therefore, it is required to put additional measures in place in order to
// prevent concurrent access to the same upload resources which may result in
// data corruption. See handler.LockerDataStore for more information.
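//
// As a minimal sketch (assuming the memorylocker package from this module is
// used; any other locker works as well, and the store and client from the
// Configuration sketch above are reused), the store can be combined with a
// locker like this:
//
//	composer := handler.NewStoreComposer()
//	store.UseIn(composer)
//	locker := memorylocker.New()
//	locker.UseIn(composer)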
package s3store
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/tus/tusd/v2/internal/semaphore"
"github.com/tus/tusd/v2/internal/uid"
"github.com/tus/tusd/v2/pkg/handler"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
)
// This regular expression matches every character which is not
// considered valid in a header value according to RFC 2616.
var nonPrintableRegexp = regexp.MustCompile(`[^\x09\x20-\x7E]`)
// See the handler.DataStore interface for documentation about the different
// methods.
type S3Store struct {
// Bucket used to store the data in, e.g. "tusdstore.example.com"
Bucket string
// ObjectPrefix is prepended to the name of each S3 object that is created
// to store uploaded files. It can be used to create a pseudo-directory
// structure in the bucket, e.g. "path/to/my/uploads".
ObjectPrefix string
// MetadataObjectPrefix is prepended to the name of each .info and .part S3
// object that is created. If it is not set, then ObjectPrefix is used.
MetadataObjectPrefix string
// Service specifies an interface used to communicate with the S3 backend.
// Usually, this is an instance of github.com/aws/aws-sdk-go-v2/service/s3.Client
// (https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3#Client).
Service S3API
// MaxPartSize specifies the maximum size of a single part uploaded to S3
// in bytes. This value must be bigger than MinPartSize! In order to
// choose the correct number, two things have to be kept in mind:
//
// If this value is too big and uploading the part to S3 is interrupted
// unexpectedly, the entire part is discarded and the end user is required
// to resume the upload and re-upload the entire big part. In addition, the
// entire part must be written to disk before submitting to S3.
//
// If this value is too low, a lot of requests to S3 may be made, depending
// on how fast data is coming in. This may result in increased overhead.
MaxPartSize int64
// MinPartSize specifies the minimum size of a single part uploaded to S3
// in bytes. This number needs to match with the underlying S3 backend or else
// uploaded parts will be rejected. AWS S3, for example, uses 5MB for this value.
MinPartSize int64
// PreferredPartSize specifies the preferred size of a single part uploaded to
// S3. S3Store will attempt to slice the incoming data into parts with this
// size whenever possible. In some cases, smaller parts are necessary, so
// not every part may reach this value. The PreferredPartSize must be inside the
// range of MinPartSize to MaxPartSize.
PreferredPartSize int64
// MaxMultipartParts is the maximum number of parts an S3 multipart upload is
// allowed to have according to AWS S3 API specifications.
// See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
MaxMultipartParts int64
// MaxObjectSize is the maximum size an S3 Object can have according to S3
// API specifications. See link above.
MaxObjectSize int64
// MaxBufferedParts is the number of additional parts that can be received from
// the client and stored on disk while a part is being uploaded to S3. This
// can help improve throughput by not blocking the client while tusd is
// communicating with the S3 API, which can have unpredictable latency.
MaxBufferedParts int64
// TemporaryDirectory is the path where S3Store will create temporary files
// on disk during the upload. An empty string ("", the default value) will
// cause S3Store to use the operating system's default temporary directory.
TemporaryDirectory string
// DisableContentHashes instructs the S3Store to not calculate the MD5 and SHA256
// hashes when uploading data to S3. These hashes are used for file integrity checks
// and for authentication. However, these hashes also consume a significant amount of
// CPU, so it might be desirable to disable them.
// Note that this property is experimental and might be removed in the future!
DisableContentHashes bool
// uploadSemaphore limits the number of concurrent multipart part uploads to S3.
uploadSemaphore semaphore.Semaphore
// requestDurationMetric holds the prometheus instance for storing the request durations.
requestDurationMetric *prometheus.SummaryVec
// diskWriteDurationMetric holds the prometheus instance for storing the time it takes to write chunks to disk.
diskWriteDurationMetric prometheus.Summary
// uploadSemaphoreDemandMetric holds the prometheus instance for storing the demand on the upload semaphore
uploadSemaphoreDemandMetric prometheus.Gauge
// uploadSemaphoreLimitMetric holds the prometheus instance for storing the limit on the upload semaphore
uploadSemaphoreLimitMetric prometheus.Gauge
}
// The labels to use for observing and storing request duration. One label per operation.
const (
metricGetInfoObject = "get_info_object"
metricPutInfoObject = "put_info_object"
metricCreateMultipartUpload = "create_multipart_upload"
metricCompleteMultipartUpload = "complete_multipart_upload"
metricUploadPart = "upload_part"
metricListParts = "list_parts"
metricHeadPartObject = "head_part_object"
metricGetPartObject = "get_part_object"
metricPutPartObject = "put_part_object"
metricDeletePartObject = "delete_part_object"
)
type S3API interface {
PutObject(ctx context.Context, input *s3.PutObjectInput, opt ...func(*s3.Options)) (*s3.PutObjectOutput, error)
ListParts(ctx context.Context, input *s3.ListPartsInput, opt ...func(*s3.Options)) (*s3.ListPartsOutput, error)
UploadPart(ctx context.Context, input *s3.UploadPartInput, opt ...func(*s3.Options)) (*s3.UploadPartOutput, error)
GetObject(ctx context.Context, input *s3.GetObjectInput, opt ...func(*s3.Options)) (*s3.GetObjectOutput, error)
HeadObject(ctx context.Context, input *s3.HeadObjectInput, opt ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
CreateMultipartUpload(ctx context.Context, input *s3.CreateMultipartUploadInput, opt ...func(*s3.Options)) (*s3.CreateMultipartUploadOutput, error)
AbortMultipartUpload(ctx context.Context, input *s3.AbortMultipartUploadInput, opt ...func(*s3.Options)) (*s3.AbortMultipartUploadOutput, error)
DeleteObject(ctx context.Context, input *s3.DeleteObjectInput, opt ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)
DeleteObjects(ctx context.Context, input *s3.DeleteObjectsInput, opt ...func(*s3.Options)) (*s3.DeleteObjectsOutput, error)
CompleteMultipartUpload(ctx context.Context, input *s3.CompleteMultipartUploadInput, opt ...func(*s3.Options)) (*s3.CompleteMultipartUploadOutput, error)
UploadPartCopy(ctx context.Context, input *s3.UploadPartCopyInput, opt ...func(*s3.Options)) (*s3.UploadPartCopyOutput, error)
}
// New constructs a new storage using the supplied bucket and service object.
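//
// The returned store uses conservative defaults (for example, a MinPartSize of
// 5 MiB and a PreferredPartSize of 50 MiB). A rough usage sketch, assuming
// s3Client is an already configured client and with illustrative values that
// are not recommendations:
//
//	store := s3store.New("my-bucket", s3Client)
//	store.ObjectPrefix = "uploads/"
//	store.PreferredPartSize = 16 * 1024 * 1024
//	store.SetConcurrentPartUploads(8)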
func New(bucket string, service S3API) S3Store {
requestDurationMetric := prometheus.NewSummaryVec(prometheus.SummaryOpts{
Name: "tusd_s3_request_duration_ms",
Help: "Duration of requests sent to S3 in milliseconds per operation",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
}, []string{"operation"})
diskWriteDurationMetric := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "tusd_s3_disk_write_duration_ms",
Help: "Duration of chunk writes to disk in milliseconds",
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})
uploadSemaphoreDemandMetric := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "tusd_s3_upload_semaphore_demand",
Help: "Number of goroutines wanting to acquire the upload lock or having it acquired",
})
uploadSemaphoreLimitMetric := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "tusd_s3_upload_semaphore_limit",
Help: "Limit of concurrent acquisitions of upload semaphore",
})
store := S3Store{
Bucket: bucket,
Service: service,
MaxPartSize: 5 * 1024 * 1024 * 1024,
MinPartSize: 5 * 1024 * 1024,
PreferredPartSize: 50 * 1024 * 1024,
MaxMultipartParts: 10000,
MaxObjectSize: 5 * 1024 * 1024 * 1024 * 1024,
MaxBufferedParts: 20,
TemporaryDirectory: "",
requestDurationMetric: requestDurationMetric,
diskWriteDurationMetric: diskWriteDurationMetric,
uploadSemaphoreDemandMetric: uploadSemaphoreDemandMetric,
uploadSemaphoreLimitMetric: uploadSemaphoreLimitMetric,
}
store.SetConcurrentPartUploads(10)
return store
}
// SetConcurrentPartUploads changes the limit on how many concurrent part uploads to S3 are allowed.
func (store *S3Store) SetConcurrentPartUploads(limit int) {
store.uploadSemaphore = semaphore.New(limit)
store.uploadSemaphoreLimitMetric.Set(float64(limit))
}
// UseIn sets this store as the core data store in the passed composer and adds
// all possible extension to it.
func (store S3Store) UseIn(composer *handler.StoreComposer) {
composer.UseCore(store)
composer.UseTerminater(store)
composer.UseConcater(store)
composer.UseLengthDeferrer(store)
}
func (store S3Store) RegisterMetrics(registry prometheus.Registerer) {
registry.MustRegister(store.requestDurationMetric)
registry.MustRegister(store.diskWriteDurationMetric)
registry.MustRegister(store.uploadSemaphoreDemandMetric)
registry.MustRegister(store.uploadSemaphoreLimitMetric)
}
func (store S3Store) observeRequestDuration(start time.Time, label string) {
elapsed := time.Since(start)
ms := float64(elapsed.Nanoseconds() / int64(time.Millisecond))
store.requestDurationMetric.WithLabelValues(label).Observe(ms)
}
type s3Upload struct {
// objectId is the object key under which we save the final file
objectId string
// multipartId is the ID given by S3 to us for the multipart upload
multipartId string
store *S3Store
// info stores the upload's current FileInfo struct. It may be nil if it hasn't
// been fetched yet from S3. Never read or write to it directly but instead use
// the GetInfo and writeInfo functions.
info *handler.FileInfo
// parts collects all parts for this upload. It will be nil if info is nil as well.
parts []*s3Part
// incompletePartSize is the size of an incomplete part object, if one exists. It will be 0 if info is nil as well.
incompletePartSize int64
}
// s3Part represents a single part of a S3 multipart upload.
type s3Part struct {
number int32
size int64
etag string
}
func (store S3Store) NewUpload(ctx context.Context, info handler.FileInfo) (handler.Upload, error) {
// an upload larger than MaxObjectSize must be rejected with an error
if info.Size > store.MaxObjectSize {
return nil, fmt.Errorf("s3store: upload size of %v bytes exceeds MaxObjectSize of %v bytes", info.Size, store.MaxObjectSize)
}
var objectId string
if info.ID == "" {
objectId = uid.Uid()
} else {
// certain tests set info.ID in advance
objectId = info.ID
}
// Copy the meta data, replacing characters that are not allowed in S3 metadata with a question mark.
metadata := make(map[string]string, len(info.MetaData))
for key, value := range info.MetaData {
metadata[key] = nonPrintableRegexp.ReplaceAllString(value, "?")
}
// Create the actual multipart upload
t := time.Now()
multipartUploadInput := &s3.CreateMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(objectId),
Metadata: metadata,
}
if fileType, found := info.MetaData["filetype"]; found {
multipartUploadInput.ContentType = aws.String(fileType)
}
res, err := store.Service.CreateMultipartUpload(ctx, multipartUploadInput)
store.observeRequestDuration(t, metricCreateMultipartUpload)
if err != nil {
return nil, fmt.Errorf("s3store: unable to create multipart upload:\n%s", err)
}
multipartId := *res.UploadId
info.ID = objectId + "+" + multipartId
info.Storage = map[string]string{
"Type": "s3store",
"Bucket": store.Bucket,
"Key": *store.keyWithPrefix(objectId),
}
upload := &s3Upload{objectId, multipartId, &store, nil, []*s3Part{}, 0}
err = upload.writeInfo(ctx, info)
if err != nil {
return nil, fmt.Errorf("s3store: unable to create info file:\n%s", err)
}
return upload, nil
}
func (store S3Store) GetUpload(ctx context.Context, id string) (handler.Upload, error) {
objectId, multipartId := splitIds(id)
if objectId == "" || multipartId == "" {
// If one of them is empty, it cannot be a valid ID.
return nil, handler.ErrNotFound
}
return &s3Upload{objectId, multipartId, &store, nil, []*s3Part{}, 0}, nil
}
func (store S3Store) AsTerminatableUpload(upload handler.Upload) handler.TerminatableUpload {
return upload.(*s3Upload)
}
func (store S3Store) AsLengthDeclarableUpload(upload handler.Upload) handler.LengthDeclarableUpload {
return upload.(*s3Upload)
}
func (store S3Store) AsConcatableUpload(upload handler.Upload) handler.ConcatableUpload {
return upload.(*s3Upload)
}
func (upload *s3Upload) writeInfo(ctx context.Context, info handler.FileInfo) error {
store := upload.store
upload.info = &info
infoJson, err := json.Marshal(info)
if err != nil {
return err
}
// Create object on S3 containing information about the file
t := time.Now()
_, err = store.Service.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(upload.objectId + ".info"),
Body: bytes.NewReader(infoJson),
ContentLength: aws.Int64(int64(len(infoJson))),
})
store.observeRequestDuration(t, metricPutInfoObject)
return err
}
func (upload *s3Upload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) {
store := upload.store
// Get the total size of the current upload, the number of existing parts (to
// generate the next part number) and whether an incomplete part exists
_, _, incompletePartSize, err := upload.getInternalInfo(ctx)
if err != nil {
return 0, err
}
if incompletePartSize > 0 {
incompletePartFile, err := store.downloadIncompletePartForUpload(ctx, upload.objectId)
if err != nil {
return 0, err
}
if incompletePartFile == nil {
return 0, fmt.Errorf("s3store: Expected an incomplete part file but did not get any")
}
defer cleanUpTempFile(incompletePartFile)
if err := store.deleteIncompletePartForUpload(ctx, upload.objectId); err != nil {
return 0, err
}
// Prepend the incomplete part, if one exists, and adapt the offset
src = io.MultiReader(incompletePartFile, src)
offset = offset - incompletePartSize
}
bytesUploaded, err := upload.uploadParts(ctx, offset, src)
// The size of the incomplete part should not be counted, because the
// processing of the incomplete part should be fully transparent to the user.
bytesUploaded = bytesUploaded - incompletePartSize
if bytesUploaded < 0 {
bytesUploaded = 0
}
upload.info.Offset += bytesUploaded
return bytesUploaded, err
}
func (upload *s3Upload) uploadParts(ctx context.Context, offset int64, src io.Reader) (int64, error) {
store := upload.store
// Get the total size of the current upload and the number of existing parts to generate the next part number
info, parts, _, err := upload.getInternalInfo(ctx)
if err != nil {
return 0, err
}
size := info.Size
bytesUploaded := int64(0)
optimalPartSize, err := store.calcOptimalPartSize(size)
if err != nil {
return 0, err
}
numParts := len(parts)
nextPartNum := int32(numParts + 1)
partProducer, fileChan := newS3PartProducer(src, store.MaxBufferedParts, store.TemporaryDirectory, store.diskWriteDurationMetric)
producerCtx, cancelProducer := context.WithCancel(ctx)
defer func() {
cancelProducer()
partProducer.closeUnreadFiles()
}()
go partProducer.produce(producerCtx, optimalPartSize)
var eg errgroup.Group
for {
// We acquire the semaphore before starting the goroutine to avoid
// starting many goroutines, most of which are just waiting for the lock.
// We also acquire the semaphore before reading from the channel to reduce
// the number of part files that are lying around on disk without being used.
upload.store.acquireUploadSemaphore()
fileChunk, more := <-fileChan
if !more {
upload.store.releaseUploadSemaphore()
break
}
partfile := fileChunk.reader
partsize := fileChunk.size
closePart := fileChunk.closeReader
isFinalChunk := !info.SizeIsDeferred && (size == offset+bytesUploaded+partsize)
if partsize >= store.MinPartSize || isFinalChunk {
part := &s3Part{
etag: "",
size: partsize,
number: nextPartNum,
}
upload.parts = append(upload.parts, part)
eg.Go(func() error {
defer upload.store.releaseUploadSemaphore()
t := time.Now()
uploadPartInput := &s3.UploadPartInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
PartNumber: aws.Int32(part.number),
}
etag, err := upload.putPartForUpload(ctx, uploadPartInput, partfile, part.size)
store.observeRequestDuration(t, metricUploadPart)
if err == nil {
part.etag = etag
}
cerr := closePart()
if err != nil {
return err
}
if cerr != nil {
return cerr
}
return nil
})
} else {
eg.Go(func() error {
defer upload.store.releaseUploadSemaphore()
err := store.putIncompletePartForUpload(ctx, upload.objectId, partfile)
if err == nil {
upload.incompletePartSize = partsize
}
cerr := closePart()
if err != nil {
return err
}
if cerr != nil {
return cerr
}
return nil
})
}
bytesUploaded += partsize
nextPartNum += 1
}
uploadErr := eg.Wait()
if uploadErr != nil {
return 0, uploadErr
}
return bytesUploaded, partProducer.err
}
func cleanUpTempFile(file *os.File) {
file.Close()
os.Remove(file.Name())
}
func (upload *s3Upload) putPartForUpload(ctx context.Context, uploadPartInput *s3.UploadPartInput, file io.ReadSeeker, size int64) (string, error) {
if !upload.store.DisableContentHashes {
// By default, use the traditional approach to upload data
uploadPartInput.Body = file
res, err := upload.store.Service.UploadPart(ctx, uploadPartInput)
if err != nil {
return "", err
}
return *res.ETag, nil
} else {
// Experimental feature to prevent the AWS SDK from calculating the SHA256 hash
// for the parts we upload to S3.
// We compute the presigned URL without the body attached and then send the request
// on our own. This way, the body is not included in the SHA256 calculation.
s3Client, ok := upload.store.Service.(*s3.Client)
if !ok {
return "", fmt.Errorf("s3store: failed to cast S3 service for presigning")
}
presignClient := s3.NewPresignClient(s3Client)
s3Req, err := presignClient.PresignUploadPart(ctx, uploadPartInput, func(opts *s3.PresignOptions) {
opts.Expires = 15 * time.Minute
})
if err != nil {
return "", fmt.Errorf("s3store: failed to presign UploadPart: %s", err)
}
req, err := http.NewRequest("PUT", s3Req.URL, file)
if err != nil {
return "", err
}
// Set the Content-Length manually to prevent the usage of Transfer-Encoding: chunked,
// which is not supported by AWS S3.
req.ContentLength = size
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != 200 {
buf := new(strings.Builder)
io.Copy(buf, res.Body)
return "", fmt.Errorf("s3store: unexpected response code %d for presigned upload: %s", res.StatusCode, buf.String())
}
return res.Header.Get("ETag"), nil
}
}
func (upload *s3Upload) GetInfo(ctx context.Context) (info handler.FileInfo, err error) {
info, _, _, err = upload.getInternalInfo(ctx)
return info, err
}
func (upload *s3Upload) getInternalInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
if upload.info != nil {
return *upload.info, upload.parts, upload.incompletePartSize, nil
}
info, parts, incompletePartSize, err = upload.fetchInfo(ctx)
if err != nil {
return info, parts, incompletePartSize, err
}
upload.info = &info
upload.parts = parts
upload.incompletePartSize = incompletePartSize
return info, parts, incompletePartSize, nil
}
func (upload s3Upload) fetchInfo(ctx context.Context) (info handler.FileInfo, parts []*s3Part, incompletePartSize int64, err error) {
store := upload.store
var wg sync.WaitGroup
wg.Add(3)
// We store all errors in here and handle them all together once the wait
// group is done.
var infoErr error
var partsErr error
var incompletePartSizeErr error
go func() {
defer wg.Done()
t := time.Now()
// Get file info stored in separate object
var res *s3.GetObjectOutput
res, infoErr = store.Service.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.metadataKeyWithPrefix(upload.objectId + ".info"),
})
store.observeRequestDuration(t, metricGetInfoObject)
if infoErr == nil {
infoErr = json.NewDecoder(res.Body).Decode(&info)
}
}()
go func() {
defer wg.Done()
// Get uploaded parts and their offset
parts, partsErr = store.listAllParts(ctx, upload.objectId, upload.multipartId)
}()
go func() {
defer wg.Done()
// Get size of optional incomplete part file.
incompletePartSize, incompletePartSizeErr = store.headIncompletePartForUpload(ctx, upload.objectId)
}()
wg.Wait()
// Finally, after all requests are complete, let's handle the errors
if infoErr != nil {
err = infoErr
// If the info file is not found, we consider the upload to be non-existent
if isAwsError[*types.NoSuchKey](err) {
err = handler.ErrNotFound
}
return
}
if partsErr != nil {
err = partsErr
// Check if the error is caused by the multipart upload not being found. This happens
// when the multipart upload has already been completed or aborted. Since
// we already found the info object, we know that the upload has been
// completed and can therefore ensure that the offset equals the size.
// AWS S3 returns NoSuchUpload, but other implementations, such as DigitalOcean
// Spaces, can also return NoSuchKey.
// The AWS Go SDK v2 has a bug where types.NoSuchUpload is not returned,
// so we also need to check the error code itself.
// See https://github.com/aws/aws-sdk-go-v2/issues/1635
// In addition, S3-compatible storages, like DigitalOcean Spaces, might cause
// types.NoSuchKey to not be returned as well.
if isAwsError[*types.NoSuchUpload](err) || isAwsErrorCode(err, "NoSuchUpload") || isAwsError[*types.NoSuchKey](err) || isAwsErrorCode(err, "NoSuchKey") {
info.Offset = info.Size
err = nil
}
return
}
if incompletePartSizeErr != nil {
err = incompletePartSizeErr
return
}
// The offset is the sum of all part sizes and the size of the incomplete part file.
offset := incompletePartSize
for _, part := range parts {
offset += part.size
}
info.Offset = offset
return info, parts, incompletePartSize, nil
}
func (upload s3Upload) GetReader(ctx context.Context) (io.ReadCloser, error) {
store := upload.store
// Attempt to get upload content
res, err := store.Service.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
})
if err == nil {
// No error occurred, and we are able to stream the object
return res.Body, nil
}
// If the file cannot be found, we ignore this error and continue since the
// upload may not have been finished yet. In this case we do not want to
// return an ErrNotFound but a more meaningful message.
if !isAwsError[*types.NoSuchKey](err) {
return nil, err
}
// Test whether the multipart upload exists to find out if the upload
// never existed or just has not been finished yet
_, err = store.Service.ListParts(ctx, &s3.ListPartsInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
MaxParts: aws.Int32(0),
})
if err == nil {
// The multipart upload still exists, which means we cannot download it yet
return nil, handler.NewError("ERR_INCOMPLETE_UPLOAD", "cannot stream non-finished upload", http.StatusBadRequest)
}
// The AWS Go SDK v2 has a bug where types.NoSuchUpload is not returned,
// so we also need to check the error code itself.
// See https://github.com/aws/aws-sdk-go-v2/issues/1635
if isAwsError[*types.NoSuchUpload](err) || isAwsErrorCode(err, "NoSuchUpload") {
// Neither the object nor the multipart upload exists, so we return a 404
return nil, handler.ErrNotFound
}
return nil, err
}
func (upload s3Upload) Terminate(ctx context.Context) error {
store := upload.store
var wg sync.WaitGroup
wg.Add(2)
errs := make([]error, 0, 3)
// errsLock guards the concurrent appends to errs from the two goroutines below.
var errsLock sync.Mutex
go func() {
defer wg.Done()
// Abort the multipart upload
_, err := store.Service.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
})
if err != nil && !isAwsError[*types.NoSuchUpload](err) {
errsLock.Lock()
errs = append(errs, err)
errsLock.Unlock()
}
}()
go func() {
defer wg.Done()
// Delete the info and content files
res, err := store.Service.DeleteObjects(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(store.Bucket),
Delete: &types.Delete{
Objects: []types.ObjectIdentifier{
{
Key: store.keyWithPrefix(upload.objectId),
},
{
Key: store.metadataKeyWithPrefix(upload.objectId + ".part"),
},
{
Key: store.metadataKeyWithPrefix(upload.objectId + ".info"),
},
},
Quiet: aws.Bool(true),
},
})
if err != nil {
errsLock.Lock()
errs = append(errs, err)
errsLock.Unlock()
return
}
for _, s3Err := range res.Errors {
if *s3Err.Code != "NoSuchKey" {
errsLock.Lock()
errs = append(errs, fmt.Errorf("AWS S3 Error (%s) for object %s: %s", *s3Err.Code, *s3Err.Key, *s3Err.Message))
errsLock.Unlock()
}
}
}()
wg.Wait()
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
func (upload s3Upload) FinishUpload(ctx context.Context) error {
store := upload.store
// Get uploaded parts
_, parts, _, err := upload.getInternalInfo(ctx)
if err != nil {
return err
}
if len(parts) == 0 {
// AWS expects at least one part to be present when completing the multipart
// upload. So if the tus upload has a size of 0, we create an empty part
// and use that for completing the multipart upload.
res, err := store.Service.UploadPart(ctx, &s3.UploadPartInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
PartNumber: aws.Int32(1),
Body: bytes.NewReader([]byte{}),
})
if err != nil {
return err
}
parts = []*s3Part{
{
etag: *res.ETag,
number: 1,
size: 0,
},
}
}
// Transform the []*s3Part slice to a []types.CompletedPart slice for the next
// request.
completedParts := make([]types.CompletedPart, len(parts))
for index, part := range parts {
completedParts[index] = types.CompletedPart{
ETag: aws.String(part.etag),
PartNumber: aws.Int32(part.number),
}
}
t := time.Now()
_, err = store.Service.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
MultipartUpload: &types.CompletedMultipartUpload{
Parts: completedParts,
},
})
store.observeRequestDuration(t, metricCompleteMultipartUpload)
return err
}
func (upload *s3Upload) ConcatUploads(ctx context.Context, partialUploads []handler.Upload) error {
hasSmallPart := false
for _, partialUpload := range partialUploads {
info, err := partialUpload.GetInfo(ctx)
if err != nil {
return err
}
if info.Size < upload.store.MinPartSize {
hasSmallPart = true
}
}
// If one partial upload is smaller than the minimum part size for an S3
// Multipart Upload, we cannot use S3 Multipart Uploads for concatenating all
// the files.
// So instead we have to download them and concat them on disk.
if hasSmallPart {
return upload.concatUsingDownload(ctx, partialUploads)
} else {
return upload.concatUsingMultipart(ctx, partialUploads)
}
}
func (upload *s3Upload) concatUsingDownload(ctx context.Context, partialUploads []handler.Upload) error {
store := upload.store
// Create a temporary file for holding the concatenated data
file, err := os.CreateTemp(store.TemporaryDirectory, "tusd-s3-concat-tmp-")
if err != nil {
return err
}
defer cleanUpTempFile(file)
// Download each part and append it to the temporary file
for _, partialUpload := range partialUploads {
partialS3Upload := partialUpload.(*s3Upload)
res, err := store.Service.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(partialS3Upload.objectId),
})
if err != nil {
return err
}
defer res.Body.Close()
if _, err := io.Copy(file, res.Body); err != nil {
return err
}
}
// Seek to the beginning of the file, so the entire file is being uploaded
file.Seek(0, 0)
// Upload the entire file to S3
_, err = store.Service.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
Body: file,
})
if err != nil {
return err
}
// Finally, abort the multipart upload since it will no longer be used.
// This happens asynchronously since we do not need to wait for the result.
// Also, the error is ignored on purpose as it does not change the outcome of
// the request.
go func() {
store.Service.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
})
}()
return nil
}
func (upload *s3Upload) concatUsingMultipart(ctx context.Context, partialUploads []handler.Upload) error {
store := upload.store
upload.parts = make([]*s3Part, len(partialUploads))
// Copy partial uploads concurrently
var eg errgroup.Group
for i, partialUpload := range partialUploads {
// Part numbers must be in the range of 1 to 10000, inclusive. Since
// slice indexes start at 0, we add 1 to ensure that partNumber >= 1.
partNumber := int32(i + 1)
partialS3Upload := partialUpload.(*s3Upload)
eg.Go(func() error {
res, err := store.Service.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
Bucket: aws.String(store.Bucket),
Key: store.keyWithPrefix(upload.objectId),
UploadId: aws.String(upload.multipartId),
PartNumber: aws.Int32(partNumber),
CopySource: aws.String(store.Bucket + "/" + *store.keyWithPrefix(partialS3Upload.objectId)),
})
if err != nil {