-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsfstreams.go
289 lines (249 loc) · 7.26 KB
/
sfstreams.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
package sfstreams
import (
"errors"
"fmt"
"io"
"sync"
"golang.org/x/sync/singleflight"
)
// ReaderResult carries the return values of Group.Do over the Group.DoChan channel.
type ReaderResult struct {
	// Err is the error returned by the work function, if any.
	Err error
	// Reader is this caller's distinct stream. It is nil when the work
	// function produced no usable reader (or returned an error).
	Reader io.ReadCloser
	// Shared indicates whether the result was shared among multiple callers,
	// mirroring singleflight's shared flag.
	Shared bool
}
// Group represents a singleflight stream group. This behaves just like a normal singleflight.Group,
// but guarantees a usable (distinct) io.ReadCloser to be returned for each call.
type Group struct {
	// sf deduplicates concurrent work-function invocations per key.
	sf singleflight.Group
	// mu guards calls.
	mu sync.Mutex
	// calls maps each in-flight key to the (buffered) result channels of all
	// callers currently waiting on that key's stream.
	calls map[string][]chan<- io.ReadCloser

	// Normally, the Group will copy the work function's returned reader, but in some cases it is
	// desirable to maintain the io.Seeker interface. When this option is set to true, the Group
	// no longer copies but instead returns proxy io.ReadSeekCloser readers that track their own
	// read state. Whenever one of those readers seeks/reads, it synchronously does so on the work
	// function's returned io.ReadSeekCloser. This can lead to performance bottlenecks if several
	// call sites are attempting to read/seek at the same time.
	//
	// If this is set to true, but the work function doesn't return an io.ReadSeekCloser, the copy
	// behaviour is used. When false (the default), the copy behaviour is always used.
	UseSeekers bool
}
// Do behaves just like singleflight.Group, with the added guarantee that the returned io.ReadCloser
// is unique to the caller. The caller is responsible for closing the returned reader. If the work
// function reader returns an error, all readers generated for the key will return an error too.
//
// The returned reader will discard all unread bytes upon being closed, preventing one failed reader
// from blocking all other readers. Callers should take care to ensure any returned reader gets closed.
//
// The io.ReadCloser generated by fn is closed internally.
func (g *Group) Do(key string, fn func() (io.ReadCloser, error)) (reader io.ReadCloser, err error, shared bool) {
	g.mu.Lock()
	// Lazily initialize the per-key caller registry.
	if g.calls == nil {
		g.calls = make(map[string][]chan<- io.ReadCloser)
	}
	if _, ok := g.calls[key]; !ok {
		g.calls[key] = make([]chan<- io.ReadCloser, 0)
	}
	// Buffered (cap 1) so the work function can deliver this caller's reader
	// without blocking on us; closed once Do returns.
	resCh := make(chan io.ReadCloser, 1)
	defer close(resCh)
	g.calls[key] = append(g.calls[key], resCh)
	// Join (or start) the flight while still holding the lock so our resCh
	// registration is visible to doWork before it snapshots g.calls[key].
	valCh := g.sf.DoChan(key, g.doWork(key, fn))
	g.mu.Unlock()
	// First wait for the work function to finish (err/shared), then collect
	// the per-caller reader that doWork (or Forget) delivered to resCh.
	res := <-valCh
	return <-resCh, res.Err, res.Shared
}
// DoChan runs Group.Do, but returns a channel that will receive the results/stream when ready.
//
// The returned channel is not closed.
func (g *Group) DoChan(key string, fn func() (io.ReadCloser, error)) <-chan ReaderResult {
	results := make(chan ReaderResult, 1)
	go func() {
		// Run the blocking Do in the background and forward its outcome.
		reader, err, shared := g.Do(key, fn)
		results <- ReaderResult{
			Err:    err,
			Reader: reader,
			Shared: shared,
		}
	}()
	return results
}
// Forget acts just like singleflight.Group.Forget: future calls to Do for this
// key will invoke the work function again rather than joining the in-flight
// call. Any callers currently waiting on the key receive a nil reader.
func (g *Group) Forget(key string) {
	g.mu.Lock()
	defer g.mu.Unlock()
	for _, ch := range g.calls[key] {
		// Deliver nil instead of closing the channel: Do defers close(resCh),
		// so closing here would make that deferred close panic with a
		// double-close when Forget races an in-flight Do. Each channel is
		// buffered (cap 1), and because the key is deleted under the same
		// mutex that doWork checks, either doWork or Forget sends on a given
		// channel — never both — so this send cannot block.
		ch <- nil
	}
	delete(g.calls, key)
	g.sf.Forget(key)
}
// doWork wraps fn into a singleflight work function. When fn completes, the
// wrapper distributes a distinct reader to every caller registered for key and
// clears the key's registration. The singleflight return value is always nil;
// only the error is propagated (readers travel over the per-caller channels).
func (g *Group) doWork(key string, fn func() (io.ReadCloser, error)) func() (interface{}, error) {
	return func() (interface{}, error) {
		fnRes, fnErr := fn()

		g.mu.Lock()
		defer g.mu.Unlock()

		// We won't be processing future calls for this flight, so wrap it up.
		g.sf.Forget(key)

		chans, ok := g.calls[key]
		if !ok {
			return nil, fmt.Errorf("expected to find singleflight key \"%s\", but didn't", key)
		}
		// We've done all we can for this call: clear it before we unlock.
		delete(g.calls, key)

		if fnRes == nil {
			// No usable stream: every waiter gets a nil reader.
			for _, ch := range chans {
				// This needs to be async to prevent a deadlock
				go func(ch chan<- io.ReadCloser) {
					ch <- nil
				}(ch)
			}
			return nil, fnErr // we intentionally discard the return value
		}

		if g.UseSeekers {
			if rsc, ok := fnRes.(io.ReadSeekCloser); ok {
				// Seeker mode: hand every waiter a proxy over one shared parent.
				parent := newParentSeeker(rsc, len(chans))
				for _, ch := range chans {
					// This needs to be async to prevent a deadlock
					go func(ch chan<- io.ReadCloser) {
						ch <- newSyncSeeker(parent)
					}(ch)
				}
				return nil, fnErr // we intentionally discard the return value
			}
		}

		// Copy mode: one pipe per waiter, all fed from the single source.
		writers := make([]*io.PipeWriter, len(chans))
		for i, ch := range chans {
			pr, pw := io.Pipe()
			writers[i] = pw
			// This needs to be async to prevent a deadlock
			go func(r io.ReadCloser, ch chan<- io.ReadCloser) {
				ch <- newDiscardCloser(r)
			}(pr, ch)
		}
		// Do the io copy async to prevent holding up other singleflight calls
		go finishCopy(writers, fnRes)
		return nil, fnErr // we intentionally discard the return value
	}
}
// finishCopy streams everything from src into all of the given pipe writers,
// then closes both sides. Per-writer errors are raised through the pipe
// writers via CloseWithError, which makes them available on the pipe readers.
func finishCopy(writers []*io.PipeWriter, src io.ReadCloser) {
	defer func() {
		_ = src.Close()
	}()
	dst := newAsyncMultiWriter(writers...)
	_, copyErr := io.Copy(dst, src)
	// A nil copyErr closes the writers normally (EOF for readers); a non-nil
	// one is propagated to every reader.
	_ = dst.CloseWithMaybeError(copyErr)
}
// discardCloser discards any remaining data on the underlying reader on close.
type discardCloser struct {
io.ReadCloser
r io.ReadCloser
}
// newDiscardCloser creates a new discardCloser from an input io.ReadCloser
func newDiscardCloser(r io.ReadCloser) *discardCloser {
return &discardCloser{r: r}
}
func (d *discardCloser) Read(p []byte) (int, error) {
return d.r.Read(p)
}
func (d *discardCloser) Close() error {
if _, err := io.Copy(io.Discard, d.r); err != nil {
return err
}
return d.r.Close()
}
type asyncMultiWriter struct {
io.WriteCloser
writers []*io.PipeWriter
skipFlags []bool
mu *sync.Mutex
}
func newAsyncMultiWriter(writers ...*io.PipeWriter) *asyncMultiWriter {
return &asyncMultiWriter{
writers: writers,
skipFlags: make([]bool, len(writers)),
mu: new(sync.Mutex),
}
}
type writeResponse struct {
i int
err error
n int
}
func (a *asyncMultiWriter) Write(p []byte) (int, error) {
a.mu.Lock()
defer a.mu.Unlock()
ch := make(chan *writeResponse, len(a.writers))
wg := new(sync.WaitGroup)
c := 0
for i := range a.writers {
if a.skipFlags[i] {
continue
}
wg.Add(1)
c += 1
go func(i int, p []byte, a *asyncMultiWriter, ch chan *writeResponse) {
defer wg.Done()
w := a.writers[i]
n, err := w.Write(p)
ch <- &writeResponse{
i: i,
err: err,
n: n,
}
}(i, p, a, ch)
}
wg.Wait()
maxRead := 0
for i := 0; i < c; i++ {
res := <-ch
if res.n > maxRead {
maxRead = res.n
}
if res.err != nil {
w := a.writers[res.i]
_ = w.CloseWithError(res.err)
a.skipFlags[i] = true
}
}
return maxRead, nil
}
func (a *asyncMultiWriter) Close() error {
a.mu.Lock()
defer a.mu.Unlock()
errs := make([]error, 0)
for i, w := range a.writers {
a.skipFlags[i] = true
err := w.Close()
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
func (a *asyncMultiWriter) CloseWithMaybeError(inError error) error {
if inError == nil {
return a.Close()
}
a.mu.Lock()
defer a.mu.Unlock()
errs := make([]error, 0)
for i, w := range a.writers {
a.skipFlags[i] = true
err := w.CloseWithError(inError)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}