// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// API docs from [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web).
// Attributions and copyright licensing by Mozilla Contributors is licensed
// under [CC-BY-SA 2.5](https://creativecommons.org/licenses/by-sa/2.5/).
// Generated from Web IDL definitions.
// ignore_for_file: unintended_html_in_doc_comment
@JS()
library;
import 'dart:js_interop';
import 'dom.dart';
import 'hr_time.dart';
import 'html.dart';
import 'mediacapture_streams.dart';
typedef DecodeErrorCallback = JSFunction;
typedef DecodeSuccessCallback = JSFunction;
typedef AudioWorkletProcessorConstructor = JSFunction;
typedef AudioContextState = String;
typedef AudioContextRenderSizeCategory = String;
typedef AudioContextLatencyCategory = String;
typedef AudioSinkType = String;
typedef ChannelCountMode = String;
typedef ChannelInterpretation = String;
typedef AutomationRate = String;
typedef BiquadFilterType = String;
typedef OscillatorType = String;
typedef PanningModelType = String;
typedef DistanceModelType = String;
typedef OverSampleType = String;
/// The `BaseAudioContext` interface of the
/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API)
/// acts as a base definition for online and offline audio-processing graphs, as
/// represented by [AudioContext] and [OfflineAudioContext] respectively. You
/// wouldn't use `BaseAudioContext` directly — you'd use its features via one of
/// these two inheriting interfaces.
///
/// A `BaseAudioContext` can be a target of events, therefore it implements the
/// [EventTarget] interface.
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext).
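///
/// A brief illustrative sketch (not part of the MDN text): because
/// `BaseAudioContext` is never constructed directly, the example builds a
/// small graph through [AudioContext].
///
/// ```dart
/// final ctx = AudioContext();
/// final osc = ctx.createOscillator();
/// final amp = ctx.createGain();
/// osc.connect(amp);
/// amp.connect(ctx.destination);
/// osc.start();
/// ```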
extension type BaseAudioContext._(JSObject _) implements EventTarget, JSObject {
/// The `createAnalyser()` method of the
/// [BaseAudioContext] interface creates an [AnalyserNode], which
/// can be used to expose audio time and frequency data and create data
/// visualizations.
///
/// > **Note:** The [AnalyserNode.AnalyserNode] constructor is the
/// > recommended way to create an [AnalyserNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
///
/// > **Note:** For more on using this node, see the
/// > [AnalyserNode] page.
external AnalyserNode createAnalyser();
/// The `createBiquadFilter()` method of the [BaseAudioContext]
/// interface creates a [BiquadFilterNode], which represents a second order
/// filter configurable as several different common filter types.
///
/// > **Note:** The [BiquadFilterNode.BiquadFilterNode] constructor is the
/// > recommended way to create a [BiquadFilterNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external BiquadFilterNode createBiquadFilter();
/// The `createBuffer()` method of the [BaseAudioContext]
/// interface is used to create a new, empty [AudioBuffer] object, which
/// can then be populated by data, and played via an [AudioBufferSourceNode].
///
/// For more details about audio buffers, check out the [AudioBuffer]
/// reference page.
///
/// > **Note:** `createBuffer()` used to be able to take compressed
/// > data and give back decoded samples, but this ability was removed from
/// > the specification,
/// > because all the decoding was done on the main thread, so
/// > `createBuffer()` was blocking other code execution. The asynchronous
/// > method
/// > `decodeAudioData()` does the same thing — takes compressed audio, such
/// > as an
/// > MP3 file, and directly gives you back an [AudioBuffer] that you can
/// > then play via an [AudioBufferSourceNode]. For simple use cases
/// > like playing an MP3, `decodeAudioData()` is what you should be using.
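///
/// A short usage sketch (illustrative, not from MDN). The
/// `AudioBufferSourceNode.buffer` setter used at the end is declared later in
/// this library; `Float32List` comes from `dart:typed_data` and `toJS` from
/// `dart:js_interop`.
///
/// ```dart
/// final ctx = AudioContext();
/// // One second of mono audio at the context's own sample rate.
/// final buffer = ctx.createBuffer(1, ctx.sampleRate.round(), ctx.sampleRate);
/// final samples = Float32List(buffer.length);
/// for (var i = 0; i < samples.length; i++) {
///   samples[i] = (i % 100) / 50 - 1; // simple ramp waveform in [-1, 1)
/// }
/// buffer.copyToChannel(samples.toJS, 0);
/// ctx.createBufferSource()
///   ..buffer = buffer
///   ..connect(ctx.destination)
///   ..start();
/// ```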
external AudioBuffer createBuffer(
int numberOfChannels,
int length,
num sampleRate,
);
/// The `createBufferSource()` method of the [BaseAudioContext]
/// interface is used to create a new [AudioBufferSourceNode], which can be
/// used to play audio data contained within an [AudioBuffer] object.
/// [AudioBuffer]s are created using [BaseAudioContext.createBuffer] or
/// returned by [BaseAudioContext.decodeAudioData] when it successfully
/// decodes an audio track.
///
/// > **Note:** The [AudioBufferSourceNode.AudioBufferSourceNode]
/// > constructor is the recommended way to create an [AudioBufferSourceNode];
/// > see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external AudioBufferSourceNode createBufferSource();
/// The `createChannelMerger()` method of the [BaseAudioContext] interface
/// creates a [ChannelMergerNode],
/// which combines channels from multiple audio streams into a single audio
/// stream.
///
/// > **Note:** The [ChannelMergerNode.ChannelMergerNode] constructor is the
/// > recommended way to create a [ChannelMergerNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external ChannelMergerNode createChannelMerger([int numberOfInputs]);
/// The `createChannelSplitter()` method of the [BaseAudioContext] interface
/// is used to create a [ChannelSplitterNode],
/// which is used to access the individual channels of an audio stream and
/// process them separately.
///
/// > **Note:** The [ChannelSplitterNode.ChannelSplitterNode]
/// > constructor is the recommended way to create a [ChannelSplitterNode];
/// > see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external ChannelSplitterNode createChannelSplitter([int numberOfOutputs]);
/// The **`createConstantSource()`**
/// method of the [BaseAudioContext] interface creates a
/// [ConstantSourceNode] object, which is an audio source that continuously
/// outputs a monaural (one-channel) sound signal whose samples all have the
/// same
/// value.
///
/// > **Note:** The [ConstantSourceNode.ConstantSourceNode]
/// > constructor is the recommended way to create a [ConstantSourceNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external ConstantSourceNode createConstantSource();
/// The `createConvolver()` method of the [BaseAudioContext]
/// interface creates a [ConvolverNode], which is commonly used to apply
/// reverb effects to your audio. See the
/// [spec definition of Convolution](https://webaudio.github.io/web-audio-api/#background-3)
/// for more information.
///
/// > **Note:** The [ConvolverNode.ConvolverNode]
/// > constructor is the recommended way to create a [ConvolverNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external ConvolverNode createConvolver();
/// The `createDelay()` method of the
/// [BaseAudioContext] interface is used to create a [DelayNode],
/// which is used to delay the incoming audio signal by a certain amount of
/// time.
///
/// > **Note:** The [DelayNode.DelayNode]
/// > constructor is the recommended way to create a [DelayNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external DelayNode createDelay([num maxDelayTime]);
/// The `createDynamicsCompressor()` method of the [BaseAudioContext]
/// interface is used to create a [DynamicsCompressorNode], which can be used
/// to apply compression to an audio signal.
///
/// Compression lowers the volume of the loudest parts of the signal and
/// raises the volume
/// of the softest parts. Overall, a louder, richer, and fuller sound can be
/// achieved. It is
/// especially important in games and musical applications where large numbers
/// of individual
/// sounds are played simultaneously, where you want to control the overall
/// signal level and
/// help avoid clipping (distorting) of the audio output.
///
/// > **Note:** The [DynamicsCompressorNode.DynamicsCompressorNode]
/// > constructor is the recommended way to create a [DynamicsCompressorNode];
/// > see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external DynamicsCompressorNode createDynamicsCompressor();
/// The `createGain()` method of the [BaseAudioContext]
/// interface creates a [GainNode], which can be used to control the
/// overall gain (or volume) of the audio graph.
///
/// > **Note:** The [GainNode.GainNode]
/// > constructor is the recommended way to create a [GainNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external GainNode createGain();
/// The **`createIIRFilter()`** method of the [BaseAudioContext] interface
/// creates an [IIRFilterNode], which represents a general
/// **[infinite impulse response](https://en.wikipedia.org/wiki/Infinite_impulse_response)**
/// (IIR) filter which can be configured to serve as various types of filter.
///
/// > **Note:** The [IIRFilterNode.IIRFilterNode]
/// > constructor is the recommended way to create an [IIRFilterNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external IIRFilterNode createIIRFilter(
JSArray<JSNumber> feedforward,
JSArray<JSNumber> feedback,
);
/// The `createOscillator()` method of the [BaseAudioContext]
/// interface creates an [OscillatorNode], a source representing a periodic
/// waveform. It basically generates a constant tone.
///
/// > **Note:** The [OscillatorNode.OscillatorNode]
/// > constructor is the recommended way to create an [OscillatorNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external OscillatorNode createOscillator();
/// The `createPanner()` method of the [BaseAudioContext]
/// interface is used to create a new [PannerNode], which is used to
/// spatialize an incoming audio stream in 3D space.
///
/// The panner node is spatialized in relation to the AudioContext's
/// [AudioListener] (defined by the [BaseAudioContext.listener]
/// attribute), which represents the position and orientation of the person
/// listening to the
/// audio.
///
/// > **Note:** The [PannerNode.PannerNode]
/// > constructor is the recommended way to create a [PannerNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external PannerNode createPanner();
/// The `createPeriodicWave()` method of the [BaseAudioContext] interface
/// is used to create a [PeriodicWave], which is used to define a periodic
/// waveform
/// that can be used to shape the output of an [OscillatorNode].
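///
/// An illustrative sketch (not from MDN) of building a wave from Fourier
/// coefficients; `OscillatorNode.setPeriodicWave` is declared later in this
/// library.
///
/// ```dart
/// final ctx = AudioContext();
/// // Index 0 is the (ignored) DC term; index 1 is the fundamental.
/// final real = [0.0, 1.0].map((v) => v.toJS).toList().toJS;
/// final imag = [0.0, 0.0].map((v) => v.toJS).toList().toJS;
/// final wave = ctx.createPeriodicWave(real, imag);
/// ctx.createOscillator()
///   ..setPeriodicWave(wave)
///   ..connect(ctx.destination)
///   ..start();
/// ```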
external PeriodicWave createPeriodicWave(
JSArray<JSNumber> real,
JSArray<JSNumber> imag, [
PeriodicWaveConstraints constraints,
]);
/// The `createScriptProcessor()` method of the [BaseAudioContext] interface
/// creates a [ScriptProcessorNode] used for direct audio processing.
///
/// > **Note:** This feature was replaced by
/// > [AudioWorklets](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet)
/// > and the [AudioWorkletNode] interface.
external ScriptProcessorNode createScriptProcessor([
int bufferSize,
int numberOfInputChannels,
int numberOfOutputChannels,
]);
/// The `createStereoPanner()` method of the [BaseAudioContext] interface
/// creates a [StereoPannerNode], which can be used to apply
/// stereo panning to an audio source.
/// It positions an incoming audio stream in a stereo image using a
/// [low-cost panning algorithm](https://webaudio.github.io/web-audio-api/#stereopanner-algorithm).
///
/// > **Note:** The [StereoPannerNode.StereoPannerNode]
/// > constructor is the recommended way to create a [StereoPannerNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external StereoPannerNode createStereoPanner();
/// The `createWaveShaper()` method of the [BaseAudioContext]
/// interface creates a [WaveShaperNode], which represents a non-linear
/// distortion. It is used to apply distortion effects to your audio.
///
/// > **Note:** The [WaveShaperNode.WaveShaperNode]
/// > constructor is the recommended way to create a [WaveShaperNode]; see
/// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode).
external WaveShaperNode createWaveShaper();
/// The `decodeAudioData()` method of the [BaseAudioContext]
/// interface is used to asynchronously decode audio file data contained in an
/// `ArrayBuffer` that is loaded from [fetch],
/// [XMLHttpRequest], or [FileReader]. The decoded
/// [AudioBuffer] is resampled to the [AudioContext]'s sampling
/// rate, then passed to a callback or promise.
///
/// This is the preferred method of creating an audio source for Web Audio API
/// from an
/// audio track. This method only works on complete file data, not fragments
/// of audio file
/// data.
///
/// This function implements two alternative ways to asynchronously return the
/// audio data or error messages: it returns a `Promise` that fulfills with
/// the audio data, and also accepts callback arguments to handle success or
/// failure. The primary method of interfacing with this function is via its
/// Promise return value, and the callback parameters are provided for legacy
/// reasons.
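///
/// A minimal sketch (not from MDN), assuming the encoded audio has already
/// been fetched into a `Uint8List` named `encodedBytes`; `toJS` and `toDart`
/// come from `dart:js_interop`.
///
/// ```dart
/// Future<AudioBuffer> decode(AudioContext ctx, Uint8List encodedBytes) =>
///     ctx.decodeAudioData(encodedBytes.buffer.toJS).toDart;
/// ```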
external JSPromise<AudioBuffer> decodeAudioData(
JSArrayBuffer audioData, [
DecodeSuccessCallback? successCallback,
DecodeErrorCallback? errorCallback,
]);
/// The `destination` property of the [BaseAudioContext]
/// interface returns an [AudioDestinationNode] representing the final
/// destination of all audio in the context. It often represents an actual
/// audio-rendering
/// device such as your device's speakers.
external AudioDestinationNode get destination;
/// The `sampleRate` property of the [BaseAudioContext] interface returns a
/// floating point number representing the sample rate, in samples per second,
/// used by all nodes in this audio context. The sample rate cannot be changed
/// after the context is created, so sample-rate converters are not supported.
external double get sampleRate;
/// The `currentTime` read-only property of the [BaseAudioContext]
/// interface returns a double representing an ever-increasing hardware
/// timestamp in seconds that
/// can be used for scheduling audio playback, visualizing timelines, etc. It
/// starts at 0.
external double get currentTime;
/// The `listener` property of the [BaseAudioContext] interface
/// returns an [AudioListener] object that can then be used for
/// implementing 3D audio spatialization.
external AudioListener get listener;
/// The `state` read-only property of the [BaseAudioContext]
/// interface returns the current state of the `AudioContext`.
external AudioContextState get state;
/// The `audioWorklet` read-only property of the
/// [BaseAudioContext] interface returns an instance of
/// [AudioWorklet] that can be used for adding
/// [AudioWorkletProcessor]-derived classes which implement custom audio
/// processing.
external AudioWorklet get audioWorklet;
external EventHandler get onstatechange;
external set onstatechange(EventHandler value);
}
/// The `AudioContext` interface represents an audio-processing graph built from
/// audio modules linked together, each represented by an [AudioNode].
///
/// An audio context controls both the creation of the nodes it contains and the
/// execution of the audio processing, or decoding. You need to create an
/// `AudioContext` before you do anything else, as everything happens inside a
/// context. It's recommended to create one AudioContext and reuse it instead of
/// initializing a new one each time, and it's OK to use a single `AudioContext`
/// for several different audio sources and pipelines concurrently.
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext).
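///
/// An illustrative sketch (not part of the MDN text) of constructing a
/// context with explicit [AudioContextOptions]:
///
/// ```dart
/// final ctx = AudioContext(AudioContextOptions(
///   latencyHint: 'playback'.toJS, // or a latency in seconds, e.g. 0.2.toJS
///   sampleRate: 48000,
/// ));
/// ```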
extension type AudioContext._(JSObject _)
implements BaseAudioContext, JSObject {
external factory AudioContext([AudioContextOptions contextOptions]);
/// The
/// **`getOutputTimestamp()`** method of the
/// [AudioContext] interface returns a new `AudioTimestamp` object
/// containing two audio timestamp values relating to the current audio
/// context.
///
/// The two values are as follows:
///
/// - `AudioTimestamp.contextTime`: The time of the sample frame currently
/// being rendered by the audio output device (i.e., output audio stream
/// position), in the
/// same units and origin as the context's [BaseAudioContext.currentTime].
/// Basically, this is the time elapsed since the audio context was first
/// created.
/// - `AudioTimestamp.performanceTime`: An estimation of the moment when the
/// sample frame corresponding to the stored `contextTime` value was rendered
/// by the audio output device, in the same units and origin as
/// [performance.now]. This is the time elapsed since the document containing
/// the audio context was first rendered.
external AudioTimestamp getOutputTimestamp();
/// The **`resume()`** method of the [AudioContext]
/// interface resumes the progression of time in an audio context that has
/// previously been
/// suspended.
///
/// This method will cause an `INVALID_STATE_ERR` exception to be thrown if
/// called on an [OfflineAudioContext].
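///
/// A common pattern (illustrative, not from MDN) is to resume a context that
/// autoplay policies left suspended, from inside a user-gesture handler:
///
/// ```dart
/// Future<void> ensureRunning(AudioContext ctx) async {
///   if (ctx.state == 'suspended') {
///     await ctx.resume().toDart;
///   }
/// }
/// ```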
external JSPromise<JSAny?> resume();
/// The `suspend()` method of the [AudioContext] interface suspends the
/// progression of time in the audio context, temporarily halting audio
/// hardware access and reducing CPU/battery usage in the process — this is
/// useful if you want an application to power down the audio hardware when it
/// will not be using an audio context for a while.
///
/// This method will cause an `INVALID_STATE_ERR` exception to be thrown if
/// called on an [OfflineAudioContext].
external JSPromise<JSAny?> suspend();
/// The `close()` method of the [AudioContext] interface closes the audio
/// context, releasing any system audio resources that it uses.
///
/// This function does not automatically release all `AudioContext`-created
/// objects, unless other references have been released as well; however, it
/// will forcibly release any system audio resources that might prevent
/// additional `AudioContexts` from being created and used, suspend the
/// progression of audio time in the audio context, and stop processing audio
/// data. The returned `Promise` resolves when all
/// `AudioContext`-creation-blocking resources have been released. This method
/// throws an `INVALID_STATE_ERR` exception if called on an
/// [OfflineAudioContext].
external JSPromise<JSAny?> close();
/// The `createMediaElementSource()` method of the [AudioContext] interface is
/// used to create a new [MediaElementAudioSourceNode] object, given an
/// existing HTML `audio` or `video` element, the audio from which can then be
/// played and manipulated.
///
/// For more details about media element audio source nodes, check out the
/// [MediaElementAudioSourceNode] reference page.
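///
/// An illustrative sketch (not from MDN), assuming `audioEl` is an existing
/// `HTMLAudioElement` obtained elsewhere (for example via
/// `document.querySelector`):
///
/// ```dart
/// final ctx = AudioContext();
/// final source = ctx.createMediaElementSource(audioEl);
/// // The element's audio is now routed through the graph and plays out of
/// // the context's destination.
/// source.connect(ctx.destination);
/// ```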
external MediaElementAudioSourceNode createMediaElementSource(
HTMLMediaElement mediaElement);
/// The `createMediaStreamSource()` method of the [AudioContext]
/// interface is used to create a new [MediaStreamAudioSourceNode]
/// object, given a media stream (say, from a [MediaDevices.getUserMedia]
/// instance), the audio from which can then be played and manipulated.
///
/// For more details about media stream audio source nodes, check out the
/// [MediaStreamAudioSourceNode] reference page.
external MediaStreamAudioSourceNode createMediaStreamSource(
MediaStream mediaStream);
/// The **`createMediaStreamTrackSource()`** method of the [AudioContext]
/// interface creates and returns a [MediaStreamTrackAudioSourceNode] which
/// represents an audio source whose data comes from the specified
/// [MediaStreamTrack].
///
/// This differs from [AudioContext.createMediaStreamSource], which creates a
/// [MediaStreamAudioSourceNode] whose audio comes from the audio track in a
/// specified [MediaStream] whose [MediaStreamTrack.id] is first,
/// lexicographically (alphabetically).
external MediaStreamTrackAudioSourceNode createMediaStreamTrackSource(
MediaStreamTrack mediaStreamTrack);
/// The `createMediaStreamDestination()` method of the [AudioContext]
/// interface is used to create a new [MediaStreamAudioDestinationNode] object
/// associated with a
/// [WebRTC](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API)
/// [MediaStream] representing an audio stream, which may be stored in a local
/// file or sent to another computer.
///
/// The [MediaStream] is created when the node is created and is accessible
/// via the [MediaStreamAudioDestinationNode]'s `stream` attribute. This
/// stream can be used in a similar way as a `MediaStream` obtained via
/// [navigator.getUserMedia] — it can, for example, be sent to a remote peer
/// using the `addStream()` method of `RTCPeerConnection`.
///
/// For more details about media stream destination nodes, check out the
/// [MediaStreamAudioDestinationNode] reference page.
external MediaStreamAudioDestinationNode createMediaStreamDestination();
/// The **`baseLatency`** read-only property of the
/// [AudioContext] interface returns a double that represents the number of
/// seconds of processing latency incurred by the `AudioContext` passing an
/// audio
/// buffer from the [AudioDestinationNode] — i.e. the end of the audio graph —
/// into the host system's audio subsystem ready for playing.
///
/// > **Note:** You can request a certain latency during
/// > [AudioContext.AudioContext] with the
/// > `latencyHint` option, but the browser may ignore the option.
external double get baseLatency;
/// The **`outputLatency`** read-only property of
/// the [AudioContext] interface provides an estimation of the output latency
/// of the current audio context.
///
/// This is the time, in seconds, between the browser passing an audio buffer
/// out of an
/// audio graph over to the host system's audio subsystem to play, and the
/// time at which the
/// first sample in the buffer is actually processed by the audio output
/// device.
///
/// It varies depending on the platform and the available hardware.
external double get outputLatency;
}
extension type AudioContextOptions._(JSObject _) implements JSObject {
external factory AudioContextOptions({
JSAny latencyHint,
num sampleRate,
JSAny sinkId,
JSAny renderSizeHint,
});
external JSAny get latencyHint;
external set latencyHint(JSAny value);
external double get sampleRate;
external set sampleRate(num value);
external JSAny get sinkId;
external set sinkId(JSAny value);
external JSAny get renderSizeHint;
external set renderSizeHint(JSAny value);
}
extension type AudioSinkOptions._(JSObject _) implements JSObject {
external factory AudioSinkOptions({required AudioSinkType type});
external AudioSinkType get type;
external set type(AudioSinkType value);
}
extension type AudioTimestamp._(JSObject _) implements JSObject {
external factory AudioTimestamp({
num contextTime,
DOMHighResTimeStamp performanceTime,
});
external double get contextTime;
external set contextTime(num value);
external double get performanceTime;
external set performanceTime(DOMHighResTimeStamp value);
}
/// The `OfflineAudioContext` interface is an [AudioContext] interface
/// representing an audio-processing graph built from [AudioNode]s linked
/// together. In contrast with a standard [AudioContext], an
/// `OfflineAudioContext` doesn't render the audio to the device hardware;
/// instead, it generates it, as fast as it can, and outputs the result to an
/// [AudioBuffer].
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioContext).
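///
/// An illustrative sketch (not part of the MDN text): rendering one second of
/// a tone into an [AudioBuffer] without touching the audio hardware.
///
/// ```dart
/// Future<AudioBuffer> renderTone() {
///   final offline = OfflineAudioContext(OfflineAudioContextOptions(
///     numberOfChannels: 1,
///     length: 44100, // one second at 44.1 kHz
///     sampleRate: 44100,
///   ));
///   offline.createOscillator()
///     ..connect(offline.destination)
///     ..start();
///   return offline.startRendering().toDart;
/// }
/// ```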
extension type OfflineAudioContext._(JSObject _)
implements BaseAudioContext, JSObject {
external factory OfflineAudioContext(
JSAny contextOptionsOrNumberOfChannels, [
int length,
num sampleRate,
]);
/// The `startRendering()` method of the [OfflineAudioContext] interface
/// starts rendering the audio graph, taking into account the current
/// connections and the current scheduled changes.
///
/// The [OfflineAudioContext.complete_event] event (of type
/// [OfflineAudioCompletionEvent]) is raised when the rendering is finished,
/// containing the resulting [AudioBuffer] in its `renderedBuffer` property.
///
/// Browsers currently support two versions of the `startRendering()` method —
/// an older event-based version and a newer promise-based version.
/// The former will eventually be removed, but currently both mechanisms are
/// provided for legacy reasons.
external JSPromise<AudioBuffer> startRendering();
/// The **`resume()`** method of the
/// [OfflineAudioContext] interface resumes the progression of time in an
/// audio
/// context that has been suspended. The promise resolves immediately because
/// the
/// `OfflineAudioContext` does not require the audio hardware.
external JSPromise<JSAny?> resume();
/// The **`suspend()`** method of the [OfflineAudioContext] interface
/// schedules a suspension of the time
/// progression in the audio context at the specified time and returns a
/// promise. This is
/// generally useful when manipulating the audio graph synchronously on an
/// OfflineAudioContext.
///
/// Note that the maximum precision of suspension is the size of the render
/// quantum and the
/// specified suspension time will be rounded down to the nearest render
/// quantum boundary.
/// For this reason, multiple suspends cannot be scheduled at the same
/// quantized frame. Also, scheduling should be done while the context is not
/// running to ensure precise suspension.
external JSPromise<JSAny?> suspend(num suspendTime);
/// The **`length`** property of the
/// [OfflineAudioContext] interface returns an integer representing the size
/// of
/// the buffer in sample-frames.
external int get length;
external EventHandler get oncomplete;
external set oncomplete(EventHandler value);
}
extension type OfflineAudioContextOptions._(JSObject _) implements JSObject {
external factory OfflineAudioContextOptions({
int numberOfChannels,
required int length,
required num sampleRate,
JSAny renderSizeHint,
});
external int get numberOfChannels;
external set numberOfChannels(int value);
external int get length;
external set length(int value);
external double get sampleRate;
external set sampleRate(num value);
external JSAny get renderSizeHint;
external set renderSizeHint(JSAny value);
}
/// The
/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API)
/// `OfflineAudioCompletionEvent` interface represents events that occur when
/// the processing of an [OfflineAudioContext] is terminated. The
/// [OfflineAudioContext.complete_event] event uses this interface.
///
/// > **Note:** This interface is marked as deprecated; it is still supported
/// > for legacy reasons, but it will soon be superseded when the promise
/// > version of [OfflineAudioContext.startRendering] is supported in browsers,
/// > which will no longer need it.
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioCompletionEvent).
extension type OfflineAudioCompletionEvent._(JSObject _)
implements Event, JSObject {
external factory OfflineAudioCompletionEvent(
String type,
OfflineAudioCompletionEventInit eventInitDict,
);
/// The **`renderedBuffer`** read-only property of the
/// [OfflineAudioCompletionEvent] interface is an [AudioBuffer]
/// containing the result of processing an [OfflineAudioContext].
external AudioBuffer get renderedBuffer;
}
extension type OfflineAudioCompletionEventInit._(JSObject _)
implements EventInit, JSObject {
external factory OfflineAudioCompletionEventInit({
bool bubbles,
bool cancelable,
bool composed,
required AudioBuffer renderedBuffer,
});
external AudioBuffer get renderedBuffer;
external set renderedBuffer(AudioBuffer value);
}
/// The **`AudioBuffer`** interface represents a short audio asset residing in
/// memory, created from an audio file using the
/// [BaseAudioContext.decodeAudioData] method, or from raw data using
/// [BaseAudioContext.createBuffer]. Once put into an AudioBuffer, the audio can
/// then be played by being passed into an [AudioBufferSourceNode].
///
/// Objects of these types are designed to hold small audio snippets, typically
/// less than 45 s. For longer sounds, objects implementing the
/// [MediaElementAudioSourceNode] are more suitable. The buffer contains the
/// audio signal waveform encoded as a series of amplitudes in the following
/// format: non-interleaved IEEE754 32-bit linear PCM with a nominal range
/// between `-1` and `+1`, that is, a 32-bit floating point buffer, with each
/// sample between -1.0 and 1.0. If the [AudioBuffer] has multiple channels,
/// they are stored in separate buffers.
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer).
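///
/// A short sketch (not from MDN) of constructing a buffer directly and
/// writing samples into its single channel; `Float32List` comes from
/// `dart:typed_data`, and `toJS`/`toDart` from `dart:js_interop`.
///
/// ```dart
/// final buffer = AudioBuffer(AudioBufferOptions(
///   numberOfChannels: 1,
///   length: 22050, // half a second at 44.1 kHz
///   sampleRate: 44100,
/// ));
/// final silence = Float32List(buffer.length); // all zeros
/// buffer.copyToChannel(silence.toJS, 0);
/// final samples = buffer.getChannelData(0).toDart; // read them back
/// ```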
extension type AudioBuffer._(JSObject _) implements JSObject {
external factory AudioBuffer(AudioBufferOptions options);
/// The **`getChannelData()`** method of the [AudioBuffer] interface returns a
/// `Float32Array` containing the PCM data associated with the channel,
/// defined by the channel parameter (with 0 representing the first channel).
external JSFloat32Array getChannelData(int channel);
/// The
/// **`copyFromChannel()`** method of the
/// [AudioBuffer] interface copies the audio sample data from the specified
/// channel of the `AudioBuffer` to a specified
/// `Float32Array`.
external void copyFromChannel(
JSFloat32Array destination,
int channelNumber, [
int bufferOffset,
]);
/// The `copyToChannel()` method of the [AudioBuffer] interface copies
/// the samples to the specified channel of the `AudioBuffer`, from the source
/// array.
external void copyToChannel(
JSFloat32Array source,
int channelNumber, [
int bufferOffset,
]);
/// The **`sampleRate`** property of the [AudioBuffer] interface returns a
/// float representing the sample rate, in samples per second, of the PCM data
/// stored in the buffer.
external double get sampleRate;
/// The **`length`** property of the [AudioBuffer]
/// interface returns an integer representing the length, in sample-frames, of
/// the PCM data
/// stored in the buffer.
external int get length;
/// The **`duration`** property of the [AudioBuffer] interface returns a
/// double representing the duration, in seconds, of the PCM data stored in
/// the buffer.
external double get duration;
/// The `numberOfChannels` property of the [AudioBuffer]
/// interface returns an integer representing the number of discrete audio
/// channels
/// described by the PCM data stored in the buffer.
external int get numberOfChannels;
}
extension type AudioBufferOptions._(JSObject _) implements JSObject {
external factory AudioBufferOptions({
int numberOfChannels,
required int length,
required num sampleRate,
});
external int get numberOfChannels;
external set numberOfChannels(int value);
external int get length;
external set length(int value);
external double get sampleRate;
external set sampleRate(num value);
}
/// The **`AudioNode`** interface is a generic interface for representing an
/// audio processing module.
///
/// Examples include:
///
/// - an audio source (e.g. an HTML `audio` or `video` element, an
/// [OscillatorNode], etc.),
/// - the audio destination,
/// - an intermediate processing module (e.g. a filter like [BiquadFilterNode]
/// or [ConvolverNode]), or
/// - a volume control (like [GainNode]).
///
/// > **Note:** An `AudioNode` can be a target of events, therefore it
/// > implements
/// > the [EventTarget] interface.
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode).
extension type AudioNode._(JSObject _) implements EventTarget, JSObject {
/// The `connect()` method of the [AudioNode] interface lets
/// you connect one of the node's outputs to a target, which may be either
/// another
/// `AudioNode` (thereby directing the sound data to the specified node) or an
/// [AudioParam], so that the node's output data is automatically used to
/// change the value of that parameter over time.
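///
/// An illustrative sketch (not from MDN): chaining nodes, and also driving an
/// [AudioParam] (here `OscillatorNode.frequency`, declared later in this
/// library) from another node's output:
///
/// ```dart
/// final ctx = AudioContext();
/// final osc = ctx.createOscillator();
/// final amp = ctx.createGain();
/// osc.connect(amp); // node -> node
/// amp.connect(ctx.destination);
///
/// final lfo = ctx.createOscillator();
/// final lfoDepth = ctx.createGain();
/// lfo.connect(lfoDepth);
/// lfoDepth.connect(osc.frequency); // node -> AudioParam (vibrato)
/// lfo.start();
/// osc.start();
/// ```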
external AudioNode? connect(
JSObject destinationNodeOrDestinationParam, [
int output,
int input,
]);
/// The **`disconnect()`** method of the [AudioNode] interface lets you
/// disconnect one or more nodes from the node on which the method is called.
external void disconnect([
JSAny destinationNodeOrDestinationParamOrOutput,
int output,
int input,
]);
/// The read-only `context` property of the
/// [AudioNode] interface returns the associated
/// [BaseAudioContext], that is the object representing the processing graph
/// the node is participating in.
external BaseAudioContext get context;
/// The `numberOfInputs` property of
/// the [AudioNode] interface returns the number of inputs feeding the
/// node. Source nodes are defined as nodes having a `numberOfInputs`
/// property with a value of 0.
external int get numberOfInputs;
/// The `numberOfOutputs` property of
/// the [AudioNode] interface returns the number of outputs coming out of
/// the node. Destination nodes — like [AudioDestinationNode] — have
/// a value of 0 for this attribute.
external int get numberOfOutputs;
/// The **`channelCount`** property of the [AudioNode] interface represents an
/// integer used to determine how many channels are used when
/// [up-mixing and down-mixing](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Basic_concepts_behind_Web_Audio_API#up-mixing_and_down-mixing)
/// connections to any inputs to the node.
///
/// `channelCount`'s usage and precise definition depend on the value of
/// [AudioNode.channelCountMode]:
///
/// - It is ignored if the `channelCountMode` value is `max`.
/// - It is used as a maximum value if the `channelCountMode` value is
/// `clamped-max`.
/// - It is used as the exact value if the `channelCountMode` value is
/// `explicit`.
external int get channelCount;
external set channelCount(int value);
/// The `channelCountMode` property of the [AudioNode] interface represents an
/// enumerated value describing the way channels must be matched between the
/// node's inputs and outputs.
external ChannelCountMode get channelCountMode;
external set channelCountMode(ChannelCountMode value);
/// The **`channelInterpretation`** property of the [AudioNode] interface
/// represents an enumerated value describing how input channels are mapped to
/// output channels when the number of inputs/outputs is different. For
/// example, this setting defines how a mono input will be up-mixed to a
/// stereo or 5.1 channel output, or how a quad channel input will be
/// down-mixed to a stereo or mono output.
///
/// The property has two options: `speakers` and `discrete`. These are
/// documented in [Basic concepts behind Web Audio API > up-mixing and
/// down-mixing](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Basic_concepts_behind_Web_Audio_API#up-mixing_and_down-mixing).
external ChannelInterpretation get channelInterpretation;
external set channelInterpretation(ChannelInterpretation value);
}
extension type AudioNodeOptions._(JSObject _) implements JSObject {
external factory AudioNodeOptions({
int channelCount,
ChannelCountMode channelCountMode,
ChannelInterpretation channelInterpretation,
});
external int get channelCount;
external set channelCount(int value);
external ChannelCountMode get channelCountMode;
external set channelCountMode(ChannelCountMode value);
external ChannelInterpretation get channelInterpretation;
external set channelInterpretation(ChannelInterpretation value);
}
/// The Web Audio API's `AudioParam` interface represents an audio-related
/// parameter, usually a parameter of an [AudioNode] (such as [GainNode.gain]).
///
/// An `AudioParam` can be set to a specific value or a change in value, and can
/// be scheduled to happen at a specific time and following a specific pattern.
///
/// Each `AudioParam` has a list of events, initially empty, that define when
/// and how values change. When this list is not empty, changes using the
/// `AudioParam.value` attribute are ignored. This list of events allows us to
/// schedule changes that have to happen at very precise times, using arbitrary
/// timeline-based automation curves. The time used is the one defined in
/// [BaseAudioContext.currentTime].
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam).
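///
/// A brief scheduling sketch (not part of the MDN text); `GainNode.gain` is
/// declared later in this library:
///
/// ```dart
/// final ctx = AudioContext();
/// final amp = ctx.createGain();
/// amp.connect(ctx.destination);
/// final now = ctx.currentTime;
/// amp.gain
///   ..setValueAtTime(0, now)
///   ..linearRampToValueAtTime(1, now + 0.5) // fade in
///   ..exponentialRampToValueAtTime(0.001, now + 2.0); // fade out, never to 0
/// ```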
extension type AudioParam._(JSObject _) implements JSObject {
/// The `setValueAtTime()` method of the
/// [AudioParam] interface schedules an instant change to the
/// `AudioParam` value at a precise time, as measured against
/// [BaseAudioContext.currentTime]. The new value is given in the value
/// parameter.
external AudioParam setValueAtTime(
num value,
num startTime,
);
/// The `linearRampToValueAtTime()` method of the [AudioParam]
/// interface schedules a gradual linear change in the value of the
/// `AudioParam`. The change starts at the time specified for the
/// _previous_ event, follows a linear ramp to the new value given in the
/// `value` parameter, and reaches the new value at the time given in the
/// `endTime` parameter.
external AudioParam linearRampToValueAtTime(
num value,
num endTime,
);
/// The **`exponentialRampToValueAtTime()`** method of the [AudioParam]
/// interface schedules a gradual exponential change in the value of the
/// [AudioParam].
/// The change starts at the time specified for the _previous_ event, follows
/// an exponential ramp to the new value given in the `value` parameter, and
/// reaches the new value at the time given in the
/// `endTime` parameter.
///
/// > **Note:** Exponential ramps are considered more useful when changing
/// > frequencies or playback rates than linear ramps because of the way the
/// > human ear
/// > works.
external AudioParam exponentialRampToValueAtTime(
num value,
num endTime,
);
/// The `setTargetAtTime()` method of the
/// [AudioParam] interface schedules the start of a gradual change to the
/// `AudioParam` value. This is useful for decay or release portions of ADSR
/// envelopes.
external AudioParam setTargetAtTime(
num target,
num startTime,
num timeConstant,
);
/// The
/// **`setValueCurveAtTime()`** method of the
/// [AudioParam] interface schedules the parameter's value to change
/// following a curve defined by a list of values.
///
/// The curve is a linear
/// interpolation between the sequence of values defined in an array of
/// floating-point
/// values, which are scaled to fit into the interval that starts at
/// `startTime` and lasts for the given `duration`.
external AudioParam setValueCurveAtTime(
JSArray<JSNumber> values,
num startTime,
num duration,
);
/// The `cancelScheduledValues()` method of the [AudioParam]
/// interface cancels all scheduled future changes to the `AudioParam`.
external AudioParam cancelScheduledValues(num cancelTime);
/// The **`cancelAndHoldAtTime()`** method of the
/// [AudioParam] interface cancels all scheduled future changes to the
/// `AudioParam` but holds its value at a given time until further changes are
/// made using other methods.
external AudioParam cancelAndHoldAtTime(num cancelTime);
/// The [Web Audio
/// API's](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API)
/// [AudioParam] interface property **`value`** gets
/// or sets the value of this [AudioParam] at the current time. Initially, the
/// value is set to [AudioParam.defaultValue].
///
/// Setting `value` has the same effect as
/// calling [AudioParam.setValueAtTime] with the time returned by the
/// `AudioContext`'s [BaseAudioContext.currentTime]
/// property.
external double get value;
external set value(num value);
external AutomationRate get automationRate;
external set automationRate(AutomationRate value);
/// The **`defaultValue`**
/// read-only property of the [AudioParam] interface represents the initial
/// value of the attribute as defined by the specific [AudioNode] creating
/// the `AudioParam`.
external double get defaultValue;
/// The **`minValue`**
/// read-only property of the [AudioParam] interface represents the minimum
/// possible value for the parameter's nominal (effective) range.
external double get minValue;
/// The **`maxValue`**
/// read-only property of the [AudioParam] interface represents the maximum
/// possible value for the parameter's nominal (effective) range.
external double get maxValue;
}
/// The `AudioScheduledSourceNode` interface—part of the Web Audio API—is a
/// parent interface for several types of audio source node interfaces which
/// share the ability to be started and stopped, optionally at specified times.
/// Specifically, this interface defines the [AudioScheduledSourceNode.start]
/// and [AudioScheduledSourceNode.stop] methods, as well as the
/// [AudioScheduledSourceNode.ended_event] event.
///
/// > **Note:** You can't create an `AudioScheduledSourceNode` object directly.
/// > Instead, use an interface which extends it, such as
/// > [AudioBufferSourceNode], [OscillatorNode] or [ConstantSourceNode].
///
/// Unless stated otherwise, nodes based upon `AudioScheduledSourceNode` output
/// silence when not playing (that is, before `start()` is called and after
/// `stop()` is called). Silence is represented, as always, by a stream of
/// samples with the value zero (0).
///
/// ---
///
/// API documentation sourced from
/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioScheduledSourceNode).
extension type AudioScheduledSourceNode._(JSObject _)
implements AudioNode, JSObject {
/// The `start()` method on [AudioScheduledSourceNode] schedules a sound to
/// begin playback at the specified time.
/// If no time is specified, then the sound begins playing immediately.
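///
/// An illustrative sketch (not from MDN) of scheduling playback against
/// [BaseAudioContext.currentTime]:
///
/// ```dart
/// final ctx = AudioContext();
/// final osc = ctx.createOscillator();
/// osc.connect(ctx.destination);
/// final t = ctx.currentTime;
/// osc.start(t + 0.1); // begin 100 ms from now
/// osc.stop(t + 1.1); // stop one second after starting
/// ```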
external void start([num when]);
/// The `stop()` method on [AudioScheduledSourceNode] schedules a
/// sound to cease playback at the specified time. If no time is specified,
/// then the sound
/// stops playing immediately.