From 76e31d3a91460490b4a094c41d11b6846422871f Mon Sep 17 00:00:00 2001 From: larpoux Date: Sat, 28 Sep 2024 11:11:49 +0200 Subject: [PATCH] Starting devlopment of tauwars --- example/pubspec.yaml | 2 + lib/src/tauweb_audio.dart | 280 +-- lib/src/tauweb_class.dart | 158 +- webaudio.dart | 3591 +++++++++++++++++++++++++++++++++++++ 4 files changed, 3847 insertions(+), 184 deletions(-) create mode 100644 webaudio.dart diff --git a/example/pubspec.yaml b/example/pubspec.yaml index 21fb8ba..d3cce9d 100644 --- a/example/pubspec.yaml +++ b/example/pubspec.yaml @@ -25,6 +25,8 @@ dependencies: sdk: flutter tauweb: # ^9.15.61 path: ../ # tauweb Dir + tau: # ^9.15.61 + path: ../../tau # tau Dir # The following adds the Cupertino Icons font to your application. # Use with the CupertinoIcons class for iOS style icons. diff --git a/lib/src/tauweb_audio.dart b/lib/src/tauweb_audio.dart index 05bd5dd..cacd37c 100644 --- a/lib/src/tauweb_audio.dart +++ b/lib/src/tauweb_audio.dart @@ -70,11 +70,11 @@ extension type BaseAudioContext._(JSObject _) implements JSObject { ); external AudioBufferSourceNode createBufferSource(); - external ChannelMergerNode createChannelMerger([int? numberOfInputs]); - external ChannelSplitterNode createChannelSplitter([int? numberOfOutputs]); + external ChannelMergerNode createChannelMerger([int numberOfInputs]); + external ChannelSplitterNode createChannelSplitter([int numberOfOutputs]); external ConstantSourceNode createConstantSource(); external ConvolverNode createConvolver(); - external DelayNode createDelay([num? maxDelayTime]); + external DelayNode createDelay([num maxDelayTime]); external DynamicsCompressorNode createDynamicsCompressor(); external GainNode createGain(); external IIRFilterNode createIIRFilter( @@ -86,12 +86,12 @@ extension type BaseAudioContext._(JSObject _) implements JSObject { external PeriodicWave createPeriodicWave( JSArray real, JSArray imag, [ - PeriodicWaveConstraints? constraints, + PeriodicWaveConstraints constraints, ]); external ScriptProcessorNode createScriptProcessor([ - int? bufferSize, - int? numberOfInputChannels, - int? numberOfOutputChannels, + int bufferSize, + int numberOfInputChannels, + int numberOfOutputChannels, ]); external StereoPannerNode createStereoPanner(); external WaveShaperNode createWaveShaper(); @@ -121,7 +121,7 @@ extension type BaseAudioContext._(JSObject _) implements JSObject { extension type AudioContext._(JSObject _) implements BaseAudioContext, JSObject { - external factory AudioContext([AudioContextOptions? contextOptions]); + external factory AudioContext([AudioContextOptions contextOptions]); external AudioTimestamp getOutputTimestamp(); external JSPromise resume(); external JSPromise suspend(); @@ -148,10 +148,10 @@ extension type AudioContext._(JSObject _) extension type AudioContextOptions._(JSObject _) implements JSObject { external factory AudioContextOptions({ - JSAny? latencyHint, - JSNumber? sampleRate, - JSAny? sinkId, - JSAny? renderSizeHint, + JSAny latencyHint, + JSNumber sampleRate, + JSAny sinkId, + JSAny renderSizeHint, }); external JSAny get latencyHint; @@ -191,8 +191,8 @@ extension type AudioSinkOptions._(JSObject _) implements JSObject { extension type AudioTimestamp._(JSObject _) implements JSObject { external factory AudioTimestamp({ - num? contextTime, - DOMHighResTimeStamp? performanceTime, + num? contextTime, // [LARPOUX] + DOMHighResTimeStamp? 
performanceTime, // [LARPOUX] }); external double get contextTime; @@ -215,8 +215,8 @@ extension type OfflineAudioContext._(JSObject _) implements BaseAudioContext, JSObject { external factory OfflineAudioContext( JSAny contextOptionsOrNumberOfChannels, [ - int? length, - num? sampleRate, + int length, + num sampleRate, ]); external JSPromise startRendering(); external JSPromise resume(); @@ -237,10 +237,10 @@ extension type OfflineAudioContext._(JSObject _) extension type OfflineAudioContextOptions._(JSObject _) implements JSObject { external factory OfflineAudioContextOptions({ - int? numberOfChannels, + int? numberOfChannels, // [LARPOUX] required int length, required num sampleRate, - JSAny? renderSizeHint, + JSAny renderSizeHint, }); external int get numberOfChannels; @@ -283,9 +283,9 @@ extension type OfflineAudioCompletionEvent._(JSObject _) extension type OfflineAudioCompletionEventInit._(JSObject _) implements EventInit, JSObject { external factory OfflineAudioCompletionEventInit({ - bool? bubbles, - bool? cancelable, - bool? composed, + bool? bubbles, // [LARPOUX] + bool? cancelable, // [LARPOUX] + bool? composed, // [LARPOUX] required AudioBuffer renderedBuffer, }); @@ -307,8 +307,8 @@ extension type AudioBuffer._(JSObject _) implements JSObject { external JSFloat32Array getChannelData(int channel); external void copyFromChannel( JSFloat32Array destination, - int? channelNumber, [ - int? bufferOffset, + int channelNumber, [ + int bufferOffset, ]); external void copyToChannel( JSFloat32Array source, @@ -332,7 +332,7 @@ extension type AudioBuffer._(JSObject _) implements JSObject { extension type AudioBufferOptions._(JSObject _) implements JSObject { external factory AudioBufferOptions({ - int? numberOfChannels, + int? numberOfChannels, // [LARPOUX] required int length, required num sampleRate, }); @@ -357,13 +357,13 @@ extension type AudioBufferOptions._(JSObject _) implements JSObject { extension type AudioNode._(JSObject _) implements EventTarget, JSObject { external AudioNode? connect( AudioNode destinationNodeOrDestinationParam, [ - int? output, - int? input, + int output, + int input, ]); external void disconnect([ - AudioNode? destinationNodeOrDestinationParamOrOutput, - int? output, - int? input, + AudioNode destinationNodeOrDestinationParamOrOutput, + int output, + int input, ]); external BaseAudioContext get context; external int get numberOfInputs; @@ -387,9 +387,9 @@ extension type AudioNode._(JSObject _) implements EventTarget, JSObject { extension type AudioNodeOptions._(JSObject _) implements JSObject { external factory AudioNodeOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] }); external int get channelCount; @@ -454,8 +454,8 @@ extension type AudioParam._(JSObject _) implements JSObject { extension type AudioScheduledSourceNode._(JSObject _) implements AudioNode, JSObject { - external void start([num? when]); - external void stop([num? when]); + external void start([num when]); + external void stop([num when]); external EventHandler get onended; external set onended(EventHandler value); } @@ -502,13 +502,13 @@ extension type AnalyserNode._(JSObject _) implements AudioNode, JSObject { extension type AnalyserOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory AnalyserOptions({ - int? channelCount, - ChannelCountMode? 
channelCountMode, - ChannelInterpretation? channelInterpretation, - int? fftSize, - num? maxDecibels, - num? minDecibels, - num? smoothingTimeConstant, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + int? fftSize, // [LARPOUX] + num? maxDecibels, // [LARPOUX] + num? minDecibels, // [LARPOUX] + num? smoothingTimeConstant, // [LARPOUX] }); external int get fftSize; @@ -537,9 +537,9 @@ extension type AudioBufferSourceNode._(JSObject _) AudioBufferSourceOptions options, ]); external void start([ - num? when, - num? offset, - num? duration, + num when, + num offset, + num duration, ]); external AudioBuffer? get buffer; external set buffer(AudioBuffer? value); @@ -565,11 +565,11 @@ extension type AudioBufferSourceNode._(JSObject _) extension type AudioBufferSourceOptions._(JSObject _) implements JSObject { external factory AudioBufferSourceOptions({ AudioBuffer? buffer, - num? detune, - bool? loop, - num? loopEnd, - num? loopStart, - num? playbackRate, + num? detune, // [LARPOUX] + bool? loop, // [LARPOUX] + num? loopEnd, // [LARPOUX] + num? loopStart, // [LARPOUX] + num? playbackRate, // [LARPOUX] }); external AudioBuffer? get buffer; @@ -668,9 +668,9 @@ extension type AudioProcessingEvent._(JSObject _) implements Event, JSObject { extension type AudioProcessingEventInit._(JSObject _) implements EventInit, JSObject { external factory AudioProcessingEventInit({ - bool? bubbles, - bool? cancelable, - bool? composed, + bool? bubbles, // [LARPOUX] + bool? cancelable, // [LARPOUX] + bool? composed, // [LARPOUX] required num playbackTime, required AudioBuffer inputBuffer, required AudioBuffer outputBuffer, @@ -723,14 +723,14 @@ extension type BiquadFilterNode._(JSObject _) implements AudioNode, JSObject { extension type BiquadFilterOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory BiquadFilterOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - BiquadFilterType? type, - num? Q, - num? detune, - num? frequency, - num? gain, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + BiquadFilterType? type, // [LARPOUX] + num? Q, // [LARPOUX] + num? detune, // [LARPOUX] + num? frequency, // [LARPOUX] + num? gain, // [LARPOUX] }); external BiquadFilterType get type; @@ -773,10 +773,10 @@ extension type ChannelMergerNode._(JSObject _) implements AudioNode, JSObject { extension type ChannelMergerOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory ChannelMergerOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - int? numberOfInputs, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + int? numberOfInputs, // [LARPOUX] }); external int get numberOfInputs; @@ -813,10 +813,10 @@ extension type ChannelSplitterNode._(JSObject _) extension type ChannelSplitterOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory ChannelSplitterOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - int? numberOfOutputs, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + int? 
numberOfOutputs, // [LARPOUX] }); external int get numberOfOutputs; @@ -851,7 +851,7 @@ extension type ConstantSourceNode._(JSObject _) extension type ConstantSourceOptions._(JSObject _) implements AudioNodeOptions,JSObject { // LARPOUX - external factory ConstantSourceOptions({num? offset}); + external factory ConstantSourceOptions({num? offset}); // [LARPOUX] external double get offset; external set offset(num value); @@ -890,11 +890,11 @@ extension type ConvolverNode._(JSObject _) implements AudioNode, JSObject { extension type ConvolverOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory ConvolverOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] AudioBuffer? buffer, - bool? disableNormalization, + bool? disableNormalization, // [LARPOUX] }); external AudioBuffer? get buffer; @@ -933,11 +933,11 @@ extension type DelayNode._(JSObject _) implements AudioNode, JSObject { extension type DelayOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory DelayOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - num? maxDelayTime, - num? delayTime, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + num? maxDelayTime, // [LARPOUX] + num? delayTime, // [LARPOUX] }); external double get maxDelayTime; @@ -982,14 +982,14 @@ extension type DynamicsCompressorNode._(JSObject _) extension type DynamicsCompressorOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory DynamicsCompressorOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - num? attack, - num? knee, - num? ratio, - num? release, - num? threshold, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + num? attack, // [LARPOUX] + num? knee, // [LARPOUX] + num? ratio, // [LARPOUX] + num? release, // [LARPOUX] + num? threshold, // [LARPOUX] }); external double get attack; @@ -1032,10 +1032,10 @@ extension type GainNode._(JSObject _) implements AudioNode, JSObject { extension type GainOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory GainOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - num? gain, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + num? gain, // [LARPOUX] }); external double get gain; @@ -1076,9 +1076,9 @@ extension type IIRFilterNode._(JSObject _) implements AudioNode, JSObject { extension type IIRFilterOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory IIRFilterOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? 
channelInterpretation, // [LARPOUX] required JSArray feedforward, required JSArray feedback, }); @@ -1249,13 +1249,13 @@ extension type OscillatorNode._(JSObject _) extension type OscillatorOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory OscillatorOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - OscillatorType? type, - num? frequency, - num? detune, - PeriodicWave? periodicWave, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + OscillatorType? type, // [LARPOUX] + num? frequency, // [LARPOUX] + num? detune, // [LARPOUX] + PeriodicWave? periodicWave, // [LARPOUX] }); external OscillatorType get type; @@ -1328,23 +1328,23 @@ extension type PannerNode._(JSObject _) implements AudioNode, JSObject { extension type PannerOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory PannerOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - PanningModelType? panningModel, - DistanceModelType? distanceModel, - num? positionX, - num? positionY, - num? positionZ, - num? orientationX, - num? orientationY, - num? orientationZ, - num? refDistance, - num? maxDistance, - num? rolloffFactor, - num? coneInnerAngle, - num? coneOuterAngle, - num? coneOuterGain, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + PanningModelType? panningModel, // [LARPOUX] + DistanceModelType? distanceModel, // [LARPOUX] + num? positionX, // [LARPOUX] + num? positionY, // [LARPOUX] + num? positionZ, // [LARPOUX] + num? orientationX, // [LARPOUX] + num? orientationY, // [LARPOUX] + num? orientationZ, // [LARPOUX] + num? refDistance, // [LARPOUX] + num? maxDistance, // [LARPOUX] + num? rolloffFactor, // [LARPOUX] + num? coneInnerAngle, // [LARPOUX] + num? coneOuterAngle, // [LARPOUX] + num? coneOuterGain, // [LARPOUX] }); external PanningModelType get panningModel; @@ -1403,7 +1403,7 @@ extension type PeriodicWave._(JSObject _) implements JSObject { extension type PeriodicWaveConstraints._(JSObject _) implements JSObject { - external factory PeriodicWaveConstraints({bool? disableNormalization}); + external factory PeriodicWaveConstraints({bool? disableNormalization}); // [LARPOUX] external bool get disableNormalization; external set disableNormalization(bool value); @@ -1421,9 +1421,9 @@ extension type PeriodicWaveConstraints._(JSObject _) implements JSObject { extension type PeriodicWaveOptions._(JSObject _) implements PeriodicWaveConstraints, JSObject { external factory PeriodicWaveOptions({ - bool? disableNormalization, - JSArray? real, - JSArray? imag, + bool? disableNormalization, // [LARPOUX] + JSArray? real, // [LARPOUX] + JSArray? imag, // [LARPOUX] }); external JSArray get real; @@ -1477,10 +1477,10 @@ extension type StereoPannerNode._(JSObject _) implements AudioNode, JSObject { extension type StereoPannerOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory StereoPannerOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - num? pan, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + num? 
pan, // [LARPOUX] }); external double get pan; @@ -1501,8 +1501,8 @@ extension type WaveShaperNode._(JSObject _) implements AudioNode, JSObject { BaseAudioContext context, [ WaveShaperOptions options, ]); - external JSFloat32Array? get curve; - external set curve(JSFloat32Array? value); + external JSFloat32Array get curve; + external set curve(JSFloat32Array value); external OverSampleType get oversample; external set oversample(OverSampleType value); } @@ -1519,11 +1519,11 @@ extension type WaveShaperNode._(JSObject _) implements AudioNode, JSObject { extension type WaveShaperOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory WaveShaperOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - JSArray? curve, - OverSampleType? oversample, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + JSArray? curve, // [LARPOUX] + OverSampleType? oversample, // [LARPOUX] }); external JSArray get curve; @@ -1587,14 +1587,14 @@ extension type AudioWorkletNode._(JSObject _) implements AudioNode, JSObject { extension type AudioWorkletNodeOptions._(JSObject _) implements AudioNodeOptions, JSObject { external factory AudioWorkletNodeOptions({ - int? channelCount, - ChannelCountMode? channelCountMode, - ChannelInterpretation? channelInterpretation, - int? numberOfInputs, - int? numberOfOutputs, - JSArray? outputChannelCount, - JSObject? parameterData, - JSObject? processorOptions, + int? channelCount, // [LARPOUX] + ChannelCountMode? channelCountMode, // [LARPOUX] + ChannelInterpretation? channelInterpretation, // [LARPOUX] + int? numberOfInputs, // [LARPOUX] + int? numberOfOutputs, // [LARPOUX] + JSArray? outputChannelCount, // [LARPOUX] + JSObject? parameterData, // [LARPOUX] + JSObject? processorOptions, // [LARPOUX] }); external int get numberOfInputs; diff --git a/lib/src/tauweb_class.dart b/lib/src/tauweb_class.dart index f5b73cb..9c74ed7 100644 --- a/lib/src/tauweb_class.dart +++ b/lib/src/tauweb_class.dart @@ -33,6 +33,7 @@ library; import 'dart:typed_data'; import 'package:tau/tau.dart' as t; import 'tauweb_audio.dart' as j; +//import 'webaudio.dart' as j; import 'tauweb_interop.dart'; import 'dart:js_interop'; import 'dart:html' as h; @@ -58,12 +59,15 @@ abstract class BaseAudioContext implements t.BaseAudioContext { ) => AudioBuffer.fromDelegate(getDelegate().createBuffer(numberOfChannels, length, sampleRate)); AudioBufferSourceNode createBufferSource() => AudioBufferSourceNode.fromDelegate(getDelegate().createBufferSource()); - ChannelMergerNode createChannelMerger([int? numberOfInputs]) => ChannelMergerNode.fromDelegate(getDelegate().createChannelMerger(numberOfInputs)); + ChannelMergerNode createChannelMerger([int? numberOfInputs]) => numberOfInputs == null ? ChannelMergerNode.fromDelegate(getDelegate().createChannelMerger() ) : + ChannelMergerNode.fromDelegate(getDelegate().createChannelMerger(numberOfInputs)); - ChannelSplitterNode createChannelSplitter([int? numberOfOutputs]) => ChannelSplitterNode.fromDelegate(getDelegate().createChannelSplitter(numberOfOutputs)); + ChannelSplitterNode createChannelSplitter([int? numberOfOutputs]) => numberOfOutputs == null ? 
ChannelSplitterNode.fromDelegate(getDelegate().createChannelSplitter()) : + ChannelSplitterNode.fromDelegate(getDelegate().createChannelSplitter(numberOfOutputs)); ConstantSourceNode createConstantSource() => ConstantSourceNode.fromDelegate(getDelegate().createConstantSource()); ConvolverNode createConvolver() => ConvolverNode.fromDelegate(getDelegate().createConvolver()); - DelayNode createDelay([num? maxDelayTime]) => DelayNode.fromDelegate(getDelegate().createDelay(maxDelayTime)); + DelayNode createDelay([num? maxDelayTime]) => maxDelayTime == null ? DelayNode.fromDelegate(getDelegate().createDelay()) : + DelayNode.fromDelegate(getDelegate().createDelay(maxDelayTime)); DynamicsCompressorNode createDynamicsCompressor() => DynamicsCompressorNode.fromDelegate(getDelegate().createDynamicsCompressor()); GainNode createGain() => GainNode.fromDelegate(getDelegate().createGain()); @@ -79,13 +83,18 @@ abstract class BaseAudioContext implements t.BaseAudioContext { t.TauArray real, t.TauArray imag, [ t.PeriodicWaveConstraints? constraints, - ]) => PeriodicWave.fromDelegate(getDelegate().createPeriodicWave(Interop().jsArrayNumber(real), Interop().jsArrayNumber(imag), (constraints as PeriodicWaveConstraints).getDelegate())); + ]) => + constraints == null ? PeriodicWave.fromDelegate(getDelegate().createPeriodicWave(Interop().jsArrayNumber(real), Interop().jsArrayNumber(imag))) : + PeriodicWave.fromDelegate(getDelegate().createPeriodicWave(Interop().jsArrayNumber(real), Interop().jsArrayNumber(imag), (constraints as PeriodicWaveConstraints).getDelegate())); ScriptProcessorNode createScriptProcessor([ int? bufferSize, int? numberOfInputChannels, int? numberOfOutputChannels, - ]) => ScriptProcessorNode.fromDelegate(getDelegate().createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels)); + ]) => bufferSize == null ? ScriptProcessorNode.fromDelegate(getDelegate().createScriptProcessor()) : + numberOfInputChannels == null ? ScriptProcessorNode.fromDelegate(getDelegate().createScriptProcessor(bufferSize)) : + numberOfOutputChannels == null ? ScriptProcessorNode.fromDelegate(getDelegate().createScriptProcessor(bufferSize, numberOfInputChannels)): + ScriptProcessorNode.fromDelegate(getDelegate().createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels)); StereoPannerNode createStereoPanner() => StereoPannerNode.fromDelegate(getDelegate().createStereoPanner()); WaveShaperNode createWaveShaper() => WaveShaperNode.fromDelegate(getDelegate().createWaveShaper()); @@ -120,7 +129,8 @@ class AudioContext extends BaseAudioContext implements t.AudioContext { j.AudioContext delegate; j.BaseAudioContext getDelegate() => delegate; /* ctor */ AudioContext.fromDelegate(this.delegate); - /* ctor */ AudioContext([t.AudioContextOptions? contextOptions]) : delegate = j.AudioContext((contextOptions as AudioContextOptions?)?.delegate); + /* ctor */ AudioContext([t.AudioContextOptions? contextOptions]) : delegate = contextOptions == null ? j.AudioContext() : + j.AudioContext((contextOptions as AudioContextOptions).delegate); AudioTimestamp getOutputTimestamp() => AudioTimestamp.fromDelegate(delegate.getOutputTimestamp()); t.TauPromise resume() => delegate.resume().toDart; @@ -159,7 +169,8 @@ class AudioContextOptions implements t.AudioContextOptions { num? sampleRate, dynamic sinkId, // t.TauAny? sinkId, dynamic renderSizeHint, // t.TauAny? 
renderSizeHint, - }) : delegate = j.AudioContextOptions(latencyHint: latencyHint, sampleRate: sampleRate?.toJS, sinkId: sinkId, renderSizeHint: renderSizeHint); + }) : delegate = sampleRate == null ? j.AudioContextOptions(latencyHint: latencyHint, sinkId: sinkId, renderSizeHint: renderSizeHint) : + j.AudioContextOptions(latencyHint: latencyHint, sampleRate: sampleRate.toJS, sinkId: sinkId, renderSizeHint: renderSizeHint); t.TauAny get latencyHint => delegate.latencyHint; @@ -239,7 +250,9 @@ class OfflineAudioContext extends BaseAudioContext implements t.OfflineAudioCont [ int? length, num? sampleRate, - ]) : delegate = j. OfflineAudioContext(contextOptionsOrNumberOfChannels, length, sampleRate); + ]) : delegate = length == null ? j.OfflineAudioContext(contextOptionsOrNumberOfChannels) : + sampleRate == null ? j.OfflineAudioContext(contextOptionsOrNumberOfChannels, length) : + j.OfflineAudioContext(contextOptionsOrNumberOfChannels, length, sampleRate); t.TauPromise startRendering() => delegate.startRendering().toDart.then( (e){ return AudioBuffer.fromDelegate(e);}); t.TauPromise resume() => delegate.resume().toDart; @@ -369,11 +382,14 @@ class AudioBuffer implements t.AudioBuffer { t.TauFloat32Array destination, int channelNumber, [ int? bufferOffset, - ]) => delegate.copyFromChannel( - destination.toJS, - channelNumber, - bufferOffset, - ); + ]) => bufferOffset == null ? delegate.copyFromChannel( + destination.toJS, + channelNumber,) : + delegate.copyFromChannel( + destination.toJS, + channelNumber, + bufferOffset, + ); void copyToChannel( t.TauFloat32Array source, int channelNumber, [ @@ -431,13 +447,18 @@ abstract class AudioNode implements t.AudioNode { [ int? output, int? input, - ]) { getDelegate().connect((destinationNodeOrDestinationParam as AudioNode).getDelegate(), output, input); return destinationNodeOrDestinationParam;} + ]) { output == null ? getDelegate().connect((destinationNodeOrDestinationParam as AudioNode).getDelegate()) : + input == null ? getDelegate().connect((destinationNodeOrDestinationParam as AudioNode).getDelegate(), output,) : + getDelegate().connect((destinationNodeOrDestinationParam as AudioNode).getDelegate(), output, input); + return destinationNodeOrDestinationParam;} void disconnect([ // !!!! The DestinationParamOrOutput case is not handled t.AudioNode? destinationNodeOrDestinationParamOrOutput, // t.TauAny? destinationNodeOrDestinationParamOrOutput, int? output, int? input, - ]) => getDelegate().disconnect((destinationNodeOrDestinationParamOrOutput as AudioNode?)?.getDelegate(), output, input); + ]) => output == null ? getDelegate().disconnect((destinationNodeOrDestinationParamOrOutput as AudioNode).getDelegate()) : + input == null ? getDelegate().disconnect((destinationNodeOrDestinationParamOrOutput as AudioNode).getDelegate(), output,) : + getDelegate().disconnect((destinationNodeOrDestinationParamOrOutput as AudioNode).getDelegate(), output, input); BaseAudioContext get context => AudioContext.fromDelegate(getDelegate().context as j.AudioContext); int get numberOfInputs => getDelegate().numberOfInputs; int get numberOfOutputs => getDelegate().numberOfOutputs; @@ -562,9 +583,9 @@ abstract class AudioScheduledSourceNode extends AudioNode implements t.AudioSche t.EventHandler _onEnded = (){}; j.AudioScheduledSourceNode getDelegate(); - void start([num? when]) => getDelegate().start(when); + void start([num? when]) => when == null ? getDelegate().start() : getDelegate().start(when); - void stop([num? when]) => getDelegate().stop(when); + void stop([num? 
when]) => when == null ? getDelegate().stop() : getDelegate().stop(when); t.EventHandler get onended => _onEnded; @@ -592,7 +613,7 @@ class AnalyserNode extends AudioNode implements t.AnalyserNode { /* ctor */ AnalyserNode( t.BaseAudioContext context, [ t.AnalyserOptions? options, - ]) : delegate = j.AnalyserNode((context as AudioContext).delegate, (options as AnalyserOptions).delegate); + ]) : delegate = options == null ? j.AnalyserNode((context as AudioContext).delegate) : j.AnalyserNode((context as AudioContext).delegate, (options! as AnalyserOptions).delegate); void getFloatFrequencyData(t.TauFloat32Array array) => delegate.getFloatFrequencyData(array.toJS); void getByteFrequencyData(t.TauUint8Array array) => delegate.getByteFrequencyData(array.toJS); @@ -669,16 +690,20 @@ class AudioBufferSourceNode extends AudioScheduledSourceNode implements t.AudioB /* ctor */ AudioBufferSourceNode( t.BaseAudioContext context, [ t.AudioBufferSourceOptions? options, - ]) : delegate = j.AudioBufferSourceNode((context as AudioContext).delegate, (options as AudioBufferSourceOptions).delegate); + ]) : delegate = options == null ? j.AudioBufferSourceNode((context as AudioContext).delegate) : + j.AudioBufferSourceNode((context as AudioContext).delegate, (options as AudioBufferSourceOptions).delegate); void start([ num? when, num? offset, num? duration, - ]) => delegate.start(when, offset, null /* !!! LARPOUX, duration */); + ]) => when == null ? delegate.start() : + offset == null ? delegate.start(when,): + duration == null ? delegate.start(when, offset,) : + delegate.start(when, offset, duration); AudioBuffer? get buffer => delegate.buffer == null ? null : AudioBuffer.fromDelegate(delegate.buffer!); - set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer).delegate; + set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer?)?.delegate; AudioParam get playbackRate => AudioParam.fromDelegate(delegate.playbackRate); AudioParam get detune => AudioParam.fromDelegate(delegate.detune); bool get loop => delegate.loop; @@ -712,7 +737,7 @@ class AudioBufferSourceOptions implements t.AudioBufferSourceOptions { num? loopStart, num? playbackRate, }) : delegate = j.AudioBufferSourceOptions( - buffer: (buffer as AudioBuffer)?.delegate, + buffer: (buffer as AudioBuffer?)?.delegate, detune: detune, loop: loop, loopEnd: loopEnd, @@ -721,7 +746,7 @@ class AudioBufferSourceOptions implements t.AudioBufferSourceOptions { ); AudioBuffer? get buffer => delegate.buffer == null ? null : AudioBuffer.fromDelegate(delegate.buffer!); - set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer).delegate; + set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer?)?.delegate; double get detune => delegate.detune; set detune(num value) => delegate.detune = value; bool get loop => delegate.loop; @@ -880,7 +905,8 @@ class BiquadFilterNode extends AudioNode implements t.BiquadFilterNode { /* ctor */ BiquadFilterNode( t.BaseAudioContext context, [ t.BiquadFilterOptions? options, - ]) : delegate = j.BiquadFilterNode((context as BaseAudioContext).getDelegate(), (options as BiquadFilterOptions).delegate); + ]) : delegate = options == null ? 
j.BiquadFilterNode((context as BaseAudioContext).getDelegate()) : + j.BiquadFilterNode((context as BaseAudioContext).getDelegate(), (options as BiquadFilterOptions).delegate); void getFrequencyResponse( t.TauFloat32Array frequencyHz, @@ -960,7 +986,8 @@ class ChannelMergerNode extends AudioNode implements t.ChannelMergerNode { /* ctor */ ChannelMergerNode( t.BaseAudioContext context, [ t.ChannelMergerOptions? options, - ]) : delegate = j.ChannelMergerNode((context as BaseAudioContext).getDelegate(), (options as ChannelMergerOptions).delegate); + ]) : delegate = options == null ? j.ChannelMergerNode((context as BaseAudioContext).getDelegate()) : + j.ChannelMergerNode((context as BaseAudioContext).getDelegate(), (options as ChannelMergerOptions).delegate); } @@ -1013,7 +1040,8 @@ class ChannelSplitterNode extends AudioNode implements t.ChannelSplitterNode { /* ctor */ ChannelSplitterNode( t.BaseAudioContext context, [ t.ChannelSplitterOptions? options, - ]) : delegate = j.ChannelSplitterNode((context as BaseAudioContext).getDelegate(), (options as ChannelSplitterOptions).delegate); + ]) : delegate = options == null ? j.ChannelSplitterNode((context as BaseAudioContext).getDelegate()) : + j.ChannelSplitterNode((context as BaseAudioContext).getDelegate(), (options as ChannelSplitterOptions).delegate); } @@ -1069,7 +1097,8 @@ class ConstantSourceNode extends AudioScheduledSourceNode implements t.ConstantS /* ctor */ ConstantSourceNode( t.BaseAudioContext context, [ t.ConstantSourceOptions? options, - ]) : delegate = j.ConstantSourceNode((context as BaseAudioContext).getDelegate(), (options as ConstantSourceOptions).delegate); + ]) : delegate = options == null ? j.ConstantSourceNode((context as BaseAudioContext).getDelegate()) : + j.ConstantSourceNode((context as BaseAudioContext).getDelegate(), (options as ConstantSourceOptions).delegate); AudioParam get offset => AudioParam.fromDelegate(delegate.offset); } @@ -1115,10 +1144,11 @@ class ConvolverNode extends AudioNode implements t.ConvolverNode { /* ctor */ ConvolverNode( t.BaseAudioContext context, [ t.ConvolverOptions? options, - ]) : delegate = j.ConvolverNode((context as BaseAudioContext).getDelegate(), (options as ConvolverOptions).delegate); + ]) : delegate = options == null ? j.ConvolverNode((context as BaseAudioContext).getDelegate()) : + j.ConvolverNode((context as BaseAudioContext).getDelegate(), (options as ConvolverOptions).delegate); AudioBuffer? get buffer => delegate.buffer == null ? null : AudioBuffer.fromDelegate(delegate.buffer!); - set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer).delegate; + set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer?)?.delegate; bool get normalize => delegate.normalize; set normalize(bool value) => delegate.normalize = value; } @@ -1153,7 +1183,7 @@ class ConvolverOptions extends AudioNodeOptions implements t.ConvolverOptions { ); AudioBuffer? get buffer => delegate.buffer == null ? null : AudioBuffer.fromDelegate(delegate.buffer!); - set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer).delegate; + set buffer(t.AudioBuffer? value) => delegate.buffer = (value as AudioBuffer?)?.delegate; bool get disableNormalization => delegate.disableNormalization; set disableNormalization(bool value) => delegate.disableNormalization = value; } @@ -1177,7 +1207,8 @@ class DelayNode extends AudioNode implements t.DelayNode { /* ctor */ DelayNode( t.BaseAudioContext context, [ t.DelayOptions? 
options, - ]) : delegate = j.DelayNode((context as BaseAudioContext).getDelegate(), (options as DelayOptions).delegate); + ]) : delegate = options == null ? j.DelayNode((context as BaseAudioContext).getDelegate()) : + j.DelayNode((context as BaseAudioContext).getDelegate(), (options as DelayOptions).delegate); AudioParam get delayTime => AudioParam.fromDelegate(delegate.delayTime); } @@ -1235,7 +1266,8 @@ class DynamicsCompressorNode extends AudioNode implements t.DynamicsCompressorNo /* ctor */ DynamicsCompressorNode( t.BaseAudioContext context, [ t.DynamicsCompressorOptions? options, - ]) : delegate = j.DynamicsCompressorNode((context as BaseAudioContext).getDelegate(), (options as DynamicsCompressorOptions).delegate); + ]) : delegate = options == null ? j.DynamicsCompressorNode((context as BaseAudioContext).getDelegate()) : + j.DynamicsCompressorNode((context as BaseAudioContext).getDelegate(), (options as DynamicsCompressorOptions).delegate); AudioParam get threshold => AudioParam.fromDelegate(delegate.threshold); AudioParam get knee => AudioParam.fromDelegate(delegate.knee); @@ -1312,7 +1344,8 @@ class GainNode extends AudioNode implements t.GainNode{ /* ctor */ GainNode( t.BaseAudioContext context, [ t.GainOptions? options, - ]) : delegate = j.GainNode((context as BaseAudioContext).getDelegate(), (options as GainOptions).delegate); + ]) : delegate = options == null ? j.GainNode((context as BaseAudioContext).getDelegate()) : + j.GainNode((context as BaseAudioContext).getDelegate(), (options as GainOptions).delegate); AudioParam get gain => AudioParam.fromDelegate(delegate.gain); } @@ -1487,7 +1520,8 @@ class MediaStreamAudioDestinationNode extends AudioNode implements t.MediaStream /* ctor */ MediaStreamAudioDestinationNode( t.AudioContext context, [ t.AudioNodeOptions? options, - ]) : delegate = j.MediaStreamAudioDestinationNode((context as AudioContext).delegate, (options as AudioNodeOptions).getDelegate()); + ]) : delegate = options == null ? j.MediaStreamAudioDestinationNode((context as AudioContext).delegate) : + j.MediaStreamAudioDestinationNode((context as AudioContext).delegate, (options as AudioNodeOptions).getDelegate()); t.MediaStream get stream => MediaStream.fromDelegate(delegate.stream); } @@ -1608,7 +1642,8 @@ class OscillatorNode extends AudioScheduledSourceNode implements t.OscillatorNod /* ctor */ OscillatorNode( t.BaseAudioContext context, [ t.OscillatorOptions? options, - ]) : delegate = j.OscillatorNode((context as AudioContext).delegate, (options as OscillatorOptions).delegate); + ]) : delegate = options == null ? j.OscillatorNode((context as AudioContext).delegate) : + j.OscillatorNode((context as AudioContext).delegate, (options as OscillatorOptions).delegate); void setPeriodicWave(t.PeriodicWave periodicWave) => delegate.setPeriodicWave((periodicWave as PeriodicWave).delegate); t.OscillatorType get type => delegate.type; @@ -1651,7 +1686,7 @@ class OscillatorOptions extends AudioNodeOptions implements t.OscillatorOptions type: type, frequency: frequency, detune: detune, - periodicWave: (periodicWave as PeriodicWave).delegate, + periodicWave: (periodicWave as PeriodicWave?)?.delegate, ); t.OscillatorType get type => delegate.type; @@ -1684,7 +1719,8 @@ class PannerNode extends AudioNode implements t.PannerNode { /* ctor */ PannerNode( t.BaseAudioContext context, [ t.PannerOptions? options, - ]) : delegate = j.PannerNode((context as AudioContext).delegate, (options as PannerOptions).delegate); + ]) : delegate = options == null ? 
j.PannerNode((context as AudioContext).delegate) : + j.PannerNode((context as AudioContext).delegate, (options as PannerOptions).delegate); void setPosition( num x, @@ -1829,7 +1865,8 @@ class PeriodicWave implements t.PeriodicWave { /* ctor */ PeriodicWave( t.BaseAudioContext context, [ t.PeriodicWaveOptions? options, - ]) : delegate = j.PeriodicWave((context as AudioContext).delegate, (options as PeriodicWaveOptions).delegate); + ]) : delegate = options == null ? j.PeriodicWave((context as AudioContext).delegate) : + j.PeriodicWave((context as AudioContext).delegate, (options as PeriodicWaveOptions).delegate); } @@ -1942,7 +1979,8 @@ class StereoPannerNode extends AudioNode implements t.StereoPannerNode { /* ctor */ StereoPannerNode( t.BaseAudioContext context, [ t.StereoPannerOptions? options, - ]) : delegate = j.StereoPannerNode((context as AudioContext).delegate, (options as StereoPannerOptions).delegate); + ]) : delegate = options == null ? j.StereoPannerNode((context as AudioContext).delegate) : + j.StereoPannerNode((context as AudioContext).delegate, (options as StereoPannerOptions).delegate); AudioParam get pan => AudioParam.fromDelegate(delegate.pan); } @@ -1999,10 +2037,11 @@ class WaveShaperNode extends AudioNode implements t.WaveShaperNode { /* ctor */ WaveShaperNode( t.BaseAudioContext context, [ t.WaveShaperOptions? options, - ]) :delegate = j.WaveShaperNode((context as AudioContext).delegate, (options as WaveShaperOptions).delegate); + ]) : delegate = options == null ? j.WaveShaperNode((context as AudioContext).delegate) : + j.WaveShaperNode((context as AudioContext).delegate, (options as WaveShaperOptions).delegate); - t.TauFloat32Array? get curve => delegate.curve?.toDart; - set curve(t.TauFloat32Array? value) => delegate.curve = value?.toJS; + t.TauFloat32Array get curve => delegate.curve.toDart; + set curve(t.TauFloat32Array value) => delegate.curve = value.toJS; t.OverSampleType get oversample => delegate.oversample; set oversample(t.OverSampleType value) => delegate.oversample = value; } @@ -2132,7 +2171,8 @@ class AudioWorkletNode extends AudioNode implements t.AudioWorkletNode { t.BaseAudioContext context, String name, [ t.AudioWorkletNodeOptions? options, - ]) : delegate = j.AudioWorkletNode((context as AudioContext).delegate, name, (options as AudioWorkletNodeOptions).delegate); + ]) : delegate = options == null ? 
j.AudioWorkletNode((context as AudioContext).delegate, name) : + j.AudioWorkletNode((context as AudioContext).delegate, name, (options as AudioWorkletNodeOptions).delegate); t.AudioParamMap get parameters => AudioParamMap.fromDelegate(delegate.parameters); @@ -2152,7 +2192,7 @@ class AudioWorkletNode extends AudioNode implements t.AudioWorkletNode { // ================================================================================================= -// Added because of Tau_waweb +// Added because of Tau_web // ================================================================================================= @@ -2167,6 +2207,11 @@ class MediaStream implements t.MediaStream } + +// ------------------------------------------------------------------------------------------------------------------ + + + class MediaStreamTrack implements t.MediaStreamTrack { w.MediaStreamTrack delegate; @@ -2179,6 +2224,11 @@ class MediaStreamTrack implements t.MediaStreamTrack } + +// ------------------------------------------------------------------------------------------------------------------ + + + /* class Worklet implements t.Worklet { @@ -2191,6 +2241,11 @@ class Worklet implements t.Worklet } */ + +// ------------------------------------------------------------------------------------------------------------------ + + + /* class WorkletGlobalScope implements t.WorkletGlobalScope { @@ -2203,6 +2258,11 @@ class WorkletGlobalScope implements t.WorkletGlobalScope } */ + +// ------------------------------------------------------------------------------------------------------------------ + + + class MessagePort implements t.MessagePort { w.MessagePort delegate; @@ -2226,6 +2286,11 @@ class ProcessorOptions implements t.ProcessorOptions } +// ------------------------------------------------------------------------------------------------------------------ + + + + class ParameterData implements t.ParameterData { @@ -2237,6 +2302,11 @@ class ParameterData implements t.ParameterData +// ------------------------------------------------------------------------------------------------------------------ + + + + class AudioWorkletNodeOptions extends AudioNodeOptions implements t.AudioWorkletNodeOptions { diff --git a/webaudio.dart b/webaudio.dart new file mode 100644 index 0000000..d097ebc --- /dev/null +++ b/webaudio.dart @@ -0,0 +1,3591 @@ +// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file +// for details. All rights reserved. Use of this source code is governed by a +// BSD-style license that can be found in the LICENSE file. +// +// API docs from [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web). +// Attributions and copyright licensing by Mozilla Contributors is licensed +// under [CC-BY-SA 2.5](https://creativecommons.org/licenses/by-sa/2.5/. + +// Generated from Web IDL definitions. 
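+
+// A minimal usage sketch of these bindings (illustrative only, not part of the
+// generated IDL output); it assumes the extension types below map one-to-one
+// onto the standard Web Audio API:
+//
+//   final ctx = AudioContext();
+//   final osc = ctx.createOscillator()..connect(ctx.destination);
+//   osc.start();
+//   osc.stop(ctx.currentTime + 1); // one second of the default 440 Hz tone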
+ +// ignore_for_file: unintended_html_in_doc_comment + +@JS() +library; + +import 'dart:js_interop'; + +import 'dom.dart'; +import 'hr_time.dart'; +import 'html.dart'; +import 'mediacapture_streams.dart'; + +typedef DecodeErrorCallback = JSFunction; +typedef DecodeSuccessCallback = JSFunction; +typedef AudioWorkletProcessorConstructor = JSFunction; +typedef AudioContextState = String; +typedef AudioContextRenderSizeCategory = String; +typedef AudioContextLatencyCategory = String; +typedef AudioSinkType = String; +typedef ChannelCountMode = String; +typedef ChannelInterpretation = String; +typedef AutomationRate = String; +typedef BiquadFilterType = String; +typedef OscillatorType = String; +typedef PanningModelType = String; +typedef DistanceModelType = String; +typedef OverSampleType = String; + +/// The `BaseAudioContext` interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// acts as a base definition for online and offline audio-processing graphs, as +/// represented by [AudioContext] and [OfflineAudioContext] respectively. You +/// wouldn't use `BaseAudioContext` directly — you'd use its features via one of +/// these two inheriting interfaces. +/// +/// A `BaseAudioContext` can be a target of events, therefore it implements the +/// [EventTarget] interface. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext). +extension type BaseAudioContext._(JSObject _) implements EventTarget, JSObject { + /// The `createAnalyser()` method of the + /// [BaseAudioContext] interface creates an [AnalyserNode], which + /// can be used to expose audio time and frequency data and create data + /// visualizations. + /// + /// > **Note:** The [AnalyserNode.AnalyserNode] constructor is the + /// > recommended way to create an [AnalyserNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + /// + /// > **Note:** For more on using this node, see the + /// > [AnalyserNode] page. + external AnalyserNode createAnalyser(); + + /// The `createBiquadFilter()` method of the [BaseAudioContext] + /// interface creates a [BiquadFilterNode], which represents a second order + /// filter configurable as several different common filter types. + /// + /// > **Note:** The [BiquadFilterNode.BiquadFilterNode] constructor is the + /// > recommended way to create a [BiquadFilterNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external BiquadFilterNode createBiquadFilter(); + + /// The `createBuffer()` method of the [BaseAudioContext] + /// Interface is used to create a new, empty [AudioBuffer] object, which + /// can then be populated by data, and played via an [AudioBufferSourceNode]. + /// + /// For more details about audio buffers, check out the [AudioBuffer] + /// reference page. + /// + /// > **Note:** `createBuffer()` used to be able to take compressed + /// > data and give back decoded samples, but this ability was removed from + /// > the specification, + /// > because all the decoding was done on the main thread, so + /// > `createBuffer()` was blocking other code execution. The asynchronous + /// > method + /// > `decodeAudioData()` does the same thing — takes compressed audio, such + /// > as an + /// > MP3 file, and directly gives you back an [AudioBuffer] that you can + /// > then play via an [AudioBufferSourceNode]. 
For simple use cases + /// > like playing an MP3, `decodeAudioData()` is what you should be using. + external AudioBuffer createBuffer( + int numberOfChannels, + int length, + num sampleRate, + ); + + /// The `createBufferSource()` method of the [BaseAudioContext] + /// Interface is used to create a new [AudioBufferSourceNode], which can be + /// used to play audio data contained within an [AudioBuffer] object. + /// [AudioBuffer]s are created using [BaseAudioContext.createBuffer] or + /// returned by [BaseAudioContext.decodeAudioData] when it successfully + /// decodes an audio track. + /// + /// > **Note:** The [AudioBufferSourceNode.AudioBufferSourceNode] + /// > constructor is the recommended way to create a [AudioBufferSourceNode]; + /// > see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external AudioBufferSourceNode createBufferSource(); + + /// The `createChannelMerger()` method of the [BaseAudioContext] interface + /// creates a [ChannelMergerNode], + /// which combines channels from multiple audio streams into a single audio + /// stream. + /// + /// > **Note:** The [ChannelMergerNode.ChannelMergerNode] constructor is the + /// > recommended way to create a [ChannelMergerNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external ChannelMergerNode createChannelMerger([int numberOfInputs]); + + /// The `createChannelSplitter()` method of the [BaseAudioContext] Interface + /// is used to create a [ChannelSplitterNode], + /// which is used to access the individual channels of an audio stream and + /// process them separately. + /// + /// > **Note:** The [ChannelSplitterNode.ChannelSplitterNode] + /// > constructor is the recommended way to create a [ChannelSplitterNode]; + /// > see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external ChannelSplitterNode createChannelSplitter([int numberOfOutputs]); + + /// The **`createConstantSource()`** + /// property of the [BaseAudioContext] interface creates a + /// [ConstantSourceNode] object, which is an audio source that continuously + /// outputs a monaural (one-channel) sound signal whose samples all have the + /// same + /// value. + /// + /// > **Note:** The [ConstantSourceNode.ConstantSourceNode] + /// > constructor is the recommended way to create a [ConstantSourceNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external ConstantSourceNode createConstantSource(); + + /// The `createConvolver()` method of the [BaseAudioContext] + /// interface creates a [ConvolverNode], which is commonly used to apply + /// reverb effects to your audio. See the + /// [spec definition of Convolution](https://webaudio.github.io/web-audio-api/#background-3) + /// for more information. + /// + /// > **Note:** The [ConvolverNode.ConvolverNode] + /// > constructor is the recommended way to create a [ConvolverNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external ConvolverNode createConvolver(); + + /// The `createDelay()` method of the + /// [BaseAudioContext] Interface is used to create a [DelayNode], + /// which is used to delay the incoming audio signal by a certain amount of + /// time. 
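+///
+/// For example, a short sketch assuming an existing context `ctx` and a
+/// connected `source` node:
+/// ```dart
+/// final delay = ctx.createDelay(5.0); // allow delays of up to five seconds
+/// source.connect(delay);
+/// delay.connect(ctx.destination);
+/// ```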
+ /// + /// > **Note:** The [DelayNode.DelayNode] + /// > constructor is the recommended way to create a [DelayNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external DelayNode createDelay([num maxDelayTime]); + + /// The `createDynamicsCompressor()` method of the [BaseAudioContext] + /// Interface is used to create a [DynamicsCompressorNode], which can be used + /// to apply compression to an audio signal. + /// + /// Compression lowers the volume of the loudest parts of the signal and + /// raises the volume + /// of the softest parts. Overall, a louder, richer, and fuller sound can be + /// achieved. It is + /// especially important in games and musical applications where large numbers + /// of individual + /// sounds are played simultaneously, where you want to control the overall + /// signal level and + /// help avoid clipping (distorting) of the audio output. + /// + /// > **Note:** The [DynamicsCompressorNode.DynamicsCompressorNode] + /// > constructor is the recommended way to create a [DynamicsCompressorNode]; + /// > see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external DynamicsCompressorNode createDynamicsCompressor(); + + /// The `createGain()` method of the [BaseAudioContext] + /// interface creates a [GainNode], which can be used to control the + /// overall gain (or volume) of the audio graph. + /// + /// > **Note:** The [GainNode.GainNode] + /// > constructor is the recommended way to create a [GainNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external GainNode createGain(); + + /// The **`createIIRFilter()`** method of the [BaseAudioContext] interface + /// creates an [IIRFilterNode], which represents a general + /// **[infinite impulse response](https://en.wikipedia.org/wiki/Infinite_impulse_response)** + /// (IIR) filter which can be configured to serve as various types of filter. + /// + /// > **Note:** The [IIRFilterNode.IIRFilterNode] + /// > constructor is the recommended way to create a [IIRFilterNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external IIRFilterNode createIIRFilter( + JSArray feedforward, + JSArray feedback, + ); + + /// The `createOscillator()` method of the [BaseAudioContext] + /// interface creates an [OscillatorNode], a source representing a periodic + /// waveform. It basically generates a constant tone. + /// + /// > **Note:** The [OscillatorNode.OscillatorNode] + /// > constructor is the recommended way to create a [OscillatorNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external OscillatorNode createOscillator(); + + /// The `createPanner()` method of the [BaseAudioContext] + /// Interface is used to create a new [PannerNode], which is used to + /// spatialize an incoming audio stream in 3D space. + /// + /// The panner node is spatialized in relation to the AudioContext's + /// [AudioListener] (defined by the [BaseAudioContext.listener] + /// attribute), which represents the position and orientation of the person + /// listening to the + /// audio. 
+ /// + /// > **Note:** The [PannerNode.PannerNode] + /// > constructor is the recommended way to create a [PannerNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external PannerNode createPanner(); + + /// The `createPeriodicWave()` method of the [BaseAudioContext] Interface + /// is used to create a [PeriodicWave], which is used to define a periodic + /// waveform + /// that can be used to shape the output of an [OscillatorNode]. + external PeriodicWave createPeriodicWave( + JSArray real, + JSArray imag, [ + PeriodicWaveConstraints constraints, + ]); + + /// The `createScriptProcessor()` method of the [BaseAudioContext] interface + /// creates a [ScriptProcessorNode] used for direct audio processing. + /// + /// > **Note:** This feature was replaced by + /// > [AudioWorklets](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet) + /// > and the [AudioWorkletNode] interface. + external ScriptProcessorNode createScriptProcessor([ + int bufferSize, + int numberOfInputChannels, + int numberOfOutputChannels, + ]); + + /// The `createStereoPanner()` method of the [BaseAudioContext] interface + /// creates a [StereoPannerNode], which can be used to apply + /// stereo panning to an audio source. + /// It positions an incoming audio stream in a stereo image using a + /// [low-cost panning algorithm](https://webaudio.github.io/web-audio-api/#stereopanner-algorithm). + /// + /// > **Note:** The [StereoPannerNode.StereoPannerNode] + /// > constructor is the recommended way to create a [StereoPannerNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external StereoPannerNode createStereoPanner(); + + /// The `createWaveShaper()` method of the [BaseAudioContext] + /// interface creates a [WaveShaperNode], which represents a non-linear + /// distortion. It is used to apply distortion effects to your audio. + /// + /// > **Note:** The [WaveShaperNode.WaveShaperNode] + /// > constructor is the recommended way to create a [WaveShaperNode]; see + /// > [Creating an AudioNode](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode#creating_an_audionode). + external WaveShaperNode createWaveShaper(); + + /// The `decodeAudioData()` method of the [BaseAudioContext] + /// Interface is used to asynchronously decode audio file data contained in an + /// `ArrayBuffer` that is loaded from [fetch], + /// [XMLHttpRequest], or [FileReader]. The decoded + /// [AudioBuffer] is resampled to the [AudioContext]'s sampling + /// rate, then passed to a callback or promise. + /// + /// This is the preferred method of creating an audio source for Web Audio API + /// from an + /// audio track. This method only works on complete file data, not fragments + /// of audio file + /// data. + /// + /// This function implements two alternative ways to asynchronously return the + /// audio data or error messages: it returns a `Promise` that fulfills with + /// the audio data, and also accepts callback arguments to handle success or + /// failure. The primary method of interfacing with this function is via its + /// Promise return value, and the callback parameters are provided for legacy + /// reasons. + external JSPromise decodeAudioData( + JSArrayBuffer audioData, [ + DecodeSuccessCallback? successCallback, + DecodeErrorCallback? 
errorCallback, + ]); + + /// The `destination` property of the [BaseAudioContext] + /// interface returns an [AudioDestinationNode] representing the final + /// destination of all audio in the context. It often represents an actual + /// audio-rendering + /// device such as your device's speakers. + external AudioDestinationNode get destination; + + /// The `sampleRate` property of the [BaseAudioContext] interface returns a + /// floating point number representing the sample rate, in samples per second, + /// used by all nodes in this audio context. + /// This limitation means that sample-rate converters are not supported. + external double get sampleRate; + + /// The `currentTime` read-only property of the [BaseAudioContext] + /// interface returns a double representing an ever-increasing hardware + /// timestamp in seconds that + /// can be used for scheduling audio playback, visualizing timelines, etc. It + /// starts at 0. + external double get currentTime; + + /// The `listener` property of the [BaseAudioContext] interface + /// returns an [AudioListener] object that can then be used for + /// implementing 3D audio spatialization. + external AudioListener get listener; + + /// The `state` read-only property of the [BaseAudioContext] + /// interface returns the current state of the `AudioContext`. + external AudioContextState get state; + + /// The `audioWorklet` read-only property of the + /// [BaseAudioContext] interface returns an instance of + /// [AudioWorklet] that can be used for adding + /// [AudioWorkletProcessor]-derived classes which implement custom audio + /// processing. + external AudioWorklet get audioWorklet; + external EventHandler get onstatechange; + external set onstatechange(EventHandler value); +} + +/// The `AudioContext` interface represents an audio-processing graph built from +/// audio modules linked together, each represented by an [AudioNode]. +/// +/// An audio context controls both the creation of the nodes it contains and the +/// execution of the audio processing, or decoding. You need to create an +/// `AudioContext` before you do anything else, as everything happens inside a +/// context. It's recommended to create one AudioContext and reuse it instead of +/// initializing a new one each time, and it's OK to use a single `AudioContext` +/// for several different audio sources and pipeline concurrently. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioContext). +extension type AudioContext._(JSObject _) + implements BaseAudioContext, JSObject { + external factory AudioContext([AudioContextOptions contextOptions]); + + /// The + /// **`getOutputTimestamp()`** method of the + /// [AudioContext] interface returns a new `AudioTimestamp` object + /// containing two audio timestamp values relating to the current audio + /// context. + /// + /// The two values are as follows: + /// + /// - `AudioTimestamp.contextTime`: The time of the sample frame currently + /// being rendered by the audio output device (i.e., output audio stream + /// position), in the + /// same units and origin as the context's [BaseAudioContext.currentTime]. + /// Basically, this is the time after the audio context was first created. + /// - `AudioTimestamp.performanceTime`: An estimation of the moment when the + /// sample frame corresponding to the stored `contextTime` value was rendered + /// by the audio output device, in the same units and origin as + /// [performance.now]. 
This is the time after the document containing the + /// audio context was first rendered. + external AudioTimestamp getOutputTimestamp(); + + /// The **`resume()`** method of the [AudioContext] + /// interface resumes the progression of time in an audio context that has + /// previously been + /// suspended. + /// + /// This method will cause an `INVALID_STATE_ERR` exception to be thrown if + /// called on an [OfflineAudioContext]. + external JSPromise resume(); + + /// The `suspend()` method of the [AudioContext] Interface suspends the + /// progression of time in the audio context, temporarily halting audio + /// hardware access and reducing CPU/battery usage in the process — this is + /// useful if you want an application to power down the audio hardware when it + /// will not be using an audio context for a while. + /// + /// This method will cause an `INVALID_STATE_ERR` exception to be thrown if + /// called on an [OfflineAudioContext]. + external JSPromise suspend(); + + /// The `close()` method of the [AudioContext] Interface closes the audio + /// context, releasing any system audio resources that it uses. + /// + /// This function does not automatically release all `AudioContext`-created + /// objects, unless other references have been released as well; however, it + /// will forcibly release any system audio resources that might prevent + /// additional `AudioContexts` from being created and used, suspend the + /// progression of audio time in the audio context, and stop processing audio + /// data. The returned `Promise` resolves when all + /// `AudioContext`-creation-blocking resources have been released. This method + /// throws an `INVALID_STATE_ERR` exception if called on an + /// [OfflineAudioContext]. + external JSPromise close(); + + /// The `createMediaElementSource()` method of the [AudioContext] Interface is + /// used to create a new [MediaElementAudioSourceNode] object, given an + /// existing HTML `audio` or `video` element, the audio from which can then be + /// played and manipulated. + /// + /// For more details about media element audio source nodes, check out the + /// [MediaElementAudioSourceNode] reference page. + external MediaElementAudioSourceNode createMediaElementSource( + HTMLMediaElement mediaElement); + + /// The `createMediaStreamSource()` method of the [AudioContext] + /// Interface is used to create a new [MediaStreamAudioSourceNode] + /// object, given a media stream (say, from a [MediaDevices.getUserMedia] + /// instance), the audio from which can then be played and manipulated. + /// + /// For more details about media stream audio source nodes, check out the + /// [MediaStreamAudioSourceNode] reference page. + external MediaStreamAudioSourceNode createMediaStreamSource( + MediaStream mediaStream); + + /// The **`createMediaStreamTrackSource()`** method of the [AudioContext] + /// interface creates and returns a [MediaStreamTrackAudioSourceNode] which + /// represents an audio source whose data comes from the specified + /// [MediaStreamTrack]. + /// + /// This differs from [AudioContext.createMediaStreamSource], which creates a + /// [MediaStreamAudioSourceNode] whose audio comes from the audio track in a + /// specified [MediaStream] whose [MediaStreamTrack.id] is first, + /// lexicographically (alphabetically). 
+ external MediaStreamTrackAudioSourceNode createMediaStreamTrackSource( + MediaStreamTrack mediaStreamTrack); + + /// The `createMediaStreamDestination()` method of the [AudioContext] + /// Interface is used to create a new [MediaStreamAudioDestinationNode] object + /// associated with a + /// [WebRTC](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API) + /// [MediaStream] representing an audio stream, which may be stored in a local + /// file or sent to another computer. + /// + /// The [MediaStream] is created when the node is created and is accessible + /// via the [MediaStreamAudioDestinationNode]'s `stream` attribute. This + /// stream can be used in a similar way as a `MediaStream` obtained via + /// [navigator.getUserMedia] — it can, for example, be sent to a remote peer + /// using the `addStream()` method of `RTCPeerConnection`. + /// + /// For more details about media stream destination nodes, check out the + /// [MediaStreamAudioDestinationNode] reference page. + external MediaStreamAudioDestinationNode createMediaStreamDestination(); + + /// The **`baseLatency`** read-only property of the + /// [AudioContext] interface returns a double that represents the number of + /// seconds of processing latency incurred by the `AudioContext` passing an + /// audio + /// buffer from the [AudioDestinationNode] — i.e. the end of the audio graph — + /// into the host system's audio subsystem ready for playing. + /// + /// > **Note:** You can request a certain latency during + /// > [AudioContext.AudioContext] with the + /// > `latencyHint` option, but the browser may ignore the option. + external double get baseLatency; + + /// The **`outputLatency`** read-only property of + /// the [AudioContext] Interface provides an estimation of the output latency + /// of the current audio context. + /// + /// This is the time, in seconds, between the browser passing an audio buffer + /// out of an + /// audio graph over to the host system's audio subsystem to play, and the + /// time at which the + /// first sample in the buffer is actually processed by the audio output + /// device. + /// + /// It varies depending on the platform and the available hardware. + external double get outputLatency; +} + + +extension type AudioContextOptions._(JSObject _) implements JSObject { + external factory AudioContextOptions({ + JSAny latencyHint, + num sampleRate, + JSAny sinkId, + JSAny renderSizeHint, + }); + + external JSAny get latencyHint; + external set latencyHint(JSAny value); + external double get sampleRate; + external set sampleRate(num value); + external JSAny get sinkId; + external set sinkId(JSAny value); + external JSAny get renderSizeHint; + external set renderSizeHint(JSAny value); +} +extension type AudioSinkOptions._(JSObject _) implements JSObject { + external factory AudioSinkOptions({required AudioSinkType type}); + + external AudioSinkType get type; + external set type(AudioSinkType value); +} +extension type AudioTimestamp._(JSObject _) implements JSObject { + external factory AudioTimestamp({ + num contextTime, + DOMHighResTimeStamp performanceTime, + }); + + external double get contextTime; + external set contextTime(num value); + external double get performanceTime; + external set performanceTime(DOMHighResTimeStamp value); +} + + + + + +/// The `OfflineAudioContext` interface is an [AudioContext] interface +/// representing an audio-processing graph built from linked together +/// [AudioNode]s. 
In contrast with a standard [AudioContext], an +/// `OfflineAudioContext` doesn't render the audio to the device hardware; +/// instead, it generates it, as fast as it can, and outputs the result to an +/// [AudioBuffer]. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioContext). +extension type OfflineAudioContext._(JSObject _) + implements BaseAudioContext, JSObject { + external factory OfflineAudioContext( + JSAny contextOptionsOrNumberOfChannels, [ + int length, + num sampleRate, + ]); + + /// The `startRendering()` method of the [OfflineAudioContext] Interface + /// starts rendering the audio graph, taking into account the current + /// connections and the current scheduled changes. + /// + /// The [OfflineAudioContext.complete_event] event (of type + /// [OfflineAudioCompletionEvent]) is raised when the rendering is finished, + /// containing the resulting [AudioBuffer] in its `renderedBuffer` property. + /// + /// Browsers currently support two versions of the `startRendering()` method — + /// an older event-based version and a newer promise-based version. + /// The former will eventually be removed, but currently both mechanisms are + /// provided for legacy reasons. + external JSPromise startRendering(); + + /// The **`resume()`** method of the + /// [OfflineAudioContext] interface resumes the progression of time in an + /// audio + /// context that has been suspended. The promise resolves immediately because + /// the + /// `OfflineAudioContext` does not require the audio hardware. + external JSPromise resume(); + + /// The **`suspend()`** method of the [OfflineAudioContext] interface + /// schedules a suspension of the time + /// progression in the audio context at the specified time and returns a + /// promise. This is + /// generally useful at the time of manipulating the audio graph synchronously + /// on + /// OfflineAudioContext. + /// + /// Note that the maximum precision of suspension is the size of the render + /// quantum and the + /// specified suspension time will be rounded down to the nearest render + /// quantum boundary. + /// For this reason, it is not allowed to schedule multiple suspends at the + /// same quantized + /// frame. Also scheduling should be done while the context is not running to + /// ensure the + /// precise suspension. + external JSPromise suspend(num suspendTime); + + /// The **`length`** property of the + /// [OfflineAudioContext] interface returns an integer representing the size + /// of + /// the buffer in sample-frames. + external int get length; + external EventHandler get oncomplete; + external set oncomplete(EventHandler value); +} + +extension type OfflineAudioContextOptions._(JSObject _) implements JSObject { + external factory OfflineAudioContextOptions({ + int numberOfChannels, + required int length, + required num sampleRate, + JSAny renderSizeHint, + }); + + external int get numberOfChannels; + external set numberOfChannels(int value); + external int get length; + external set length(int value); + external double get sampleRate; + external set sampleRate(num value); + external JSAny get renderSizeHint; + external set renderSizeHint(JSAny value); +} + +/// The +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// `OfflineAudioCompletionEvent` interface represents events that occur when +/// the processing of an [OfflineAudioContext] is terminated. 
The +/// [OfflineAudioContext.complete_event] event uses this interface. +/// +/// > **Note:** This interface is marked as deprecated; it is still supported +/// > for legacy reasons, but it will soon be superseded when the promise +/// > version of [OfflineAudioContext.startRendering] is supported in browsers, +/// > which will no longer need it. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/OfflineAudioCompletionEvent). +extension type OfflineAudioCompletionEvent._(JSObject _) + implements Event, JSObject { + external factory OfflineAudioCompletionEvent( + String type, + OfflineAudioCompletionEventInit eventInitDict, + ); + + /// The **`renderedBuffer`** read-only property of the + /// [OfflineAudioCompletionEvent] interface is an [AudioBuffer] + /// containing the result of processing an [OfflineAudioContext]. + external AudioBuffer get renderedBuffer; +} + +extension type OfflineAudioCompletionEventInit._(JSObject _) + implements EventInit, JSObject { + external factory OfflineAudioCompletionEventInit({ + bool bubbles, + bool cancelable, + bool composed, + required AudioBuffer renderedBuffer, + }); + + external AudioBuffer get renderedBuffer; + external set renderedBuffer(AudioBuffer value); +} + +/// The **`AudioBuffer`** interface represents a short audio asset residing in +/// memory, created from an audio file using the +/// [BaseAudioContext.decodeAudioData] method, or from raw data using +/// [BaseAudioContext.createBuffer]. Once put into an AudioBuffer, the audio can +/// then be played by being passed into an [AudioBufferSourceNode]. +/// +/// Objects of these types are designed to hold small audio snippets, typically +/// less than 45 s. For longer sounds, objects implementing the +/// [MediaElementAudioSourceNode] are more suitable. The buffer contains the +/// audio signal waveform encoded as a series of amplitudes in the following +/// format: non-interleaved IEEE754 32-bit linear PCM with a nominal range +/// between `-1` and `+1`, that is, a 32-bit floating point buffer, with each +/// sample between -1.0 and 1.0. If the [AudioBuffer] has multiple channels, +/// they are stored in separate buffers. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer). +extension type AudioBuffer._(JSObject _) implements JSObject { + external factory AudioBuffer(AudioBufferOptions options); + + /// The **`getChannelData()`** method of the [AudioBuffer] Interface returns a + /// `Float32Array` containing the PCM data associated with the channel, + /// defined by the channel parameter (with 0 representing the first channel). + external JSFloat32Array getChannelData(int channel); + + /// The + /// **`copyFromChannel()`** method of the + /// [AudioBuffer] interface copies the audio sample data from the specified + /// channel of the `AudioBuffer` to a specified + /// `Float32Array`. + external void copyFromChannel( + JSFloat32Array destination, + int channelNumber, [ + int bufferOffset, + ]); + + /// The `copyToChannel()` method of the [AudioBuffer] interface copies + /// the samples to the specified channel of the `AudioBuffer`, from the source + /// array. 
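+ ///
+ /// For illustration, a minimal sketch (not part of the documentation above)
+ /// that writes samples into channel 0 of a mono buffer and reads them back;
+ /// `Float32List` and `.toJS` come from `dart:typed_data` and
+ /// `dart:js_interop`:
+ ///
+ /// ```dart
+ /// final buffer = AudioBuffer(AudioBufferOptions(
+ ///   numberOfChannels: 1,
+ ///   length: 128,
+ ///   sampleRate: 44100,
+ /// ));
+ /// final samples = Float32List(128)..fillRange(0, 128, 0.25);
+ /// buffer.copyToChannel(samples.toJS, 0);
+ ///
+ /// final readBack = Float32List(128).toJS;
+ /// buffer.copyFromChannel(readBack, 0);
+ /// // readBack now holds the 128 samples of channel 0.
+ /// ```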
+ external void copyToChannel( + JSFloat32Array source, + int channelNumber, [ + int bufferOffset, + ]); + + /// The **`sampleRate`** property of the [AudioBuffer] interface returns a + /// float representing the sample rate, in samples per second, of the PCM data + /// stored in the buffer. + external double get sampleRate; + + /// The **`length`** property of the [AudioBuffer] + /// interface returns an integer representing the length, in sample-frames, of + /// the PCM data + /// stored in the buffer. + external int get length; + + /// The **`duration`** property of the [AudioBuffer] interface returns a + /// double representing the duration, in seconds, of the PCM data stored in + /// the buffer. + external double get duration; + + /// The `numberOfChannels` property of the [AudioBuffer] + /// interface returns an integer representing the number of discrete audio + /// channels + /// described by the PCM data stored in the buffer. + external int get numberOfChannels; +} +extension type AudioBufferOptions._(JSObject _) implements JSObject { + external factory AudioBufferOptions({ + int numberOfChannels, + required int length, + required num sampleRate, + }); + + external int get numberOfChannels; + external set numberOfChannels(int value); + external int get length; + external set length(int value); + external double get sampleRate; + external set sampleRate(num value); +} + +/// The **`AudioNode`** interface is a generic interface for representing an +/// audio processing module. +/// +/// Examples include: +/// +/// - an audio source (e.g. an HTML `audio` or `video` element, an +/// [OscillatorNode], etc.), +/// - the audio destination, +/// - intermediate processing module (e.g. a filter like [BiquadFilterNode] or +/// [ConvolverNode]), or +/// - volume control (like [GainNode]) +/// +/// > **Note:** An `AudioNode` can be target of events, therefore it implements +/// > the [EventTarget] interface. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioNode). +extension type AudioNode._(JSObject _) implements EventTarget, JSObject { + /// The `connect()` method of the [AudioNode] interface lets + /// you connect one of the node's outputs to a target, which may be either + /// another + /// `AudioNode` (thereby directing the sound data to the specified node) or an + /// [AudioParam], so that the node's output data is automatically used to + /// change the value of that parameter over time. + external AudioNode? connect( + JSObject destinationNodeOrDestinationParam, [ + int output, + int input, + ]); + + /// The **`disconnect()`** method of the [AudioNode] interface lets you + /// disconnect one or more nodes from the node on which the method is called. + external void disconnect([ + JSAny destinationNodeOrDestinationParamOrOutput, + int output, + int input, + ]); + + /// The read-only `context` property of the + /// [AudioNode] interface returns the associated + /// [BaseAudioContext], that is the object representing the processing graph + /// the node is participating in. + external BaseAudioContext get context; + + /// The `numberOfInputs` property of + /// the [AudioNode] interface returns the number of inputs feeding the + /// node. Source nodes are defined as nodes having a `numberOfInputs` + /// property with a value of 0. + external int get numberOfInputs; + + /// The `numberOfOutputs` property of + /// the [AudioNode] interface returns the number of outputs coming out of + /// the node. 
Destination nodes — like [AudioDestinationNode] — have + /// a value of 0 for this attribute. + external int get numberOfOutputs; + + /// The **`channelCount`** property of the [AudioNode] interface represents an + /// integer used to determine how many channels are used when + /// [up-mixing and down-mixing](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Basic_concepts_behind_Web_Audio_API#up-mixing_and_down-mixing) + /// connections to any inputs to the node. + /// + /// `channelCount`'s usage and precise definition depend on the value of + /// [AudioNode.channelCountMode]: + /// + /// - It is ignored if the `channelCountMode` value is `max`. + /// - It is used as a maximum value if the `channelCountMode` value is + /// `clamped-max`. + /// - It is used as the exact value if the `channelCountMode` value is + /// `explicit`. + external int get channelCount; + external set channelCount(int value); + + /// The `channelCountMode` property of the [AudioNode] interface represents an + /// enumerated value describing the way channels must be matched between the + /// node's inputs and outputs. + external ChannelCountMode get channelCountMode; + external set channelCountMode(ChannelCountMode value); + + /// The **`channelInterpretation`** property of the [AudioNode] interface + /// represents an enumerated value describing how input channels are mapped to + /// output channels when the number of inputs/outputs is different. For + /// example, this setting defines how a mono input will be up-mixed to a + /// stereo or 5.1 channel output, or how a quad channel input will be + /// down-mixed to a stereo or mono output. + /// + /// The property has two options: `speakers` and `discrete`. These are + /// documented in [Basic concepts behind Web Audio API > up-mixing and + /// down-mixing](/en-US/docs/Web/API/Web_Audio_API/Basic_concepts_behind_Web_Audio_API#up-mixing_and_down-mixing). + external ChannelInterpretation get channelInterpretation; + external set channelInterpretation(ChannelInterpretation value); +} +extension type AudioNodeOptions._(JSObject _) implements JSObject { + external factory AudioNodeOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + }); + + external int get channelCount; + external set channelCount(int value); + external ChannelCountMode get channelCountMode; + external set channelCountMode(ChannelCountMode value); + external ChannelInterpretation get channelInterpretation; + external set channelInterpretation(ChannelInterpretation value); +} + +/// The Web Audio API's `AudioParam` interface represents an audio-related +/// parameter, usually a parameter of an [AudioNode] (such as [GainNode.gain]). +/// +/// An `AudioParam` can be set to a specific value or a change in value, and can +/// be scheduled to happen at a specific time and following a specific pattern. +/// +/// Each `AudioParam` has a list of events, initially empty, that define when +/// and how values change. When this list is not empty, changes using the +/// `AudioParam.value` attributes are ignored. This list of events allows us to +/// schedule changes that have to happen at very precise times, using arbitrary +/// timeline-based automation curves. The time used is the one defined in +/// [BaseAudioContext.currentTime]. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam). 
+extension type AudioParam._(JSObject _) implements JSObject { + /// The `setValueAtTime()` method of the + /// [AudioParam] interface schedules an instant change to the + /// `AudioParam` value at a precise time, as measured against + /// [BaseAudioContext.currentTime]. The new value is given in the value + /// parameter. + external AudioParam setValueAtTime( + num value, + num startTime, + ); + + /// The `linearRampToValueAtTime()` method of the [AudioParam] + /// Interface schedules a gradual linear change in the value of the + /// `AudioParam`. The change starts at the time specified for the + /// _previous_ event, follows a linear ramp to the new value given in the + /// `value` parameter, and reaches the new value at the time given in the + /// `endTime` parameter. + external AudioParam linearRampToValueAtTime( + num value, + num endTime, + ); + + /// The **`exponentialRampToValueAtTime()`** method of the [AudioParam] + /// Interface schedules a gradual exponential change in the value of the + /// [AudioParam]. + /// The change starts at the time specified for the _previous_ event, follows + /// an exponential ramp to the new value given in the `value` parameter, and + /// reaches the new value at the time given in the + /// `endTime` parameter. + /// + /// > **Note:** Exponential ramps are considered more useful when changing + /// > frequencies or playback rates than linear ramps because of the way the + /// > human ear + /// > works. + external AudioParam exponentialRampToValueAtTime( + num value, + num endTime, + ); + + /// The `setTargetAtTime()` method of the + /// [AudioParam] interface schedules the start of a gradual change to the + /// `AudioParam` value. This is useful for decay or release portions of ADSR + /// envelopes. + external AudioParam setTargetAtTime( + num target, + num startTime, + num timeConstant, + ); + + /// The + /// **`setValueCurveAtTime()`** method of the + /// [AudioParam] interface schedules the parameter's value to change + /// following a curve defined by a list of values. + /// + /// The curve is a linear + /// interpolation between the sequence of values defined in an array of + /// floating-point + /// values, which are scaled to fit into the given interval starting at + /// `startTime` and a specific duration. + external AudioParam setValueCurveAtTime( + JSArray values, + num startTime, + num duration, + ); + + /// The `cancelScheduledValues()` method of the [AudioParam] + /// Interface cancels all scheduled future changes to the `AudioParam`. + external AudioParam cancelScheduledValues(num cancelTime); + + /// The **`cancelAndHoldAtTime()`** method of the + /// [AudioParam] interface cancels all scheduled future changes to the + /// `AudioParam` but holds its value at a given time until further changes are + /// made using other methods. + external AudioParam cancelAndHoldAtTime(num cancelTime); + + /// The [Web Audio + /// API's](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) + /// [AudioParam] interface property **`value`** gets + /// or sets the value of this [AudioParam] at the current time. Initially, the + /// value is set to [AudioParam.defaultValue]. + /// + /// Setting `value` has the same effect as + /// calling [AudioParam.setValueAtTime] with the time returned by the + /// `AudioContext`'s [BaseAudioContext.currentTime] + /// property. 
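+ ///
+ /// For illustration, a minimal sketch (not part of the documentation above),
+ /// assuming `ctx` is an [AudioContext]; it fades a [GainNode] in over two
+ /// seconds with the automation methods instead of writing `value` directly:
+ ///
+ /// ```dart
+ /// final gainNode = ctx.createGain();
+ /// gainNode.gain.setValueAtTime(0, ctx.currentTime);
+ /// gainNode.gain.linearRampToValueAtTime(1, ctx.currentTime + 2);
+ /// // Writing the value directly has the same effect as
+ /// // setValueAtTime(0.5, ctx.currentTime):
+ /// // gainNode.gain.value = 0.5;
+ /// ```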
+ external double get value; + external set value(num value); + external AutomationRate get automationRate; + external set automationRate(AutomationRate value); + + /// The **`defaultValue`** + /// read-only property of the [AudioParam] interface represents the initial + /// value of the attributes as defined by the specific [AudioNode] creating + /// the `AudioParam`. + external double get defaultValue; + + /// The **`minValue`** + /// read-only property of the [AudioParam] interface represents the minimum + /// possible value for the parameter's nominal (effective) range. + external double get minValue; + + /// The **`maxValue`** + /// read-only property of the [AudioParam] interface represents the maximum + /// possible value for the parameter's nominal (effective) range. + external double get maxValue; +} + +/// The `AudioScheduledSourceNode` interface—part of the Web Audio API—is a +/// parent interface for several types of audio source node interfaces which +/// share the ability to be started and stopped, optionally at specified times. +/// Specifically, this interface defines the [AudioScheduledSourceNode.start] +/// and [AudioScheduledSourceNode.stop] methods, as well as the +/// [AudioScheduledSourceNode.ended_event] event. +/// +/// > **Note:** You can't create an `AudioScheduledSourceNode` object directly. +/// > Instead, use an interface which extends it, such as +/// > [AudioBufferSourceNode], [OscillatorNode] or [ConstantSourceNode]. +/// +/// Unless stated otherwise, nodes based upon `AudioScheduledSourceNode` output +/// silence when not playing (that is, before `start()` is called and after +/// `stop()` is called). Silence is represented, as always, by a stream of +/// samples with the value zero (0). +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioScheduledSourceNode). +extension type AudioScheduledSourceNode._(JSObject _) + implements AudioNode, JSObject { + /// The `start()` method on [AudioScheduledSourceNode] schedules a sound to + /// begin playback at the specified time. + /// If no time is specified, then the sound begins playing immediately. + external void start([num when]); + + /// The `stop()` method on [AudioScheduledSourceNode] schedules a + /// sound to cease playback at the specified time. If no time is specified, + /// then the sound + /// stops playing immediately. + /// + /// Each time you call `stop()` on the same node, the specified time replaces + /// any previously-scheduled stop time that hasn't occurred yet. If the node + /// has already + /// stopped, this method has no effect. + /// + /// > **Note:** If a scheduled stop time occurs before the node's scheduled + /// > start time, the node never starts to play. + external void stop([num when]); + external EventHandler get onended; + external set onended(EventHandler value); +} + +/// The **`AnalyserNode`** interface represents a node able to provide real-time +/// frequency and time-domain analysis information. It is an [AudioNode] that +/// passes the audio stream unchanged from the input to the output, but allows +/// you to take the generated data, process it, and create audio visualizations. +/// +/// An `AnalyserNode` has exactly one input and one output. The node works even +/// if the output is not connected. 
+///
+/// ![Without modifying the audio stream, the node allows to get the frequency
+/// and time-domain data associated to it, using a FFT.](fttaudiodata_en.svg)
+///
+/// | Property               | Value                             |
+/// | ---------------------- | --------------------------------- |
+/// | Number of inputs       | `1`                               |
+/// | Number of outputs      | `1` (but may be left unconnected) |
+/// | Channel count mode     | `"max"`                           |
+/// | Channel count          | `2`                               |
+/// | Channel interpretation | `"speakers"`                      |
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode). +extension type AnalyserNode._(JSObject _) implements AudioNode, JSObject { + external factory AnalyserNode( + BaseAudioContext context, [ + AnalyserOptions options, + ]); + + /// The **`getFloatFrequencyData()`** method of the [AnalyserNode] Interface + /// copies the current frequency data into a `Float32Array` array passed into + /// it. Each array value is a _sample_, the magnitude of the signal at a + /// particular time. + /// + /// Each item in the array represents the decibel value for a specific + /// frequency. The frequencies are spread linearly from 0 to 1/2 of the sample + /// rate. For example, for a `48000` Hz sample rate, the last item of the + /// array will represent the decibel value for `24000` Hz. + /// + /// If you need higher performance and don't care about precision, you can use + /// [AnalyserNode.getByteFrequencyData] instead, which works on a + /// `Uint8Array`. + external void getFloatFrequencyData(JSFloat32Array array); + + /// The **`getByteFrequencyData()`** method of the [AnalyserNode] interface + /// copies the current frequency data into a `Uint8Array` (unsigned byte + /// array) passed into it. + /// + /// The frequency data is composed of integers on a scale from 0 to 255. + /// + /// Each item in the array represents the decibel value for a specific + /// frequency. The frequencies are spread linearly from 0 to 1/2 of the sample + /// rate. For example, for `48000` sample rate, the last item of the array + /// will represent the decibel value for `24000` Hz. + /// + /// If the array has fewer elements than the [AnalyserNode.frequencyBinCount], + /// excess elements are dropped. If it has more elements than needed, excess + /// elements are ignored. + external void getByteFrequencyData(JSUint8Array array); + + /// The **`getFloatTimeDomainData()`** method of the [AnalyserNode] Interface + /// copies the current waveform, or time-domain, data into a `Float32Array` + /// array passed into it. Each array value is a _sample_, the magnitude of the + /// signal at a particular time. + external void getFloatTimeDomainData(JSFloat32Array array); + + /// The **`getByteTimeDomainData()`** method of the [AnalyserNode] Interface + /// copies the current waveform, or time-domain, data into a `Uint8Array` + /// (unsigned byte array) passed into it. + /// + /// If the array has fewer elements than the [AnalyserNode.fftSize], excess + /// elements are dropped. If it has more elements than needed, excess elements + /// are ignored. + external void getByteTimeDomainData(JSUint8Array array); + + /// The **`fftSize`** property of the [AnalyserNode] interface is an unsigned + /// long value and represents the window size in samples that is used when + /// performing a + /// [Fast Fourier Transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) + /// (FFT) to get frequency domain data. + external int get fftSize; + external set fftSize(int value); + + /// The **`frequencyBinCount`** read-only property of the [AnalyserNode] + /// interface contains the total number of data points available to + /// [AudioContext] [BaseAudioContext.sampleRate]. This is half of the `value` + /// of the [AnalyserNode.fftSize]. The two methods' indices have a linear + /// relationship with the frequencies they represent, between 0 and the + /// [Nyquist frequency](https://en.wikipedia.org/wiki/Nyquist_frequency). 
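+ ///
+ /// For illustration, a minimal sketch (not part of the documentation above),
+ /// assuming `ctx` is an [AudioContext] and `source` is some [AudioNode];
+ /// `Uint8List` and `.toJS` come from `dart:typed_data` and `dart:js_interop`:
+ ///
+ /// ```dart
+ /// final analyser = AnalyserNode(ctx, AnalyserOptions(fftSize: 2048));
+ /// source.connect(analyser);
+ /// final bins = Uint8List(analyser.frequencyBinCount).toJS; // 1024 bins here
+ /// analyser.getByteFrequencyData(bins);
+ /// // Each entry of bins is now a magnitude from 0 to 255 for one frequency bin.
+ /// ```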
+ external int get frequencyBinCount; + + /// The **`minDecibels`** property of the [AnalyserNode] interface is a double + /// value representing the minimum power value in the scaling range for the + /// FFT analysis data, for conversion to unsigned byte values — basically, + /// this specifies the minimum value for the range of results when using + /// `getByteFrequencyData()`. + external double get minDecibels; + external set minDecibels(num value); + + /// The **`maxDecibels`** property of the [AnalyserNode] interface is a double + /// value representing the maximum power value in the scaling range for the + /// FFT analysis data, for conversion to unsigned byte values — basically, + /// this specifies the maximum value for the range of results when using + /// `getByteFrequencyData()`. + external double get maxDecibels; + external set maxDecibels(num value); + + /// The **`smoothingTimeConstant`** property of the [AnalyserNode] interface + /// is a double value representing the averaging constant with the last + /// analysis frame. It's basically an average between the current buffer and + /// the last buffer the `AnalyserNode` processed, and results in a much + /// smoother set of value changes over time. + external double get smoothingTimeConstant; + external set smoothingTimeConstant(num value); +} +extension type AnalyserOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory AnalyserOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + int fftSize, + num maxDecibels, + num minDecibels, + num smoothingTimeConstant, + }); + + external int get fftSize; + external set fftSize(int value); + external double get maxDecibels; + external set maxDecibels(num value); + external double get minDecibels; + external set minDecibels(num value); + external double get smoothingTimeConstant; + external set smoothingTimeConstant(num value); +} + +/// The **`AudioBufferSourceNode`** interface is an [AudioScheduledSourceNode] +/// which represents an audio source consisting of in-memory audio data, stored +/// in an [AudioBuffer]. +/// +/// This interface is especially useful for playing back audio which has +/// particularly stringent timing accuracy requirements, such as for sounds that +/// must match a specific rhythm and can be kept in memory rather than being +/// played from disk or the network. To play sounds which require accurate +/// timing but must be streamed from the network or played from disk, use a +/// [AudioWorkletNode] to implement its playback. +/// +/// An `AudioBufferSourceNode` has no inputs and exactly one output, which has +/// the same number of channels as the `AudioBuffer` indicated by its +/// [AudioBufferSourceNode.buffer] property. If there's no buffer set—that is, +/// if `buffer` is `null`—the output contains a single channel of silence (every +/// sample is 0). +/// +/// An `AudioBufferSourceNode` can only be played once; after each call to +/// [AudioBufferSourceNode.start], you have to create a new node if you want to +/// play the same sound again. Fortunately, these nodes are very inexpensive to +/// create, and the actual `AudioBuffer`s can be reused for multiple plays of +/// the sound. Indeed, you can use these nodes in a "fire and forget" manner: +/// create the node, call `start()` to begin playing the sound, and don't even +/// bother to hold a reference to it. 
It will automatically be garbage-collected +/// at an appropriate time, which won't be until sometime after the sound has +/// finished playing. +/// +/// Multiple calls to [AudioScheduledSourceNode.stop] are allowed. The most +/// recent call replaces the previous one, if the `AudioBufferSourceNode` has +/// not already reached the end of the buffer. +/// +/// ![The AudioBufferSourceNode takes the content of an AudioBuffer and m](webaudioaudiobuffersourcenode.png) +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// | Property          | Value                                   |
+/// | ----------------- | --------------------------------------- |
+/// | Number of inputs  | `0`                                     |
+/// | Number of outputs | `1`                                     |
+/// | Channel count     | defined by the associated [AudioBuffer] |
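+///
+/// For illustration, a minimal "fire and forget" sketch (not part of the
+/// documentation above) that synthesizes one second of a 440 Hz tone into an
+/// [AudioBuffer] and plays it; `Float32List`, `.toJS` and `sin`/`pi` come from
+/// `dart:typed_data`, `dart:js_interop` and `dart:math`:
+///
+/// ```dart
+/// final ctx = AudioContext();
+/// final length = ctx.sampleRate.round();
+/// final samples = Float32List(length);
+/// for (var i = 0; i < length; i++) {
+///   samples[i] = sin(2 * pi * 440 * i / ctx.sampleRate);
+/// }
+/// final buffer = AudioBuffer(AudioBufferOptions(
+///   numberOfChannels: 1,
+///   length: length,
+///   sampleRate: ctx.sampleRate,
+/// ));
+/// buffer.copyToChannel(samples.toJS, 0);
+/// final node =
+///     AudioBufferSourceNode(ctx, AudioBufferSourceOptions(buffer: buffer));
+/// node.connect(ctx.destination);
+/// node.start(); // no reference kept; the node is collected after it finishes
+/// ```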
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode). +extension type AudioBufferSourceNode._(JSObject _) + implements AudioScheduledSourceNode, JSObject { + external factory AudioBufferSourceNode( + BaseAudioContext context, [ + AudioBufferSourceOptions options, + ]); + + /// The `start()` method of the [AudioBufferSourceNode] + /// Interface is used to schedule playback of the audio data contained in the + /// buffer, or + /// to begin playback immediately. + external void start([ + num when, + num offset, + num duration, + ]); + + /// The **`buffer`** property of the [AudioBufferSourceNode] interface + /// provides the ability to play back audio using an [AudioBuffer] as the + /// source of the sound data. + /// + /// If the `buffer` property is set to the value `null`, the node + /// generates a single channel containing silence (that is, every sample is + /// 0). + external AudioBuffer? get buffer; + external set buffer(AudioBuffer? value); + + /// The **`playbackRate`** property of + /// the [AudioBufferSourceNode] interface Is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] that + /// defines the speed at which the audio asset will be played. + /// + /// A value of 1.0 indicates it should play at the same speed as its sampling + /// rate, + /// values less than 1.0 cause the sound to play more slowly, while values + /// greater than + /// 1.0 result in audio playing faster than normal. The default value is + /// `1.0`. + /// When set to another value, the `AudioBufferSourceNode` resamples the audio + /// before sending it to the output. + external AudioParam get playbackRate; + + /// The **`detune`** property of the + /// [AudioBufferSourceNode] interface is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] + /// representing detuning of oscillation in + /// [cents](https://en.wikipedia.org/wiki/Cent_%28music%29). + /// + /// For example, values of +100 and -100 detune the source up or down by one + /// semitone, + /// while +1200 and -1200 detune it up or down by one octave. + external AudioParam get detune; + + /// The `loop` property of the [AudioBufferSourceNode] + /// interface is a Boolean indicating if the audio asset must be replayed when + /// the end of + /// the [AudioBuffer] is reached. + /// + /// The `loop` property's default value is `false`. + external bool get loop; + external set loop(bool value); + + /// The **`loopStart`** property of the [AudioBufferSourceNode] interface is a + /// floating-point value indicating, in seconds, where in the [AudioBuffer] + /// the restart of the play must happen. + /// + /// The `loopStart` property's default value is `0`. + external double get loopStart; + external set loopStart(num value); + + /// The `loopEnd` property of the [AudioBufferSourceNode] + /// interface specifies is a floating point number specifying, in seconds, at + /// what offset + /// into playing the [AudioBuffer] playback should loop back to the time + /// indicated by the [AudioBufferSourceNode.loopStart] property. + /// This is only used if the [AudioBufferSourceNode.loop] property is + /// `true`. + external double get loopEnd; + external set loopEnd(num value); +} +extension type AudioBufferSourceOptions._(JSObject _) implements JSObject { + external factory AudioBufferSourceOptions({ + AudioBuffer? 
buffer, + num detune, + bool loop, + num loopEnd, + num loopStart, + num playbackRate, + }); + + external AudioBuffer? get buffer; + external set buffer(AudioBuffer? value); + external double get detune; + external set detune(num value); + external bool get loop; + external set loop(bool value); + external double get loopEnd; + external set loopEnd(num value); + external double get loopStart; + external set loopStart(num value); + external double get playbackRate; + external set playbackRate(num value); +} + +/// The `AudioDestinationNode` interface represents the end destination of an +/// audio graph in a given context — usually the speakers of your device. It can +/// also be the node that will "record" the audio data when used with an +/// `OfflineAudioContext`. +/// +/// `AudioDestinationNode` has no output (as it _is_ the output, no more +/// `AudioNode` can be linked after it in the audio graph) and one input. The +/// number of channels in the input must be between `0` and the +/// `maxChannelCount` value or an exception is raised. +/// +/// The `AudioDestinationNode` of a given `AudioContext` can be retrieved using +/// the [BaseAudioContext.destination] property. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// | Property               | Value        |
+/// | ---------------------- | ------------ |
+/// | Number of inputs       | `1`          |
+/// | Number of outputs      | `0`          |
+/// | Channel count mode     | `"explicit"` |
+/// | Channel count          | `2`          |
+/// | Channel interpretation | `"speakers"` |
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioDestinationNode). +extension type AudioDestinationNode._(JSObject _) + implements AudioNode, JSObject { + /// The `maxchannelCount` property of the [AudioDestinationNode] interface is + /// an `unsigned long` defining the maximum amount of channels that the + /// physical device can handle. + /// + /// The [AudioNode.channelCount] property can be set between 0 and this value + /// (both included). If `maxChannelCount` is `0`, like in + /// [OfflineAudioContext], the channel count cannot be changed. + external int get maxChannelCount; +} + +/// The `AudioListener` interface represents the position and orientation of the +/// unique person listening to the audio scene, and is used in +/// [audio spatialization](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Web_audio_spatialization_basics). +/// All [PannerNode]s spatialize in relation to the `AudioListener` stored in +/// the [BaseAudioContext.listener] attribute. +/// +/// It is important to note that there is only one listener per context and that +/// it isn't an [AudioNode]. +/// +/// ![We see the position, up and front vectors of an AudioListener, with the up +/// and front vectors at 90° from the other.](webaudiolistenerreduced.png) +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioListener). +extension type AudioListener._(JSObject _) implements JSObject { + /// The `setPosition()` method of the [AudioListener] Interface defines the + /// position of the listener. + /// + /// The three parameters `x`, `y` and `z` are unitless and describe the + /// listener's position in 3D space according to the right-hand Cartesian + /// coordinate system. [PannerNode] objects use this position relative to + /// individual audio sources for spatialization. + /// + /// The default value of the position vector is `(0, 0, 0)`. + /// + /// > **Note:** As this method is deprecated, use the three + /// > [AudioListener.positionX], [AudioListener.positionY], and + /// > [AudioListener.positionZ] properties instead. + external void setPosition( + num x, + num y, + num z, + ); + + /// The `setOrientation()` method of the [AudioListener] interface defines the + /// orientation of the listener. + /// + /// It consists of two direction vectors: + /// + /// - The _front vector_, defined by the three unitless parameters `x`, `y` + /// and `z`, describes the direction of the face of the listener, that is + /// the direction the nose of the person is pointing towards. The front + /// vector's default value is `(0, 0, -1)`. + /// - The _up vector_, defined by three unitless parameters `xUp`, `yUp` and + /// `zUp`, describes the direction of the top of the listener's head. The up + /// vector's default value is `(0, 1, 0)`. + /// + /// The two vectors must be separated by an angle of 90° — in linear analysis + /// terms, they must be perpendicular to each other. + external void setOrientation( + num x, + num y, + num z, + num xUp, + num yUp, + num zUp, + ); + + /// The `positionX` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the x position of the listener in 3D cartesian + /// space. 
+ /// + /// > **Note:** The parameter is + /// > [_a-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// > when used with a [PannerNode] whose [PannerNode.panningModel] is set to + /// > equalpower, or + /// > [_k-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// > otherwise. + external AudioParam get positionX; + + /// The `positionY` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the y position of the listener in 3D cartesian + /// space. + /// + /// > **Note:** The parameter is + /// > [_a-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// > when used with a [PannerNode] whose [PannerNode.panningModel] is set to + /// > equalpower, or + /// > [_k-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// > otherwise. + external AudioParam get positionY; + + /// The `positionZ` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the z position of the listener in 3D cartesian + /// space. + /// + /// > **Note:** The parameter is + /// > [_a-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// > when used with a [PannerNode] whose [PannerNode.panningModel] is set to + /// > equalpower, or + /// > [_k-rate_](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// > otherwise. + external AudioParam get positionZ; + + /// The `forwardX` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the x value of the direction vector defining the + /// forward direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. + external AudioParam get forwardX; + + /// The `forwardY` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the y value of the direction vector defining the + /// forward direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. + external AudioParam get forwardY; + + /// The `forwardZ` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the z value of the direction vector defining the + /// forward direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. + external AudioParam get forwardZ; + + /// The `upX` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the x value of the direction vector defining the + /// up direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. + external AudioParam get upX; + + /// The `upY` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the y value of the direction vector defining the + /// up direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. 
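+ ///
+ /// For illustration, a minimal sketch (not part of the documentation above)
+ /// that positions and orients the listener through these [AudioParam]s,
+ /// assuming `ctx` is an [AudioContext]:
+ ///
+ /// ```dart
+ /// final listener = ctx.listener;
+ /// listener.positionX.value = 0;
+ /// listener.positionY.value = 0;
+ /// listener.positionZ.value = 5;  // five units back from the origin
+ /// listener.forwardZ.value = -1;  // facing down the negative z axis (default)
+ /// listener.upY.value = 1;        // with +y as "up" (default)
+ /// ```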
+ external AudioParam get upY; + + /// The `upZ` read-only property of the [AudioListener] interface is an + /// [AudioParam] representing the z value of the direction vector defining the + /// up direction the listener is pointing in. + /// + /// > **Note:** The parameter is _a-rate_ when used with a [PannerNode] whose + /// > [PannerNode.panningModel] is set to equalpower, or _k-rate_ otherwise. + external AudioParam get upZ; +} + +/// The `AudioProcessingEvent` interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents events that occur when a [ScriptProcessorNode] input buffer is +/// ready to be processed. +/// +/// An `audioprocess` event with this interface is fired on a +/// [ScriptProcessorNode] when audio processing is required. During audio +/// processing, the input buffer is read and processed to produce output audio +/// data, which is then written to the output buffer. +/// +/// > **Warning:** This feature has been deprecated and should be replaced by an +/// > [`AudioWorklet`](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet). +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioProcessingEvent). +extension type AudioProcessingEvent._(JSObject _) implements Event, JSObject { + external factory AudioProcessingEvent( + String type, + AudioProcessingEventInit eventInitDict, + ); + + /// The **`playbackTime`** read-only property of the [AudioProcessingEvent] + /// interface represents the time when the audio will be played. It is in the + /// same coordinate system as the time used by the [AudioContext]. + external double get playbackTime; + + /// The **`inputBuffer`** read-only property of the [AudioProcessingEvent] + /// interface represents the input buffer of an audio processing event. + /// + /// The input buffer is represented by an [AudioBuffer] object, which contains + /// a collection of audio channels, each of which is an array of + /// floating-point values representing the audio signal waveform encoded as a + /// series of amplitudes. The number of channels and the length of each + /// channel are determined by the channel count and buffer size properties of + /// the `AudioBuffer`. + external AudioBuffer get inputBuffer; + + /// The **`outputBuffer`** read-only property of the [AudioProcessingEvent] + /// interface represents the output buffer of an audio processing event. + /// + /// The output buffer is represented by an [AudioBuffer] object, which + /// contains a collection of audio channels, each of which is an array of + /// floating-point values representing the audio signal waveform encoded as a + /// series of amplitudes. The number of channels and the length of each + /// channel are determined by the channel count and buffer size properties of + /// the `AudioBuffer`. 
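+ ///
+ /// For illustration, a minimal pass-through handler body (not part of the
+ /// documentation above) for the deprecated `audioprocess` event; wiring it up
+ /// to a [ScriptProcessorNode] is omitted here:
+ ///
+ /// ```dart
+ /// void passThrough(AudioProcessingEvent e) {
+ ///   final input = e.inputBuffer;
+ ///   final output = e.outputBuffer;
+ ///   for (var ch = 0; ch < output.numberOfChannels; ch++) {
+ ///     // Copy every input channel to the output unchanged.
+ ///     output.copyToChannel(input.getChannelData(ch), ch);
+ ///   }
+ /// }
+ /// ```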
+ external AudioBuffer get outputBuffer; +} +extension type AudioProcessingEventInit._(JSObject _) + implements EventInit, JSObject { + external factory AudioProcessingEventInit({ + bool bubbles, + bool cancelable, + bool composed, + required num playbackTime, + required AudioBuffer inputBuffer, + required AudioBuffer outputBuffer, + }); + + external double get playbackTime; + external set playbackTime(num value); + external AudioBuffer get inputBuffer; + external set inputBuffer(AudioBuffer value); + external AudioBuffer get outputBuffer; + external set outputBuffer(AudioBuffer value); +} + +/// The `BiquadFilterNode` interface represents a simple low-order filter, and +/// is created using the [BaseAudioContext.createBiquadFilter] method. It is an +/// [AudioNode] that can represent different kinds of filters, tone control +/// devices, and graphic equalizers. A `BiquadFilterNode` always has exactly one +/// input and one output. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// | Property               | Value                                    |
+/// | ---------------------- | ---------------------------------------- |
+/// | Number of inputs       | `1`                                      |
+/// | Number of outputs      | `1`                                      |
+/// | Channel count mode     | `"max"`                                  |
+/// | Channel count          | `2` (not used in the default count mode) |
+/// | Channel interpretation | `"speakers"`                             |
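+///
+/// For illustration, a minimal sketch (not part of the documentation above),
+/// assuming `ctx` is an [AudioContext] and `source` is some [AudioNode]; it
+/// inserts a low-pass filter (the default `type`) before the destination:
+///
+/// ```dart
+/// final filter = BiquadFilterNode(ctx);
+/// filter.frequency.value = 800; // cutoff frequency in Hz
+/// filter.Q.value = 1;
+/// source.connect(filter);
+/// filter.connect(ctx.destination);
+/// ```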
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/BiquadFilterNode). +extension type BiquadFilterNode._(JSObject _) implements AudioNode, JSObject { + external factory BiquadFilterNode( + BaseAudioContext context, [ + BiquadFilterOptions options, + ]); + + /// The `getFrequencyResponse()` method of the [BiquadFilterNode] interface + /// takes the current filtering algorithm's settings and calculates the + /// frequency response for frequencies specified in a specified array of + /// frequencies. + /// + /// The two output arrays, `magResponseOutput` and + /// `phaseResponseOutput`, must be created before calling this method; they + /// must be the same size as the array of input frequency values + /// (`frequencyArray`). + external void getFrequencyResponse( + JSFloat32Array frequencyHz, + JSFloat32Array magResponse, + JSFloat32Array phaseResponse, + ); + + /// The `type` property of the [BiquadFilterNode] interface is a string (enum) + /// value defining the kind of filtering algorithm the node is implementing. + external BiquadFilterType get type; + external set type(BiquadFilterType value); + + /// The `frequency` property of the [BiquadFilterNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] — a double representing a frequency in the current filtering + /// algorithm measured in hertz (Hz). + /// + /// Its default value is `350`, with a nominal range of `10` to the + /// [Nyquist frequency](https://en.wikipedia.org/wiki/Nyquist_frequency) — + /// that is, half of the sample rate. + external AudioParam get frequency; + + /// The `detune` property of the [BiquadFilterNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing detuning of the frequency in + /// [cents](https://en.wikipedia.org/wiki/Cent_%28music%29). + external AudioParam get detune; + + /// The `Q` property of the [BiquadFilterNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam], a double representing a + /// [Q factor](https://en.wikipedia.org/wiki/Q_factor), or _quality factor_. + /// + /// It is a dimensionless value with a default value of `1` and a nominal + /// range of `0.0001` to `1000`. + external AudioParam get Q; + + /// The `gain` property of the [BiquadFilterNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] — a double representing the + /// [gain](https://en.wikipedia.org/wiki/Gain) used in the current filtering + /// algorithm. + /// + /// When its value is positive, it represents a real gain; when negative, it + /// represents an attenuation. + /// + /// It is expressed in dB, has a default value of `0`, and can take a value in + /// a nominal range of `-40` to `40`. 
+ external AudioParam get gain; +} +extension type BiquadFilterOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory BiquadFilterOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + BiquadFilterType type, + num Q, + num detune, + num frequency, + num gain, + }); + + external BiquadFilterType get type; + external set type(BiquadFilterType value); + external double get Q; + external set Q(num value); + external double get detune; + external set detune(num value); + external double get frequency; + external set frequency(num value); + external double get gain; + external set gain(num value); +} + +/// The `ChannelMergerNode` interface, often used in conjunction with its +/// opposite, [ChannelSplitterNode], reunites different mono inputs into a +/// single output. Each input is used to fill a channel of the output. This is +/// useful for accessing each channels separately, e.g. for performing channel +/// mixing where gain must be separately controlled on each channel. +/// +/// ![Default channel merger node with six mono inputs combining to form a +/// single output.](webaudiomerger.png) +/// +/// If `ChannelMergerNode` has one single output, but as many inputs as there +/// are channels to merge; the number of inputs is defined as a parameter of its +/// constructor and the call to [BaseAudioContext.createChannelMerger]. In the +/// case that no value is given, it will default to `6`. +/// +/// Using a `ChannelMergerNode`, it is possible to create outputs with more +/// channels than the rendering hardware is able to process. In that case, when +/// the signal is sent to the [BaseAudioContext.listener] object, supernumerary +/// channels will be ignored. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// | Property               | Value                                    |
+/// | ---------------------- | ---------------------------------------- |
+/// | Number of inputs       | variable; default to `6`                 |
+/// | Number of outputs      | `1`                                      |
+/// | Channel count mode     | `"explicit"`                             |
+/// | Channel count          | `2` (not used in the default count mode) |
+/// | Channel interpretation | `"speakers"`                             |
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/ChannelMergerNode). +extension type ChannelMergerNode._(JSObject _) implements AudioNode, JSObject { + external factory ChannelMergerNode( + BaseAudioContext context, [ + ChannelMergerOptions options, + ]); +} +extension type ChannelMergerOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory ChannelMergerOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + int numberOfInputs, + }); + + external int get numberOfInputs; + external set numberOfInputs(int value); +} + +/// The `ChannelSplitterNode` interface, often used in conjunction with its +/// opposite, [ChannelMergerNode], separates the different channels of an audio +/// source into a set of mono outputs. This is useful for accessing each channel +/// separately, e.g. for performing channel mixing where gain must be separately +/// controlled on each channel. +/// +/// ![Default channel splitter node with a single input splitting to form 6 mono +/// outputs.](webaudiosplitter.png) +/// +/// If your `ChannelSplitterNode` always has one single input, the amount of +/// outputs is defined by a parameter on its constructor and the call to +/// [BaseAudioContext.createChannelSplitter]. In the case that no value is +/// given, it will default to `6`. If there are fewer channels in the input than +/// there are outputs, supernumerary outputs are silent. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: variable; default to 6
+/// - Channel count mode: `"explicit"` (older implementations, as per earlier
+///   versions of the spec, use `"max"`)
+/// - Channel count: fixed to the number of outputs (older implementations, as
+///   per earlier versions of the spec, use 2; not used in the default count
+///   mode)
+/// - Channel interpretation: `"discrete"`
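+///
+/// A minimal usage sketch (not part of the MDN text above; the context and
+/// the stereo source are assumed to exist already):
+///
+/// ```dart
+/// // Splits a stereo source so that each channel gets its own gain.
+/// void splitAndAttenuate(BaseAudioContext ctx, AudioNode stereoSource) {
+///   final splitter =
+///       ChannelSplitterNode(ctx, ChannelSplitterOptions(numberOfOutputs: 2));
+///   final leftGain = GainNode(ctx, GainOptions(gain: 0.8));
+///   final rightGain = GainNode(ctx, GainOptions(gain: 0.3));
+///   stereoSource.connect(splitter);
+///   splitter.connect(leftGain, 0); // splitter output 0 = left channel
+///   splitter.connect(rightGain, 1); // splitter output 1 = right channel
+/// }
+/// ```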
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/ChannelSplitterNode). +extension type ChannelSplitterNode._(JSObject _) + implements AudioNode, JSObject { + external factory ChannelSplitterNode( + BaseAudioContext context, [ + ChannelSplitterOptions options, + ]); +} +extension type ChannelSplitterOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory ChannelSplitterOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + int numberOfOutputs, + }); + + external int get numberOfOutputs; + external set numberOfOutputs(int value); +} + +/// The `ConstantSourceNode` interface—part of the Web Audio API—represents an +/// audio source (based upon [AudioScheduledSourceNode]) whose output is single +/// unchanging value. This makes it useful for cases in which you need a +/// constant value coming in from an audio source. In addition, it can be used +/// like a constructible [AudioParam] by automating the value of its +/// [ConstantSourceNode.offset] or by connecting another node to it; see +/// [Controlling multiple parameters with ConstantSourceNode](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API/Controlling_multiple_parameters_with_ConstantSourceNode). +/// +/// A `ConstantSourceNode` has no inputs and exactly one monaural (one-channel) +/// output. The output's value is always the same as the value of the +/// [ConstantSourceNode.offset] parameter. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 0
+/// - Number of outputs: 1
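+///
+/// A minimal usage sketch (not part of the MDN text above; the context is
+/// assumed to exist already):
+///
+/// ```dart
+/// // Emits the constant value 0.5; the offset can be changed while running.
+/// ConstantSourceNode makeDcSource(BaseAudioContext ctx) {
+///   final dc = ConstantSourceNode(ctx, ConstantSourceOptions(offset: 0.5));
+///   dc.start();
+///   dc.offset.value = 0.25; // `offset` is read-only, its `value` is not
+///   return dc;
+/// }
+/// ```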
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/ConstantSourceNode). +extension type ConstantSourceNode._(JSObject _) + implements AudioScheduledSourceNode, JSObject { + external factory ConstantSourceNode( + BaseAudioContext context, [ + ConstantSourceOptions options, + ]); + + /// The read-only `offset` property of the [ConstantSourceNode] + /// interface returns a [AudioParam] object indicating the numeric + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// value which is always returned + /// by the source when asked for the next sample. + /// + /// > **Note:** While the `AudioParam` named `offset` is read-only, the + /// > `value` property within is not. So you can change the value of + /// > `offset` by setting the value of + /// > `ConstantSourceNode.offset.value`: + /// > + /// > ```js + /// > myConstantSourceNode.offset.value = newValue; + /// > ``` + external AudioParam get offset; +} +extension type ConstantSourceOptions._(JSObject _) implements JSObject { + external factory ConstantSourceOptions({num offset}); + + external double get offset; + external set offset(num value); +} + +/// The `ConvolverNode` interface is an [AudioNode] that performs a Linear +/// Convolution on a given [AudioBuffer], often used to achieve a reverb effect. +/// A `ConvolverNode` always has exactly one input and one output. +/// +/// > **Note:** For more information on the theory behind Linear Convolution, +/// > see the +/// > [Convolution article on Wikipedia](https://en.wikipedia.org/wiki/Convolution). +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"clamped-max"`
+/// - Channel count: 1, 2, or 4
+/// - Channel interpretation: `"speakers"`
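+///
+/// A minimal reverb sketch (not part of the MDN text above; the context, the
+/// source, the destination and a previously decoded impulse-response buffer
+/// are assumed):
+///
+/// ```dart
+/// // Routes a source through a convolution reverb and on to `destination`.
+/// void addReverb(BaseAudioContext ctx, AudioNode source,
+///     AudioNode destination, AudioBuffer impulseResponse) {
+///   final reverb =
+///       ConvolverNode(ctx, ConvolverOptions(buffer: impulseResponse));
+///   source.connect(reverb);
+///   reverb.connect(destination);
+/// }
+/// ```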
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/ConvolverNode). +extension type ConvolverNode._(JSObject _) implements AudioNode, JSObject { + external factory ConvolverNode( + BaseAudioContext context, [ + ConvolverOptions options, + ]); + + /// The **`buffer`** property of the [ConvolverNode] interface represents a + /// mono, stereo, or 4-channel [AudioBuffer] containing the (possibly + /// multichannel) impulse response used by the `ConvolverNode` to create the + /// reverb effect. + /// + /// This is normally a simple recording of as-close-to-an-impulse as can be + /// found in the space you want to model. For example, if you want to model + /// the reverb in your bathroom, you might set up a microphone near the door + /// to record the sound of a balloon pop or synthesized impulse from the sink. + /// That audio recording could then be used as the buffer. + /// + /// This audio buffer must have the same sample-rate as the `AudioContext` or + /// an exception will be thrown. At the time when this attribute is set, the + /// buffer and the state of the attribute will be used to configure the + /// `ConvolverNode` with this impulse response having the given normalization. + /// The initial value of this attribute is `null`. + external AudioBuffer? get buffer; + external set buffer(AudioBuffer? value); + + /// The `normalize` property of the [ConvolverNode] interface + /// is a boolean that controls whether the impulse response from the buffer + /// will be + /// scaled by an equal-power normalization when the `buffer` attribute is set, + /// or not. + /// + /// Its default value is `true` in order to achieve a more uniform output + /// level from the convolver, when loaded with diverse impulse responses. If + /// normalize is + /// set to `false`, then the convolution will be rendered with no + /// pre-processing/scaling of the impulse response. Changes to this value do + /// not take + /// effect until the next time the `buffer` attribute is set. + external bool get normalize; + external set normalize(bool value); +} +extension type ConvolverOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory ConvolverOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + AudioBuffer? buffer, + bool disableNormalization, + }); + + external AudioBuffer? get buffer; + external set buffer(AudioBuffer? value); + external bool get disableNormalization; + external set disableNormalization(bool value); +} + +/// The **`DelayNode`** interface represents a +/// [delay-line](https://en.wikipedia.org/wiki/Digital_delay_line); an +/// [AudioNode] audio-processing module that causes a delay between the arrival +/// of an input data and its propagation to the output. +/// +/// A `DelayNode` always has exactly one input and one output, both with the +/// same amount of channels. +/// +/// ![The DelayNode acts as a delay-line, here with a value of +/// 1s.](webaudiodelaynode.png) +/// +/// When creating a graph that has a cycle, it is mandatory to have at least one +/// `DelayNode` in the cycle, or the nodes taking part in the cycle will be +/// muted. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"max"`
+/// - Channel count: 2 (not used in the default count mode)
+/// - Channel interpretation: `"speakers"`
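+///
+/// A minimal feedback-echo sketch (not part of the MDN text above; the
+/// context, the source and the destination are assumed). The cycle below is
+/// only allowed because it contains a `DelayNode`:
+///
+/// ```dart
+/// // Builds a simple echo: each repeat is 0.4 s later and half as loud.
+/// void addEcho(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final delay =
+///       DelayNode(ctx, DelayOptions(maxDelayTime: 2, delayTime: 0.4));
+///   final feedback = GainNode(ctx, GainOptions(gain: 0.5));
+///   source.connect(delay);
+///   delay.connect(feedback);
+///   feedback.connect(delay); // cycle through the DelayNode
+///   delay.connect(destination);
+/// }
+/// ```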
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/DelayNode). +extension type DelayNode._(JSObject _) implements AudioNode, JSObject { + external factory DelayNode( + BaseAudioContext context, [ + DelayOptions options, + ]); + + /// The `delayTime` property of the [DelayNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing the amount of delay to apply. + /// + /// `delayTime` is expressed in seconds, its minimal value is `0`, and its + /// maximum value is defined by the `maxDelayTime` argument of the + /// [BaseAudioContext.createDelay] method that created it. + /// + /// > **Note:** Though the [AudioParam] returned is read-only, the value it + /// > represents is not. + external AudioParam get delayTime; +} +extension type DelayOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory DelayOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + num maxDelayTime, + num delayTime, + }); + + external double get maxDelayTime; + external set maxDelayTime(num value); + external double get delayTime; + external set delayTime(num value); +} + +/// The `DynamicsCompressorNode` interface provides a compression effect, which +/// lowers the volume of the loudest parts of the signal in order to help +/// prevent clipping and distortion that can occur when multiple sounds are +/// played and multiplexed together at once. This is often used in musical +/// production and game audio. `DynamicsCompressorNode` is an [AudioNode] that +/// has exactly one input and one output. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"clamped-max"`
+/// - Channel count: 2
+/// - Channel interpretation: `"speakers"`
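+///
+/// A minimal usage sketch (not part of the MDN text above; the context, the
+/// source and the destination are assumed):
+///
+/// ```dart
+/// // Compresses a source fairly aggressively before it reaches the output.
+/// void compress(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final compressor = DynamicsCompressorNode(
+///       ctx, DynamicsCompressorOptions(threshold: -40, knee: 30, ratio: 12));
+///   source.connect(compressor);
+///   compressor.connect(destination);
+///   print(compressor.reduction); // current gain reduction in dB (<= 0)
+/// }
+/// ```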
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/DynamicsCompressorNode). +extension type DynamicsCompressorNode._(JSObject _) + implements AudioNode, JSObject { + external factory DynamicsCompressorNode( + BaseAudioContext context, [ + DynamicsCompressorOptions options, + ]); + + /// The `threshold` property of the [DynamicsCompressorNode] interface is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] representing the decibel value above which the compression + /// will start taking effect. + /// + /// The `threshold` property's default value is `-24` and it can be set + /// between `-100` and `0`. + /// + /// ![The threshold attribute has no effect on signals lowers than its value, + /// but induce volume reduction on signal stronger than its + /// value.](webaudiothreshold.png) + external AudioParam get threshold; + + /// The `knee` property of the [DynamicsCompressorNode] interface is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] containing a decibel value representing the range above the + /// threshold where the curve smoothly transitions to the compressed portion. + /// + /// The `knee` property's default value is `30` and it can be set between `0` + /// and `40`. + /// + /// ![Describes the effect of a knee, showing two curves one for a hard knee, + /// the other for a soft knee.](webaudioknee.png) + external AudioParam get knee; + + /// The `ratio` property of the [DynamicsCompressorNode] interface Is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] representing the amount of change, in dB, needed in the input + /// for a 1 dB change in the output. + /// + /// The `ratio` property's default value is `12` and it can be set between `1` + /// and `20`. + /// + /// ![Describes the effect of different ratio on the output signal](webaudioratio.png) + external AudioParam get ratio; + + /// The **`reduction`** read-only property of the [DynamicsCompressorNode] + /// interface is a float representing the amount of gain reduction currently + /// applied by the compressor to the signal. + /// + /// Intended for metering purposes, it returns a value in dB, or `0` (no gain + /// reduction) if no signal is fed into the `DynamicsCompressorNode`. The + /// range of this value is between `-20` and `0` (in dB). + external double get reduction; + + /// The `attack` property of the [DynamicsCompressorNode] interface is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] representing the amount of time, in seconds, required to + /// reduce the gain by 10 dB. It defines how quickly the signal is adapted + /// when its volume is increased. + /// + /// The `attack` property's default value is `0.003` and it can be set between + /// `0` and `1`. + external AudioParam get attack; + + /// The `release` property of the [DynamicsCompressorNode] interface Is a + /// [k-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#k-rate) + /// [AudioParam] representing the amount of time, in seconds, required to + /// increase the gain by 10 dB. It defines how quick the signal is adapted + /// when its volume is reduced. + /// + /// The `release` property's default value is `0.25` and it can be set between + /// `0` and `1`. 
+ external AudioParam get release; +} +extension type DynamicsCompressorOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory DynamicsCompressorOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + num attack, + num knee, + num ratio, + num release, + num threshold, + }); + + external double get attack; + external set attack(num value); + external double get knee; + external set knee(num value); + external double get ratio; + external set ratio(num value); + external double get release; + external set release(num value); + external double get threshold; + external set threshold(num value); +} + +/// The `GainNode` interface represents a change in volume. It is an [AudioNode] +/// audio-processing module that causes a given gain to be applied to the input +/// data before its propagation to the output. A `GainNode` always has exactly +/// one input and one output, both with the same number of channels. +/// +/// The gain is a unitless value, changing with time, that is multiplied to each +/// corresponding sample of all input channels. If modified, the new gain is +/// instantly applied, causing unaesthetic 'clicks' in the resulting audio. To +/// prevent this from happening, never change the value directly but use the +/// exponential interpolation methods on the [AudioParam] interface. +/// +/// ![The GainNode is increasing the gain of the output.](webaudiogainnode.png) +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"max"`
+/// - Channel count: 2 (not used in the default count mode)
+/// - Channel interpretation: `"speakers"`
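+///
+/// A minimal fade-out sketch (not part of the MDN text above; the context,
+/// the source and the destination are assumed, as are the [AudioParam]
+/// scheduling methods bound elsewhere in this library):
+///
+/// ```dart
+/// // Fades a source out over half a second instead of setting `gain.value`
+/// // directly, which would produce an audible click.
+/// void fadeOut(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final volume = GainNode(ctx, GainOptions(gain: 1));
+///   source.connect(volume);
+///   volume.connect(destination);
+///   volume.gain.setValueAtTime(1, ctx.currentTime); // assumed binding
+///   volume.gain.exponentialRampToValueAtTime(0.01, ctx.currentTime + 0.5);
+/// }
+/// ```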
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/GainNode). +extension type GainNode._(JSObject _) implements AudioNode, JSObject { + external factory GainNode( + BaseAudioContext context, [ + GainOptions options, + ]); + + /// The `gain` property of the [GainNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing the amount of gain to apply. + external AudioParam get gain; +} +extension type GainOptions._(JSObject _) implements AudioNodeOptions, JSObject { + external factory GainOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + num gain, + }); + + external double get gain; + external set gain(num value); +} + +/// The **`IIRFilterNode`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// is a [AudioNode] processor which implements a general +/// **[infinite impulse response](https://en.wikipedia.org/wiki/Infinite_impulse_response)** +/// (IIR) filter; this type of filter can be used to implement tone control +/// devices and graphic equalizers as well. It lets the parameters of the filter +/// response be specified, so that it can be tuned as needed. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"max"`
+/// - Channel count: same as on the input
+/// - Channel interpretation: `"speakers"`
+/// +/// Typically, it's best to use the [BiquadFilterNode] interface to implement +/// higher-order filters. There are several reasons why: +/// +/// - Biquad filters are typically less sensitive to numeric quirks. +/// - The filter parameters of biquad filters can be automated. +/// - All even-ordered IIR filters can be created using [BiquadFilterNode]. +/// +/// However, if you need to create an odd-ordered IIR filter, you'll need to use +/// `IIRFilterNode`. You may also find this interface useful if you don't need +/// automation, or for other reasons. +/// +/// > **Note:** Once the node has been created, you can't change its +/// > coefficients. +/// +/// `IIRFilterNode`s have a tail-time reference; they continue to output +/// non-silent audio with zero input. As an IIR filter, the non-zero input +/// continues forever, but this can be limited after some finite time in +/// practice, when the output has approached zero closely enough. The actual +/// time that takes depends on the filter coefficients provided. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/IIRFilterNode). +extension type IIRFilterNode._(JSObject _) implements AudioNode, JSObject { + external factory IIRFilterNode( + BaseAudioContext context, + IIRFilterOptions options, + ); + + /// The `getFrequencyResponse()` method of the [IIRFilterNode] + /// interface takes the current filtering algorithm's settings and calculates + /// the + /// frequency response for frequencies specified in a specified array of + /// frequencies. + /// + /// The two output arrays, `magResponseOutput` and + /// `phaseResponseOutput`, must be created before calling this method; they + /// must be the same size as the array of input frequency values + /// (`frequencyArray`). + external void getFrequencyResponse( + JSFloat32Array frequencyHz, + JSFloat32Array magResponse, + JSFloat32Array phaseResponse, + ); +} +extension type IIRFilterOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory IIRFilterOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + required JSArray feedforward, + required JSArray feedback, + }); + + external JSArray get feedforward; + external set feedforward(JSArray value); + external JSArray get feedback; + external set feedback(JSArray value); +} + +/// The `MediaElementAudioSourceNode` interface represents an audio source +/// consisting of an HTML `audio` or `video` element. It is an [AudioNode] that +/// acts as an audio source. +/// +/// A `MediaElementAudioSourceNode` has no inputs and exactly one output, and is +/// created using the [AudioContext.createMediaElementSource] method. The number +/// of channels in the output equals the number of channels of the audio +/// referenced by the [HTMLMediaElement] used in the creation of the node, or is +/// 1 if the [HTMLMediaElement] has no audio. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 0
+/// - Number of outputs: 1
+/// - Channel count: 2 (but note that [AudioNode.channelCount] is only used for
+///   up-mixing and down-mixing [AudioNode] inputs, and
+///   [MediaElementAudioSourceNode] doesn't have any input)
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/MediaElementAudioSourceNode). +extension type MediaElementAudioSourceNode._(JSObject _) + implements AudioNode, JSObject { + external factory MediaElementAudioSourceNode( + AudioContext context, + MediaElementAudioSourceOptions options, + ); + + /// The [MediaElementAudioSourceNode] interface's + /// read-only **`mediaElement`** property indicates the + /// [HTMLMediaElement] that contains the audio track from which the node is + /// receiving audio. + /// + /// This stream was specified when the node was first created, + /// either using the [MediaElementAudioSourceNode.MediaElementAudioSourceNode] + /// constructor or the [AudioContext.createMediaElementSource] method. + external HTMLMediaElement get mediaElement; +} +extension type MediaElementAudioSourceOptions._(JSObject _) + implements JSObject { + external factory MediaElementAudioSourceOptions( + {required HTMLMediaElement mediaElement}); + + external HTMLMediaElement get mediaElement; + external set mediaElement(HTMLMediaElement value); +} + +/// The `MediaStreamAudioDestinationNode` interface represents an audio +/// destination consisting of a +/// [WebRTC](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API) +/// [MediaStream] with a single `AudioMediaStreamTrack`, which can be used in a +/// similar way to a `MediaStream` obtained from [MediaDevices.getUserMedia]. +/// +/// It is an [AudioNode] that acts as an audio destination, created using the +/// [AudioContext.createMediaStreamDestination] method. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 0
+/// - Channel count: 2
+/// - Channel count mode: `"explicit"`
+/// - Channel interpretation: `"speakers"`
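+///
+/// A minimal usage sketch (not part of the MDN text above; the context and
+/// the source are assumed):
+///
+/// ```dart
+/// // Routes the graph into a MediaStream instead of the audio hardware.
+/// MediaStream captureGraph(AudioContext ctx, AudioNode source) {
+///   final streamOut = MediaStreamAudioDestinationNode(ctx);
+///   source.connect(streamOut);
+///   return streamOut.stream; // e.g. feed this to a MediaRecorder
+/// }
+/// ```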
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioDestinationNode). +extension type MediaStreamAudioDestinationNode._(JSObject _) + implements AudioNode, JSObject { + external factory MediaStreamAudioDestinationNode( + AudioContext context, [ + AudioNodeOptions options, + ]); + + /// The `stream` property of the [AudioContext] interface represents a + /// [MediaStream] containing a single audio [MediaStreamTrack] with the same + /// number of channels as the node itself. + /// + /// You can use this property to get a stream out of the audio graph and feed + /// it into another construct, such as a + /// [Media Recorder](https://developer.mozilla.org/en-US/docs/Web/API/MediaStream_Recording_API). + external MediaStream get stream; +} + +/// The **`MediaStreamAudioSourceNode`** interface is a type of [AudioNode] +/// which operates as an audio source whose media is received from a +/// [MediaStream] obtained using the WebRTC or Media Capture and Streams APIs. +/// +/// This media could be from a microphone (through [MediaDevices.getUserMedia]) +/// or from a remote peer on a WebRTC call (using the [RTCPeerConnection]'s +/// audio tracks). +/// +/// A `MediaStreamAudioSourceNode` has no inputs and exactly one output, and is +/// created using the [AudioContext.createMediaStreamSource] method. +/// +/// The `MediaStreamAudioSourceNode` takes the audio from the _first_ +/// [MediaStreamTrack] whose [MediaStreamTrack.kind] attribute's value is +/// `audio`. See [Track ordering](#track_ordering) for more information about +/// the order of tracks. +/// +/// The number of channels output by the node matches the number of tracks found +/// in the selected audio track. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 0
+/// - Number of outputs: 1
+/// - Channel count: 2 (but note that [AudioNode.channelCount] is only used for
+///   up-mixing and down-mixing [AudioNode] inputs, and
+///   [MediaStreamAudioSourceNode] doesn't have any input)
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamAudioSourceNode). +extension type MediaStreamAudioSourceNode._(JSObject _) + implements AudioNode, JSObject { + external factory MediaStreamAudioSourceNode( + AudioContext context, + MediaStreamAudioSourceOptions options, + ); + + /// The [MediaStreamAudioSourceNode] interface's + /// read-only **`mediaStream`** property indicates the + /// [MediaStream] that contains the audio track from which the node is + /// receiving audio. + /// + /// This stream was specified when the node was first created, + /// either using the [MediaStreamAudioSourceNode.MediaStreamAudioSourceNode] + /// constructor or the [AudioContext.createMediaStreamSource] method. + external MediaStream get mediaStream; +} +extension type MediaStreamAudioSourceOptions._(JSObject _) implements JSObject { + external factory MediaStreamAudioSourceOptions( + {required MediaStream mediaStream}); + + external MediaStream get mediaStream; + external set mediaStream(MediaStream value); +} + +/// The **`MediaStreamTrackAudioSourceNode`** interface is a type of [AudioNode] +/// which represents a source of audio data taken from a specific +/// [MediaStreamTrack] obtained through the +/// [WebRTC](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API) or +/// [Media Capture and Streams](https://developer.mozilla.org/en-US/docs/Web/API/Media_Capture_and_Streams_API) +/// APIs. +/// +/// The audio itself might be input from a microphone or other audio sampling +/// device, or might be received through a [RTCPeerConnection], among other +/// possible options. +/// +/// A `MediaStreamTrackAudioSourceNode` has no inputs and exactly one output, +/// and is created using the [AudioContext.createMediaStreamTrackSource] method. +/// This interface is similar to [MediaStreamAudioSourceNode], except it lets +/// you specifically state the track to use, rather than assuming the first +/// audio track on a stream. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 0
+/// - Number of outputs: 1
+/// - Channel count: defined by the first audio [MediaStreamTrack] passed to
+///   the [AudioContext.createMediaStreamTrackSource] method that created it
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrackAudioSourceNode). +extension type MediaStreamTrackAudioSourceNode._(JSObject _) + implements AudioNode, JSObject { + external factory MediaStreamTrackAudioSourceNode( + AudioContext context, + MediaStreamTrackAudioSourceOptions options, + ); +} +extension type MediaStreamTrackAudioSourceOptions._(JSObject _) + implements JSObject { + external factory MediaStreamTrackAudioSourceOptions( + {required MediaStreamTrack mediaStreamTrack}); + + external MediaStreamTrack get mediaStreamTrack; + external set mediaStreamTrack(MediaStreamTrack value); +} + +/// The **`OscillatorNode`** interface represents a periodic waveform, such as a +/// sine wave. It is an [AudioScheduledSourceNode] audio-processing module that +/// causes a specified frequency of a given wave to be created—in effect, a +/// constant tone. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 0
+/// - Number of outputs: 1
+/// - Channel count mode: `max`
+/// - Channel count: 2 (not used in the default count mode)
+/// - Channel interpretation: `speakers`
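+///
+/// A minimal usage sketch (not part of the MDN text above; the context and
+/// the destination are assumed):
+///
+/// ```dart
+/// // Plays a 440 Hz tone for one second.
+/// void beep(BaseAudioContext ctx, AudioNode destination) {
+///   final osc = OscillatorNode(ctx, OscillatorOptions(frequency: 440));
+///   osc.connect(destination);
+///   osc.start();
+///   osc.stop(ctx.currentTime + 1);
+/// }
+/// ```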
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/OscillatorNode). +extension type OscillatorNode._(JSObject _) + implements AudioScheduledSourceNode, JSObject { + external factory OscillatorNode( + BaseAudioContext context, [ + OscillatorOptions options, + ]); + + /// The **`setPeriodicWave()`** method of the [OscillatorNode] interface is + /// used to point to a [PeriodicWave] + /// defining a periodic waveform that can be used to shape the oscillator's + /// output, when + /// [OscillatorNode.type] is `custom`. + external void setPeriodicWave(PeriodicWave periodicWave); + + /// The **`type`** property of the [OscillatorNode] interface specifies what + /// shape of [waveform](https://en.wikipedia.org/wiki/Waveform) the + /// oscillator will output. There are several common waveforms available, as + /// well as an + /// option to specify a custom waveform shape. The shape of the waveform will + /// affect the + /// tone that is produced. + external OscillatorType get type; + external set type(OscillatorType value); + + /// The **`frequency`** property of the [OscillatorNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing the frequency of oscillation in hertz. + /// + /// > **Note:** though the `AudioParam` returned is read-only, the value it + /// > represents is not. + external AudioParam get frequency; + + /// The `detune` property of the [OscillatorNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing detuning of oscillation in + /// [cents](https://en.wikipedia.org/wiki/Cent_%28music%29). + /// + /// > **Note:** though the `AudioParam` returned is read-only, the value it + /// > represents is not. + external AudioParam get detune; +} +extension type OscillatorOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory OscillatorOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + OscillatorType type, + num frequency, + num detune, + PeriodicWave periodicWave, + }); + + external OscillatorType get type; + external set type(OscillatorType value); + external double get frequency; + external set frequency(num value); + external double get detune; + external set detune(num value); + external PeriodicWave get periodicWave; + external set periodicWave(PeriodicWave value); +} + +/// The `PannerNode` interface defines an audio-processing object that +/// represents the location, direction, and behavior of an audio source signal +/// in a simulated physical space. This [AudioNode] uses right-hand Cartesian +/// coordinates to describe the source's _position_ as a vector and its +/// _orientation_ as a 3D directional cone. +/// +/// A `PannerNode` always has exactly one input and one output: the input can be +/// _mono_ or _stereo_ but the output is always _stereo_ (2 channels); you can't +/// have panning effects without at least two audio channels! +/// +/// ![The PannerNode defines a spatial position and direction for a given +/// signal.](webaudiopannernode.png) +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"clamped-max"`
+/// - Channel count: 2
+/// - Channel interpretation: `"speakers"`
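+///
+/// A minimal spatialization sketch (not part of the MDN text above; the
+/// context, the source and the destination are assumed):
+///
+/// ```dart
+/// // Places the source one unit to the listener's right and two in front.
+/// void placeSource(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final panner = PannerNode(ctx);
+///   panner.positionX.value = 1;
+///   panner.positionZ.value = -2; // negative Z is in front of the listener
+///   source.connect(panner);
+///   panner.connect(destination);
+/// }
+/// ```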
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode). +extension type PannerNode._(JSObject _) implements AudioNode, JSObject { + external factory PannerNode( + BaseAudioContext context, [ + PannerOptions options, + ]); + + /// > **Note:** The suggested replacement for this deprecated method is to + /// > instead set the + /// > [`positionX`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/positionX), + /// > [`positionY`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/positionY), + /// > and + /// > [`positionZ`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/positionZ) + /// > attributes directly. + /// + /// The `setPosition()` method of the [PannerNode] Interface defines the + /// position of the audio source relative to the listener (represented by an + /// [AudioListener] object stored in the [BaseAudioContext.listener] + /// attribute.) The three parameters `x`, `y` and `z` are unitless and + /// describe the source's position in 3D space using the right-hand Cartesian + /// coordinate system. + /// + /// The `setPosition()` method's default value of the position is `(0, 0, 0)`. + external void setPosition( + num x, + num y, + num z, + ); + + /// > **Note:** The suggested replacement for this deprecated method is to + /// > instead set the + /// > [`orientationX`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/orientationX), + /// > [`orientationY`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/orientationY), + /// > and + /// > [`orientationZ`](https://developer.mozilla.org/en-US/docs/Web/API/PannerNode/orientationZ) + /// > attributes directly. + /// + /// The `setOrientation()` method of the [PannerNode] Interface defines the + /// direction the audio source is playing in. + /// + /// This can have a big effect if the sound is very directional — controlled + /// by the three cone-related attributes [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and [PannerNode.coneOuterGain]. In such a + /// case, a sound pointing away from the listener can be very quiet or even + /// silent. + /// + /// The three parameters `x`, `y` and `z` are unitless and describe a + /// direction vector in 3D space using the right-hand Cartesian coordinate + /// system. The default value of the direction vector is `(1, 0, 0)`. + external void setOrientation( + num x, + num y, + num z, + ); + + /// The `panningModel` property of the [PannerNode] interface is an enumerated + /// value determining which spatialization algorithm to use to position the + /// audio in 3D space. + /// + /// The possible values are: + /// + /// - `equalpower`: Represents the equal-power panning algorithm, generally + /// regarded as simple and efficient. `equalpower` is the default value. + /// - `HRTF`: Renders a stereo output of higher quality than `equalpower` — it + /// uses a convolution with measured impulse responses from human subjects. + external PanningModelType get panningModel; + external set panningModel(PanningModelType value); + + /// The **`positionX`** property of the [PannerNode] interface specifies the X + /// coordinate of the audio source's position in 3D Cartesian + /// coordinates, corresponding to the _horizontal_ axis (left-right). 
+ /// + /// The complete vector is defined by the position of the audio source, given + /// as + /// ([PannerNode.positionX], [PannerNode.positionY], + /// [PannerNode.positionZ]), and the orientation + /// of the audio source (that is, the direction in which it's facing), given + /// as + /// ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). + /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. + /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get positionX; + + /// The **`positionY`** property of the [PannerNode] interface specifies the Y + /// coordinate of the audio source's position in 3D Cartesian + /// coordinates, corresponding to the _vertical_ axis (top-bottom). The + /// complete + /// vector is defined by the position of the audio source, given as + /// ([PannerNode.positionX], [PannerNode.positionY], [PannerNode.positionZ]), + /// and the orientation + /// of the audio source (that is, the direction in which it's facing), given + /// as + /// ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). + /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. + /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get positionY; + + /// The **`positionZ`** property of the [PannerNode] interface specifies the Z + /// coordinate of the audio source's position in 3D Cartesian + /// coordinates, corresponding to the _depth_ axis (behind-in front of the + /// listener). The complete vector is defined by the position of the audio + /// source, given + /// as ([PannerNode.positionX], + /// [PannerNode.positionY], + /// [PannerNode.positionZ]), + /// and the orientation of the audio source (that is, the direction in + /// which it's facing), given as ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). + /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. 
+ /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get positionZ; + + /// The **`orientationX`** property of the [PannerNode] interface indicates + /// the X (horizontal) component of the + /// direction in which the audio source is facing, in a 3D Cartesian + /// coordinate space. + /// + /// The complete vector is defined by the position of the audio source, given + /// as + /// ([PannerNode.positionX], [PannerNode.positionY], + /// [PannerNode.positionZ]), and the orientation + /// of the audio source (that is, the direction in which it's facing), given + /// as + /// ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). + /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. + /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get orientationX; + + /// The **`orientationY`** property of the [PannerNode] interface + /// indicates the Y (vertical) component of the direction the audio source is + /// facing, in 3D Cartesian coordinate space. + /// + /// The complete vector is defined by the position of the audio source, given + /// as + /// ([PannerNode.positionX], [PannerNode.positionY], + /// [PannerNode.positionZ]), and the orientation + /// of the audio source (that is, the direction in which it's facing), given + /// as + /// ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). + /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. + /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get orientationY; + + /// The **`orientationZ`** property of the [PannerNode] interface + /// indicates the Z (depth) component of the direction the audio source is + /// facing, in 3D Cartesian coordinate space. + /// + /// The complete vector is defined by the position of the audio source, given + /// as + /// ([PannerNode.positionX], [PannerNode.positionY], + /// [PannerNode.positionZ]), and the orientation + /// of the audio source (that is, the direction in which it's facing), given + /// as + /// ([PannerNode.orientationX], + /// [PannerNode.orientationY], + /// [PannerNode.orientationZ]). 
+ /// + /// Depending on the directionality of the sound (as specified using the + /// attributes + /// [PannerNode.coneInnerAngle], + /// [PannerNode.coneOuterAngle], and + /// [PannerNode.coneOuterGain]), the orientation of the + /// sound may alter the perceived volume of the sound as it's being played. If + /// the sound + /// is pointing toward the listener, it will be louder than if the sound is + /// pointed away + /// from the listener. + /// + /// The [AudioParam] contained by this property is read only; however, you + /// can still change the value of the parameter by assigning a new value to + /// its + /// [AudioParam.value] property. + external AudioParam get orientationZ; + + /// The `distanceModel` property of the [PannerNode] interface is an + /// enumerated value determining which algorithm to use to reduce the volume + /// of the audio source as it moves away from the listener. + /// + /// The possible values are: + /// + /// - `linear`: A _linear distance model_ calculating the gain induced by the + /// distance according to: + /// `1 - rolloffFactor * (distance - refDistance) / (maxDistance - + /// refDistance)` + /// - `inverse`: An _inverse distance model_ calculating the gain induced by + /// the distance according to: + /// `refDistance / (refDistance + rolloffFactor * (Math.max(distance, + /// refDistance) - refDistance))` + /// - `exponential`: An _exponential distance model_ calculating the gain + /// induced by the distance according to: + /// `pow((Math.max(distance, refDistance) / refDistance, -rolloffFactor)`. + /// + /// `inverse` is the default value of `distanceModel`. + external DistanceModelType get distanceModel; + external set distanceModel(DistanceModelType value); + + /// The `refDistance` property of the [PannerNode] interface is a double value + /// representing the reference distance for reducing volume as the audio + /// source moves further from the listener – i.e. the distance at which the + /// volume reduction starts taking effect. This value is used by all distance + /// models. + /// + /// The `refDistance` property's default value is `1`. + external double get refDistance; + external set refDistance(num value); + + /// The `maxDistance` property of the [PannerNode] interface is a double value + /// representing the maximum distance between the audio source and the + /// listener, after which the volume is not reduced any further. This value is + /// used only by the `linear` distance model. + /// + /// The `maxDistance` property's default value is `10000`. + external double get maxDistance; + external set maxDistance(num value); + + /// The `rolloffFactor` property of the [PannerNode] interface is a double + /// value describing how quickly the volume is reduced as the source moves + /// away from the listener. This value is used by all distance models. The + /// `rolloffFactor` property's default value is `1`. + external double get rolloffFactor; + external set rolloffFactor(num value); + + /// The `coneInnerAngle` property of the [PannerNode] interface is a double + /// value describing the angle, in degrees, of a cone inside of which there + /// will be no volume reduction. + /// + /// The `coneInnerAngle` property's default value is `360`, suitable for a + /// non-directional source. 
+ external double get coneInnerAngle; + external set coneInnerAngle(num value); + + /// The `coneOuterAngle` property of the [PannerNode] interface is a double + /// value describing the angle, in degrees, of a cone outside of which the + /// volume will be reduced by a constant value, defined by the + /// [PannerNode.coneOuterGain] property. + /// + /// The `coneOuterAngle` property's default value is `0`. + external double get coneOuterAngle; + external set coneOuterAngle(num value); + + /// The `coneOuterGain` property of the [PannerNode] interface is a double + /// value, describing the amount of volume reduction outside the cone, defined + /// by the [PannerNode.coneOuterAngle] attribute. + /// + /// The `coneOuterGain` property's default value is `0`, meaning that no sound + /// can be heard outside the cone. + external double get coneOuterGain; + external set coneOuterGain(num value); +} +extension type PannerOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory PannerOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + PanningModelType panningModel, + DistanceModelType distanceModel, + num positionX, + num positionY, + num positionZ, + num orientationX, + num orientationY, + num orientationZ, + num refDistance, + num maxDistance, + num rolloffFactor, + num coneInnerAngle, + num coneOuterAngle, + num coneOuterGain, + }); + + external PanningModelType get panningModel; + external set panningModel(PanningModelType value); + external DistanceModelType get distanceModel; + external set distanceModel(DistanceModelType value); + external double get positionX; + external set positionX(num value); + external double get positionY; + external set positionY(num value); + external double get positionZ; + external set positionZ(num value); + external double get orientationX; + external set orientationX(num value); + external double get orientationY; + external set orientationY(num value); + external double get orientationZ; + external set orientationZ(num value); + external double get refDistance; + external set refDistance(num value); + external double get maxDistance; + external set maxDistance(num value); + external double get rolloffFactor; + external set rolloffFactor(num value); + external double get coneInnerAngle; + external set coneInnerAngle(num value); + external double get coneOuterAngle; + external set coneOuterAngle(num value); + external double get coneOuterGain; + external set coneOuterGain(num value); +} + +/// The **`PeriodicWave`** interface defines a periodic waveform that can be +/// used to shape the output of an [OscillatorNode]. +/// +/// `PeriodicWave` has no inputs or outputs; it is used to define custom +/// oscillators when calling [OscillatorNode.setPeriodicWave]. The +/// `PeriodicWave` itself is created/returned by +/// [BaseAudioContext.createPeriodicWave]. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/PeriodicWave). 
+extension type PeriodicWave._(JSObject _) implements JSObject { + external factory PeriodicWave( + BaseAudioContext context, [ + PeriodicWaveOptions options, + ]); +} +extension type PeriodicWaveConstraints._(JSObject _) implements JSObject { + external factory PeriodicWaveConstraints({bool disableNormalization}); + + external bool get disableNormalization; + external set disableNormalization(bool value); +} +extension type PeriodicWaveOptions._(JSObject _) + implements PeriodicWaveConstraints, JSObject { + external factory PeriodicWaveOptions({ + bool disableNormalization, + JSArray real, + JSArray imag, + }); + + external JSArray get real; + external set real(JSArray value); + external JSArray get imag; + external set imag(JSArray value); +} + +/// The `ScriptProcessorNode` interface allows the generation, processing, or +/// analyzing of audio using JavaScript. +/// +/// > **Note:** This feature was replaced by +/// > [AudioWorklets](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet) +/// > and the [AudioWorkletNode] interface. +/// +/// The `ScriptProcessorNode` interface is an [AudioNode] audio-processing +/// module that is linked to two buffers, one containing the input audio data, +/// one containing the processed output audio data. An event, implementing the +/// [AudioProcessingEvent] interface, is sent to the object each time the input +/// buffer contains new data, and the event handler terminates when it has +/// filled the output buffer with data. +/// +/// ![The ScriptProcessorNode stores the input in a buffer, send the +/// audioprocess event. The EventHandler takes the input buffer and fill the +/// output buffer which is sent to the output by the +/// ScriptProcessorNode.](webaudioscriptprocessingnode.png) +/// +/// The size of the input and output buffer are defined at the creation time, +/// when the [BaseAudioContext.createScriptProcessor] method is called (both are +/// defined by [BaseAudioContext.createScriptProcessor]'s `bufferSize` +/// parameter). The buffer size must be a power of 2 between `256` and `16384`, +/// that is `256`, `512`, `1024`, `2048`, `4096`, `8192` or `16384`. Small +/// numbers lower the _latency_, but large number may be necessary to avoid +/// audio breakup and glitches. +/// +/// If the buffer size is not defined, which is recommended, the browser will +/// pick one that its heuristic deems appropriate. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"max"`
+/// - Channel count: 2 (not used in the default count mode)
+/// - Channel interpretation: `"speakers"`
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/ScriptProcessorNode). +extension type ScriptProcessorNode._(JSObject _) + implements AudioNode, JSObject { + external EventHandler get onaudioprocess; + external set onaudioprocess(EventHandler value); + + /// The `bufferSize` property of the [ScriptProcessorNode] interface returns + /// an integer representing both the input and output buffer size, in + /// sample-frames. Its value can be a power of 2 value in the range `256` – + /// `16384`. + /// + /// > **Note:** This feature was replaced by + /// > [AudioWorklets](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet) + /// > and the [AudioWorkletNode] interface. + external int get bufferSize; +} + +/// The `StereoPannerNode` interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents a simple stereo panner node that can be used to pan an audio +/// stream left or right. It is an [AudioNode] audio-processing module that +/// positions an incoming audio stream in a stereo image using a low-cost +/// equal-power +/// [panning algorithm](https://webaudio.github.io/web-audio-api/#panning-algorithm). +/// +/// The [StereoPannerNode.pan] property takes a unitless value between `-1` +/// (full left pan) and `1` (full right pan). This interface was introduced as a +/// much simpler way to apply a simple panning effect than having to use a full +/// [PannerNode]. +/// +/// ![The Stereo Panner Node moved the sound's position from the center of two +/// speakers to the left.](stereopannernode.png) +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"clamped-max"`
+/// - Channel count: 2
+/// - Channel interpretation: `"speakers"`
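+///
+/// A minimal usage sketch (not part of the MDN text above; the context, the
+/// source and the destination are assumed):
+///
+/// ```dart
+/// // Pans a source halfway to the left.
+/// void panLeft(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final panner = StereoPannerNode(ctx, StereoPannerOptions(pan: -0.5));
+///   source.connect(panner);
+///   panner.connect(destination);
+/// }
+/// ```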
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/StereoPannerNode). +extension type StereoPannerNode._(JSObject _) implements AudioNode, JSObject { + external factory StereoPannerNode( + BaseAudioContext context, [ + StereoPannerOptions options, + ]); + + /// The `pan` property of the [StereoPannerNode] interface is an + /// [a-rate](https://developer.mozilla.org/en-US/docs/Web/API/AudioParam#a-rate) + /// [AudioParam] representing the amount of panning to apply. The value can + /// range between `-1` (full left pan) and `1` (full right pan). + external AudioParam get pan; +} +extension type StereoPannerOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory StereoPannerOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + num pan, + }); + + external double get pan; + external set pan(num value); +} + +/// The **`WaveShaperNode`** interface represents a non-linear distorter. +/// +/// It is an [AudioNode] that uses a curve to apply a wave shaping distortion to +/// the signal. Beside obvious distortion effects, it is often used to add a +/// warm feeling to the signal. +/// +/// A `WaveShaperNode` always has exactly one input and one output. +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +/// +///
+/// - Number of inputs: 1
+/// - Number of outputs: 1
+/// - Channel count mode: `"max"`
+/// - Channel count: 2 (not used in the default count mode)
+/// - Channel interpretation: `"speakers"`
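+///
+/// A minimal distortion sketch (not part of the MDN text above; the context,
+/// the source and the destination are assumed, as are `dart:typed_data` and
+/// `dart:js_interop` for the curve conversion):
+///
+/// ```dart
+/// // Hard-clips the signal at ±0.5 using a five-point shaping curve.
+/// void clipAtHalf(
+///     BaseAudioContext ctx, AudioNode source, AudioNode destination) {
+///   final shaper = WaveShaperNode(ctx);
+///   shaper.curve = Float32List.fromList([-0.5, -0.5, 0.0, 0.5, 0.5]).toJS;
+///   source.connect(shaper);
+///   shaper.connect(destination);
+/// }
+/// ```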
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/WaveShaperNode). +extension type WaveShaperNode._(JSObject _) implements AudioNode, JSObject { + external factory WaveShaperNode( + BaseAudioContext context, [ + WaveShaperOptions options, + ]); + + /// The `curve` property of the [WaveShaperNode] interface is a `Float32Array` + /// of numbers describing the distortion to apply. + /// + /// The mid-element of the array is applied to any signal value of `0`, the + /// first one to signal values of `-1`, and the last to signal values of `1`; + /// values lower than `-1` or greater than `1` are treated like `-1` or `1` + /// respectively. + /// + /// If necessary, intermediate values of the distortion curve are linearly + /// interpolated. + /// + /// > **Note:** The array can be a `null` value: in that case, no distortion + /// > is applied to the input signal. + external JSFloat32Array? get curve; + external set curve(JSFloat32Array? value); + + /// The `oversample` property of the [WaveShaperNode] interface is an + /// enumerated value indicating if oversampling must be used. Oversampling is + /// a technique for creating more samples (up-sampling) before applying a + /// distortion effect to the audio signal. + /// + /// Once applied, the number of samples is reduced to its initial numbers. + /// This leads to better results by avoiding some aliasing, but comes at the + /// expense of a lower precision shaping curve. + /// + /// The possible `oversample` values are: + /// + /// | Value | Effect | + /// | -------- | ---------------------------------------------------------------------- | + /// | `'none'` | Do not perform any oversampling. | + /// | `'2x'` | Double the amount of samples before applying the shaping curve. | + /// | `'4x'` | Multiply by 4 the amount of samples before applying the shaping curve. | + external OverSampleType get oversample; + external set oversample(OverSampleType value); +} +extension type WaveShaperOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory WaveShaperOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + JSArray curve, + OverSampleType oversample, + }); + + external JSArray get curve; + external set curve(JSArray value); + external OverSampleType get oversample; + external set oversample(OverSampleType value); +} + +/// The **`AudioWorklet`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// is used to supply custom audio processing scripts that execute in a separate +/// thread to provide very low latency audio processing. +/// +/// The worklet's code is run in the [AudioWorkletGlobalScope] global execution +/// context, using a separate Web Audio thread which is shared by the worklet +/// and other audio nodes. +/// +/// Access the audio context's instance of `AudioWorklet` through the +/// [BaseAudioContext.audioWorklet] property. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet). +extension type AudioWorklet._(JSObject _) implements Worklet, JSObject {} + +/// The **`AudioWorkletGlobalScope`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents a global execution context for user-supplied code, which defines +/// custom [AudioWorkletProcessor]-derived classes. 
+/// +/// Each [BaseAudioContext] has a single [AudioWorklet] available under the +/// [BaseAudioContext.audioWorklet] property, which runs its code in a single +/// `AudioWorkletGlobalScope`. +/// +/// As the global execution context is shared across the current +/// `BaseAudioContext`, it's possible to define any other variables and perform +/// any actions allowed in worklets — apart from defining +/// `AudioWorkletProcessor` derived classes. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletGlobalScope). +extension type AudioWorkletGlobalScope._(JSObject _) + implements WorkletGlobalScope, JSObject { + /// The **`registerProcessor`** method of the + /// [AudioWorkletGlobalScope] interface registers a class constructor derived + /// from [AudioWorkletProcessor] interface under a specified _name_. + external void registerProcessor( + String name, + AudioWorkletProcessorConstructor processorCtor, + ); + + /// The read-only **`currentFrame`** property of the [AudioWorkletGlobalScope] + /// interface returns an integer that represents the ever-increasing current + /// sample-frame of the audio block being processed. It is incremented by 128 + /// (the size of a render quantum) after the processing of each audio block. + external int get currentFrame; + + /// The read-only **`currentTime`** property of the [AudioWorkletGlobalScope] + /// interface returns a double that represents the ever-increasing context + /// time of the audio block being processed. It is equal to the + /// [BaseAudioContext.currentTime] property of the [BaseAudioContext] the + /// worklet belongs to. + external double get currentTime; + + /// The read-only **`sampleRate`** property of the [AudioWorkletGlobalScope] + /// interface returns a float that represents the sample rate of the + /// associated [BaseAudioContext] the worklet belongs to. + external double get sampleRate; +} + +/// The **`AudioParamMap`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents an iterable and read-only set of multiple audio parameters. +/// +/// An `AudioParamMap` instance is a read-only +/// [`Map`-like object](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Map#map-like_browser_apis), +/// in which each key is the name string for a parameter, and the corresponding +/// value is an [AudioParam] containing the value of that parameter. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioParamMap). +extension type AudioParamMap._(JSObject _) implements JSObject {} + +/// > **Note:** Although the interface is available outside +/// > [secure contexts](https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts), +/// > the [BaseAudioContext.audioWorklet] property is not, thus custom +/// > [AudioWorkletProcessor]s cannot be defined outside them. +/// +/// The **`AudioWorkletNode`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents a base class for a user-defined [AudioNode], which can be +/// connected to an audio routing graph along with other nodes. It has an +/// associated [AudioWorkletProcessor], which does the actual audio processing +/// in a Web Audio rendering thread. 
+/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletNode). +extension type AudioWorkletNode._(JSObject _) implements AudioNode, JSObject { + external factory AudioWorkletNode( + BaseAudioContext context, + String name, [ + AudioWorkletNodeOptions options, + ]); + + /// The read-only **`parameters`** property of the + /// [AudioWorkletNode] interface returns the associated + /// [AudioParamMap] — that is, a `Map`-like collection of + /// [AudioParam] objects. They are instantiated during creation of the + /// underlying [AudioWorkletProcessor] according to its + /// [AudioWorkletProcessor.parameterDescriptors] static + /// getter. + external AudioParamMap get parameters; + + /// The read-only **`port`** property of the + /// [AudioWorkletNode] interface returns the associated + /// [MessagePort]. It can be used to communicate between the node and its + /// associated [AudioWorkletProcessor]. + /// + /// > **Note:** The port at the other end of the channel is + /// > available under the [AudioWorkletProcessor.port] property of the + /// > processor. + external MessagePort get port; + external EventHandler get onprocessorerror; + external set onprocessorerror(EventHandler value); +} +extension type AudioWorkletNodeOptions._(JSObject _) + implements AudioNodeOptions, JSObject { + external factory AudioWorkletNodeOptions({ + int channelCount, + ChannelCountMode channelCountMode, + ChannelInterpretation channelInterpretation, + int numberOfInputs, + int numberOfOutputs, + JSArray outputChannelCount, + JSObject parameterData, + JSObject processorOptions, + }); + + external int get numberOfInputs; + external set numberOfInputs(int value); + external int get numberOfOutputs; + external set numberOfOutputs(int value); + external JSArray get outputChannelCount; + external set outputChannelCount(JSArray value); + external JSObject get parameterData; + external set parameterData(JSObject value); + external JSObject get processorOptions; + external set processorOptions(JSObject value); +} + +/// The **`AudioWorkletProcessor`** interface of the +/// [Web Audio API](https://developer.mozilla.org/en-US/docs/Web/API/Web_Audio_API) +/// represents an audio processing code behind a custom [AudioWorkletNode]. It +/// lives in the [AudioWorkletGlobalScope] and runs on the Web Audio rendering +/// thread. In turn, an [AudioWorkletNode] based on it runs on the main thread. +/// +/// --- +/// +/// API documentation sourced from +/// [MDN Web Docs](https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletProcessor). +extension type AudioWorkletProcessor._(JSObject _) implements JSObject { + external factory AudioWorkletProcessor(); + + /// The read-only **`port`** property of the + /// [AudioWorkletProcessor] interface returns the associated + /// [MessagePort]. It can be used to communicate between the processor and the + /// [AudioWorkletNode] to which it belongs. + /// + /// > **Note:** The port at the other end of the channel is + /// > available under the [AudioWorkletNode.port] property of the node. + external MessagePort get port; +}