소스 검색

Implement WebAudio driver (#711)

Pavel Alexandrov 5 년 전
부모
커밋
40d2f9c0e1

+ 2 - 0
hxd/impl/TypedArray.hx

@@ -5,6 +5,7 @@ package hxd.impl;
 #if haxe4
 typedef Float32Array = js.lib.Float32Array;
 typedef Uint16Array = js.lib.Uint16Array;
+typedef Int16Array = js.lib.Int16Array;
 typedef Uint8Array = js.lib.Uint8Array;
 typedef ArrayBuffer = js.lib.ArrayBuffer;
 typedef Uint32Array = js.lib.Uint32Array;
@@ -12,6 +13,7 @@ typedef ArrayBufferView = js.lib.ArrayBufferView;
 #else
 typedef Float32Array = js.html.Float32Array;
 typedef Uint16Array = js.html.Uint16Array;
+typedef Int16Array = js.html.Int16Array;
 typedef Uint8Array = js.html.Uint8Array;
 typedef ArrayBuffer = js.html.ArrayBuffer;
 typedef Uint32Array = js.html.Uint32Array;

+ 3 - 0
hxd/snd/Driver.hx

@@ -3,6 +3,9 @@ package hxd.snd;
 #if usesys
 typedef SourceHandle = haxe.AudioTypes.SourceHandle;
 typedef BufferHandle = haxe.AudioTypes.BufferHandle;
+#elseif (js && !useal)
+typedef SourceHandle = hxd.snd.webaudio.AudioTypes.SourceHandle;
+typedef BufferHandle = hxd.snd.webaudio.AudioTypes.BufferHandle;
 #else
 typedef SourceHandle = hxd.snd.openal.AudioTypes.SourceHandle;
 typedef BufferHandle = hxd.snd.openal.AudioTypes.BufferHandle;

+ 4 - 1
hxd/snd/Manager.hx

@@ -101,6 +101,8 @@ class Manager {
 		try {
 			#if usesys
 			driver = new haxe.AudioTypes.SoundDriver();
+			#elseif (js && !useal)
+			driver = new hxd.snd.webaudio.Driver();
 			#else
 			driver = new hxd.snd.openal.Driver();
 			#end
@@ -662,8 +664,9 @@ class Manager {
 	var targetChannels : Int;
 
 	function checkTargetFormat(dat : hxd.snd.Data, forceMono = false) {
+		
 		targetRate = dat.samplingRate;
-		#if (!usesys && !hlopenal)
+		#if (!usesys && !hlopenal && (!js || useal))
 		// perform resampling to nativechannel frequency
 		targetRate = hxd.snd.openal.Emulator.NATIVE_FREQ;
 		#end

+ 1 - 1
hxd/snd/Mp3Data.hx

@@ -64,7 +64,7 @@ class Mp3Data extends Data {
 
 		#elseif js
 
-		var ctx = @:privateAccess NativeChannel.getContext();
+		var ctx = hxd.snd.webaudio.Context.get();
 		if( ctx == null ) return;
 		ctx.decodeAudioData(bytes.getData(), processBuffer);
 

+ 15 - 73
hxd/snd/NativeChannel.hx

@@ -108,38 +108,9 @@ class NativeChannel {
 	var snd : flash.media.Sound;
 	var channel : flash.media.SoundChannel;
 	#elseif js
-	static var ctx : js.html.audio.AudioContext;
-	static var destination : js.html.audio.AudioNode;
-	static var masterGain : js.html.audio.GainNode;
-	static function getContext() : js.html.audio.AudioContext {
-		if( ctx == null ) {
-			try {
-				ctx = new js.html.audio.AudioContext();
-			} catch( e : Dynamic ) try {
-				#if (haxe_ver >= 4)
-				ctx = js.Syntax.code('new window.webkitAudioContext()');
-				#else
-				ctx = untyped __js__('new window.webkitAudioContext()');
-				#end
-			} catch( e : Dynamic ) {
-				ctx = null;
-			}
-			if( ctx != null ) {
-				if( ctx.state == SUSPENDED ) waitForPageInput();
-				ctx.addEventListener("statechange", function(_) if( ctx.state == SUSPENDED ) waitForPageInput());
-				masterGain = ctx.createGain();
-				masterGain.connect(ctx.destination);
-
-				destination = masterGain;
-			}
-		}
-		return ctx;
-	}
 	// Avoid excessive buffer allocation when playing many sounds.
 	// bufferSamples is constant and never change at runtime, so it's safe to use general pool.
-	static var pool : Array<js.html.audio.AudioBuffer> = new Array();
 	static var bufferPool : Array<haxe.io.Float32Array> = new Array();
-	static var gainPool : Array<js.html.audio.GainNode> = new Array();
 	
 	var front : js.html.audio.AudioBuffer;
 	var back : js.html.audio.AudioBuffer;
@@ -160,21 +131,18 @@ class NativeChannel {
 		snd.addEventListener(flash.events.SampleDataEvent.SAMPLE_DATA, onFlashSample);
 		channel = snd.play(0, 0x7FFFFFFF);
 		#elseif js
-		var ctx = getContext();
-		if( ctx == null ) return;
-		
-		if ( pool.length > 0 ) front = pool.pop();
-		else front = ctx.createBuffer(2, bufferSamples, ctx.sampleRate);
-		if ( pool.length > 0 ) back = pool.pop();
-		else back = ctx.createBuffer(2, bufferSamples, ctx.sampleRate);
+		var ctx = hxd.snd.webaudio.Context.get();
+
+		var rate = Std.int(ctx.sampleRate);
+		front = hxd.snd.webaudio.Context.getBuffer(2, bufferSamples, rate);
+		back = hxd.snd.webaudio.Context.getBuffer(2, bufferSamples, rate);
 		
 		if ( bufferPool.length > 0 ) tmpBuffer = bufferPool.pop();
 		else tmpBuffer = new haxe.io.Float32Array(bufferSamples * 2);
-		
-		if ( gainPool.length != 0 ) gain = gainPool.pop();
-		else gain = ctx.createGain();
-		gain.connect(destination);
 
+		gain = hxd.snd.webaudio.Context.getGain();
+		gain.connect(hxd.snd.webaudio.Context.destination);
+		
 		fill(front);
 		fill(back);
 		
@@ -209,30 +177,6 @@ class NativeChannel {
 
 	#if js
 
-	static var waitDiv = null;
-	static function waitForPageInput() {
-		if( waitDiv != null ) waitDiv.remove();
-		// insert invisible div on top of the page to capture events
-		// see https://developers.google.com/web/updates/2017/09/autoplay-policy-changes#webaudio
-		var div = js.Browser.document.createDivElement();
-		div.setAttribute("style","width:100%;height:100%;background:transparent;z-index:9999;position:fixed;left:0;top:0");
-		div.onclick = stopInput;
-		div.onkeydown = stopInput;
-		js.Browser.document.body.addEventListener("keydown",stopInput);
-		js.Browser.document.body.addEventListener("touchend",stopInput);
-		js.Browser.document.body.appendChild(div);
-		waitDiv = div;
-	}
-
-	static function stopInput(_) {
-		if( waitDiv == null ) return;
-		waitDiv.remove();
-		waitDiv = null;
-		js.Browser.document.body.removeEventListener("keydown",stopInput);
-		js.Browser.document.body.removeEventListener("touchend",stopInput);
-		if( ctx != null ) ctx.resume();
-	}
-	
 	function swap( event : js.html.Event ) {
 		var tmp = front;
 		front = back;
@@ -242,7 +186,7 @@ class NativeChannel {
 		current.removeEventListener("ended", swap);
 		// current.disconnect(); // Should not be required as it's a one-shot object by design.
 		current = queued;
-		var ctx = getContext();
+		var ctx = hxd.snd.webaudio.Context.get();
 		queued = ctx.createBufferSource();
 		queued.buffer = tmp;
 		queued.addEventListener("ended", swap);
@@ -264,7 +208,7 @@ class NativeChannel {
 			right[i] = tmpBuffer[r++];
 		}
 	}
-	
+
 	#end
 
 	function onSample( out : haxe.io.Float32Array ) {
@@ -278,24 +222,22 @@ class NativeChannel {
 		}
 		#elseif js
 		if ( front != null ) {
-			current.disconnect();
 			current.removeEventListener("ended", swap);
 			current.stop();
+			current.disconnect();
 			current = null;
-			
+
 			queued.removeEventListener("ended", swap);
 			queued.disconnect();
 			queued.stop();
 			queued = null;
 
-			gainPool.push(gain);
 			gain.disconnect();
+			hxd.snd.webaudio.Context.putGain(gain);
 			gain = null;
 
-			pool.push(front);
-			front = null;
-			pool.push(back);
-			back = null;
+			hxd.snd.webaudio.Context.putBuffer(front);
+			hxd.snd.webaudio.Context.putBuffer(back);
 			
 			bufferPool.push(tmpBuffer);
 			tmpBuffer = null;

+ 2 - 2
hxd/snd/openal/Emulator.hx

@@ -181,7 +181,7 @@ class Emulator {
 	static var CACHED_FREQ : Null<Int>;
 	static function get_NATIVE_FREQ() {
 		if( CACHED_FREQ == null )
-			CACHED_FREQ = #if js @:privateAccess Std.int(NativeChannel.getContext() == null ? 44100 : NativeChannel.getContext().sampleRate) #else 44100 #end;
+			CACHED_FREQ = #if js Std.int(hxd.snd.webaudio.Context.get().sampleRate) #else 44100 #end;
 		return CACHED_FREQ;
 	}
 
@@ -254,7 +254,7 @@ class Emulator {
 		#if js
 		switch (param) {
 			case GAIN:
-				@:privateAccess hxd.snd.NativeChannel.masterGain.gain.value = value;
+				hxd.snd.webaudio.Context.masterGain.gain.value = value;
 		}
 		#end
 	}

+ 159 - 0
hxd/snd/webaudio/AudioTypes.hx

@@ -0,0 +1,159 @@
+package hxd.snd.webaudio;
+#if (js && !useal)
+import js.html.audio.*;
+
+class BufferHandle {
+	public var inst : AudioBuffer;
+	public var isEnd : Bool;
+	public var samples : Int;
+	public function new() { }
+}
+
+@:allow(hxd.snd.webaudio.Driver)
+class SourceHandle {
+	public var sampleOffset   : Int;
+	public var playing        : Bool;
+
+	public var driver : Driver;
+	public var lowPass : BiquadFilterNode;
+	public var gain : GainNode;
+	public var destination : AudioNode;
+	public var buffers : Array<BufferPlayback>;
+	public var pitch : Float;
+	public var firstPlay : Bool;
+
+	public function new() {
+		buffers = [];
+		sampleOffset = 0;
+		pitch = 1;
+		firstPlay = true;
+	}
+
+	public function updateDestination() {
+		if (lowPass != null) {
+			destination = lowPass;
+			lowPass.connect(gain);
+		} else {
+			destination = gain;
+		}
+		gain.connect(driver.destination);
+		for (b in buffers) {
+			if ( b.node != null ) {
+				b.node.connect(destination);
+			}
+		}
+	}
+
+	public function applyPitch() {
+	// BUG: Because pitch is a k-rate parameter, it is applied once per 128-sample block, which throws timings off and creates audio skips.
+		// Noticeable mainly with low pitch values, so it's not particularly usable to reduce pitch gradually.
+		var t = 0.;
+		for ( b in buffers ) {
+			t = b.readjust(t, this);
+		}
+	}
+}
+
+class BufferPlayback {
+
+	public var buffer : BufferHandle;
+	public var node : AudioBufferSourceNode;
+	public var offset : Float;
+	public var dirty : Bool; // Playback was started - node no longer usable.
+	public var consumed : Bool; // Node was played completely (ended event fired)
+	public var starts : Float;
+	public var ends : Float;
+
+	public var currentSample(get, never):Int;
+	
+	static inline var FADE_SAMPLES = 10; // Prevents a click at the start.
+
+	public function new()
+	{
+
+	}
+
+	function get_currentSample ( ):Int {
+		if ( consumed ) return buffer.samples;
+		if ( node == null || !dirty ) return 0;
+		return Math.floor((node.context.currentTime - starts) / ((buffer.inst.duration - offset) / node.playbackRate.value) * buffer.samples);
+	}
+
+	public function set(buf : BufferHandle, grainOffset : Float) {
+		buffer = buf;
+		offset = Math.isNaN(grainOffset) ? 0 : grainOffset;
+		dirty = false;
+		consumed = false;
+		starts = 0;
+		ends = 0;
+	}
+
+	public function start( ctx : AudioContext, source : SourceHandle, time : Float) {
+		dirty = true;
+		consumed = false;
+		if (node != null) {
+			stop();
+		}
+		if ( source.firstPlay && buffer.samples > FADE_SAMPLES ) {
+			source.firstPlay = false;
+			var channels = [for (i in 0...buffer.inst.numberOfChannels) buffer.inst.getChannelData(i)];
+			var j = 0, fade = 0.;
+			while ( j < FADE_SAMPLES ) {
+				var i = 0;
+				while ( i < channels.length ) {
+					channels[i][j] *= fade;
+					i++;
+				}
+				j++;
+				fade += 1 / FADE_SAMPLES;
+				if (fade > 1) fade = 1;
+			}
+		}
+		node = ctx.createBufferSource();
+		node.buffer = buffer.inst;
+		node.addEventListener("ended", onBufferConsumed);
+		node.connect(source.destination);
+		node.playbackRate.value = source.pitch;
+		node.start(time, offset);
+		starts = time;
+		return ends = time + (buffer.inst.duration - offset) / source.pitch;
+	}
+
+	public function readjust( time : Float, source : SourceHandle ) {
+		if (consumed || node == null) return ends;
+		node.playbackRate.value = source.pitch;
+		if (dirty) {
+			var elapsed = node.context.currentTime - starts;
+			return ends = starts + elapsed + (buffer.inst.duration - offset - elapsed) / source.pitch;
+		}
+		starts = time == 0 ? node.context.currentTime : time;
+		if (source.playing)
+			node.start(starts, offset);
+		return ends = starts + (buffer.inst.duration - offset) / source.pitch;
+	}
+
+	public function stop( immediate : Bool = true ) {
+		if ( node != null ) {
+			node.removeEventListener("ended", onBufferConsumed);
+			if (immediate) node.disconnect();
+			else node.stop();
+			node = null;
+		}
+	}
+
+	function onBufferConsumed ( e : js.html.Event ) {
+		node.removeEventListener("ended", onBufferConsumed);
+		node.disconnect();
+		node = null;
+		consumed = true;
+	}
+
+	public function clear()
+	{
+		buffer = null;
+		node = null;
+	}
+
+}
+
+#end

+ 138 - 0
hxd/snd/webaudio/Context.hx

@@ -0,0 +1,138 @@
+package hxd.snd.webaudio;
+
+#if js
+import js.html.audio.AudioBuffer;
+import js.html.audio.GainNode;
+import js.html.audio.AudioNode;
+import js.html.audio.AudioContext;
+
+/**
+	Common part between webaudio and OpenAL emulator - AudioContext and masterGain.
+**/
+class Context {
+	
+	static var ctx : AudioContext;
+	static var suspended : Bool;
+	static var bufferPool : Array<BufferPool>;
+	static var gainPool : Array<GainNode>;
+	public static var destination : AudioNode;
+	public static var masterGain : GainNode;
+
+	public static function get() : AudioContext {
+		if ( ctx == null ) {
+			try {
+				ctx = new js.html.audio.AudioContext();
+			} catch( e : Dynamic ) try {
+				// Fallback to Chrome webkit prefix.
+				#if (haxe_ver >= 4)
+				ctx = js.Syntax.code('new window.webkitAudioContext()');
+				#else
+				ctx = untyped __js__('new window.webkitAudioContext()');
+				#end
+			} catch( e : Dynamic ) {
+				ctx = null;
+			}
+			if ( ctx == null ) {
+				throw "WebAudio API not available in this browser!";
+			}
+
+			// AudioContext starts in suspended mode until user input - add hooks to resume it.
+			// see https://developers.google.com/web/updates/2017/09/autoplay-policy-changes#webaudio
+			if ( ctx.state == SUSPENDED ) waitForPageInput();
+			ctx.addEventListener("statechange", function(_) if ( ctx.state == SUSPENDED ) waitForPageInput() );
+			
+			bufferPool = [];
+			gainPool = [];
+
+			masterGain = ctx.createGain();
+			masterGain.connect(ctx.destination);
+			destination = masterGain;
+		}
+		return ctx;
+	}
+	
+	public static inline function getGain():GainNode
+	{
+		return gainPool.length != 0 ? gainPool.pop() : ctx.createGain();
+	}
+
+	public static inline function putGain(gain:GainNode) {
+		gainPool.push(gain);
+		gain.gain.value = 1;
+	}
+
+	static function waitForPageInput() {
+		if ( !suspended ) {
+			
+			js.Browser.document.addEventListener("click", resumeContext);
+			js.Browser.document.addEventListener("keydown", resumeContext);
+			js.Browser.document.body.addEventListener("keydown", resumeContext);
+			js.Browser.document.body.addEventListener("touchend", resumeContext);
+
+			suspended = true;
+		}
+	}
+
+	static function resumeContext(_) {
+		if ( suspended ) {
+			if ( ctx != null ) ctx.resume();
+
+			js.Browser.document.removeEventListener("click", resumeContext);
+			js.Browser.document.removeEventListener("keydown", resumeContext);
+			js.Browser.document.body.removeEventListener("keydown", resumeContext);
+			js.Browser.document.body.removeEventListener("touchend", resumeContext);
+
+			suspended = false;
+		}
+	}
+
+	/**
+		Returns a free AudioBuffer instance matching the given sample count, number of channels and sample rate.
+	**/
+	public static function getBuffer( channels : Int, sampleCount : Int, rate : Int ) : AudioBuffer
+	{
+		for ( pool in bufferPool ) {
+			if ( pool.channels == channels && pool.samples == sampleCount && pool.rate == rate ) {
+				if ( pool.pool.length != 0 ) return pool.pool.pop();
+				else return ctx.createBuffer(channels, sampleCount, rate);
+			}
+		}
+		var pool = new BufferPool(channels, sampleCount, rate);
+		bufferPool.push(pool);
+		return ctx.createBuffer(channels, sampleCount, rate);
+	}
+
+	/**
+		Puts an AudioBuffer back into its pool.
+	**/
+	public static function putBuffer( buf : AudioBuffer ) {
+		var rate = Std.int(buf.sampleRate);
+		for (pool in bufferPool) {
+			if (pool.channels == buf.numberOfChannels && pool.samples == buf.length && pool.rate == rate) {
+				pool.pool.push(buf);
+				break;
+			}
+		}
+	}
+
+
+}
+
+// AudioBuffer pool to minimize allocation count.
+private class BufferPool {
+
+	public var pool : Array<AudioBuffer>;
+	public var channels : Int;
+	public var samples : Int;
+	public var rate : Int;
+
+	public function new( channels : Int, samples : Int, rate : Int ) {
+		this.pool = [];
+		this.channels = channels;
+		this.samples = samples;
+		this.rate = rate;
+	}
+
+}
+
+#end

+ 261 - 0
hxd/snd/webaudio/Driver.hx

@@ -0,0 +1,261 @@
+package hxd.snd.webaudio;
+#if (js && !useal)
+
+import hxd.snd.webaudio.AudioTypes;
+import hxd.snd.Driver.DriverFeature;
+import js.html.audio.*;
+
+class Driver implements hxd.snd.Driver {
+
+	public var ctx : AudioContext;
+	public var masterGain(get, never) : GainNode;
+	public var destination(get, set) : AudioNode;
+
+	var playbackPool : Array<BufferPlayback>;
+
+	public function new()
+	{
+		playbackPool = [];
+
+		ctx = Context.get();
+	}
+
+	/**
+		Returns a free AudioBuffer instance matching the given sample count, number of channels and sample rate.
+	**/
+	public inline function getBuffer(channels : Int, sampleCount : Int, rate : Int) : AudioBuffer {
+		return Context.getBuffer(channels, sampleCount, rate);
+	}
+	
+	/**
+		Puts an AudioBuffer back into its pool.
+	**/
+	public inline function putBuffer( buf : AudioBuffer ) {
+		Context.putBuffer(buf);
+	}
+
+	/**
+		Returns free Gain node
+	**/
+	public inline function getGain():GainNode
+	{
+		return Context.getGain();
+	}
+
+	public inline function putGain(gain:GainNode) {
+		Context.putGain(gain);
+	}
+
+	public function hasFeature (d : DriverFeature) : Bool {
+		switch (d) {
+			case MasterVolume: return true;
+		}
+	}
+
+	public function setMasterVolume (value : Float) : Void {
+		masterGain.gain.value = value;
+	}
+
+	public function setListenerParams (position : h3d.Vector, direction : h3d.Vector, up : h3d.Vector, ?velocity : h3d.Vector) : Void {
+		// Not supported
+	}
+
+	public function createSource () : SourceHandle {
+		var s = new SourceHandle();
+		s.driver = this;
+		s.gain = getGain();
+		s.updateDestination();
+		return s;
+	}
+
+	public function playSource (source : SourceHandle) : Void {
+		if ( !source.playing ) {
+			source.playing = true;
+			if ( source.buffers.length != 0 ) {
+				var time = ctx.currentTime;
+				for ( b in source.buffers ) {
+					if ( b.consumed ) continue;
+					time = b.start(ctx, source, time);
+				}
+			}
+		}
+	}
+
+	public function stopSource (source : SourceHandle) : Void {
+		source.playing = false;
+		source.sampleOffset = 0;
+	}
+
+	public function setSourceVolume (source : SourceHandle, value : Float) : Void {
+		source.gain.gain.value = value;
+	}
+	
+	public function destroySource (source : SourceHandle) : Void {
+		stopSource(source);
+		source.gain.disconnect();
+		source.driver = null;
+		putGain(source.gain);
+		source.gain = null;
+		for ( b in source.buffers ) {
+			b.stop();
+			b.clear();
+			playbackPool.push(b);
+		}
+		source.buffers = [];
+	}
+
+	public function createBuffer () : BufferHandle {
+		var b = new BufferHandle();
+		b.samples = 0;
+		return b;
+	}
+	
+	public function setBufferData (buffer : BufferHandle, data : haxe.io.Bytes, size : Int, format : Data.SampleFormat, channelCount : Int, samplingRate : Int) : Void {
+		var sampleCount = Std.int(size / hxd.snd.Data.formatBytes(format) / channelCount);
+		buffer.samples = sampleCount;
+		if (sampleCount == 0) return;
+
+		if ( buffer.inst == null ) {
+			buffer.inst = getBuffer(channelCount, sampleCount, samplingRate);
+		} else if ( buffer.inst.sampleRate != samplingRate || buffer.inst.numberOfChannels != channelCount || buffer.inst.length != sampleCount ) {
+			putBuffer(buffer.inst);
+			buffer.inst = getBuffer(channelCount, sampleCount, samplingRate);
+		}
+		switch (format)
+		{
+			case UI8:
+				var ui8 = new hxd.impl.TypedArray.Uint8Array(data.getData());
+				if (channelCount == 1) {
+					var chn = buffer.inst.getChannelData(0);
+					for ( i in 0...sampleCount ) {
+						chn[i] = ui8[i] / 0xff;
+					}
+				} else {
+					var left = buffer.inst.getChannelData(0);
+					var right = buffer.inst.getChannelData(1);
+					// TODO: 3+ channels
+					var r = 0;
+					for ( i in 0...sampleCount ) {
+						left[i] = ui8[r] / 0xff;
+						right[i] = ui8[r+1] / 0xff;
+						r += channelCount;
+					}
+				}
+			case I16:
+				var i16 = new hxd.impl.TypedArray.Int16Array(data.getData());
+				if (channelCount == 1) {
+					var chn = buffer.inst.getChannelData(0);
+					for ( i in 0...sampleCount ) {
+						chn[i] = i16[i] / 0x8000;
+					}
+				} else {
+					var left = buffer.inst.getChannelData(0);
+					var right = buffer.inst.getChannelData(1);
+					// TODO: 3+ channels
+					var r = 0;
+					for ( i in 0...sampleCount ) {
+						left[i] = i16[r] / 0x8000;
+						right[i] = i16[r+1] / 0x8000;
+						r += channelCount;
+					}
+				}
+			case F32:
+				var f32 = new hxd.impl.TypedArray.Float32Array(data.getData());
+				if (channelCount == 1) {
+					var chn = buffer.inst.getChannelData(0);
+					for ( i in 0...sampleCount ) {
+						chn[i] = f32[i];
+					}
+				} else {
+					var left = buffer.inst.getChannelData(0);
+					var right = buffer.inst.getChannelData(1);
+					// TODO: 3+ channels
+					var r = 0;
+					for ( i in 0...sampleCount ) {
+						left[i] = f32[r];
+						right[i] = f32[r+1];
+						r += channelCount;
+					}
+				}
+		}
+	}
+	public function destroyBuffer (buffer : BufferHandle) : Void {
+		if ( buffer.inst != null ) putBuffer(buffer.inst);
+		buffer.inst = null;
+	}
+
+	public function queueBuffer (source : SourceHandle, buffer : BufferHandle, sampleStart : Int, endOfStream : Bool) : Void {
+		var buf = playbackPool.length != 0 ? playbackPool.pop() : new BufferPlayback();
+		if (buffer.inst == null) return;
+		buf.set(buffer, (sampleStart / buffer.inst.length) * buffer.inst.duration);
+		buffer.isEnd = endOfStream;
+		source.buffers.push(buf);
+		if ( source.playing ) {
+			if ( source.buffers.length != 1 ) {
+				var t = source.buffers[source.buffers.length - 2].ends;
+				buf.start(ctx, source, (js.Syntax.code("isFinite({0})", t):Bool) ? t : ctx.currentTime);
+			} else {
+				buf.start(ctx, source, ctx.currentTime);
+			}
+		}
+	}
+	public function unqueueBuffer (source : SourceHandle, buffer : BufferHandle) : Void {
+		// Find and remove the playback entry for this buffer, recycling it into the pool.
+		for ( i in 0...source.buffers.length ) {
+			var b = source.buffers[i];
+			if ( b.buffer == buffer ) {
+				source.buffers.splice(i, 1);
+				b.stop(!buffer.isEnd);
+				b.clear();
+				playbackPool.push(b);
+				break;
+			}
+		}
+		if (buffer.isEnd || !source.playing) source.sampleOffset = 0;
+		else source.sampleOffset += buffer.samples;
+	}
+	public function getProcessedBuffers (source : SourceHandle) : Int {
+		var cnt = 0;
+		for (b in source.buffers) if ( b.consumed ) cnt++;
+		return cnt;
+	}
+	public function getPlayedSampleCount (source : SourceHandle) : Int {
+		var consumed:Int = 0;
+		var buf : BufferPlayback = null;
+		for (b in source.buffers) {
+			if (b.consumed) {
+				consumed += b.buffer.samples;
+			} else if ( b.dirty ) {
+				buf = b;
+				break;
+			}
+		}
+		if ( buf != null ) {
+			return source.sampleOffset + consumed + buf.currentSample;
+		}
+
+		return source.sampleOffset + consumed;
+	}
+
+	public function update () : Void { }
+	public function dispose () : Void {
+		// TODO
+	}
+
+	public function getEffectDriver(type : String) : hxd.snd.Driver.EffectDriver<Dynamic> {
+		return switch(type) {
+			case "pitch"          : new PitchDriver();
+			// case "spatialization" : new SpatializationDriver(this);
+			case "lowpass"        : new LowPassDriver();
+			// case "reverb"         : new ReverbDriver(this);
+			default               : new hxd.snd.Driver.EffectDriver<Dynamic>();
+		}
+	}
+
+	inline function get_masterGain() return Context.masterGain;
+	inline function set_destination(node : AudioNode) return Context.destination = node;
+	inline function get_destination() return Context.destination;
+
+}
+
+#end

+ 48 - 0
hxd/snd/webaudio/LowPassDriver.hx

@@ -0,0 +1,48 @@
+package hxd.snd.webaudio;
+
+#if (js && !useal)
+import js.html.audio.BiquadFilterType;
+import js.html.audio.AudioContext;
+import js.html.audio.BiquadFilterNode;
+import hxd.snd.effect.LowPass;
+import hxd.snd.Driver.EffectDriver;
+import hxd.snd.webaudio.AudioTypes;
+
+class LowPassDriver extends EffectDriver<LowPass> {
+
+	var pool : Array<BiquadFilterNode>;
+
+	public function new() {
+		pool = [];
+		super();
+	}
+
+	function get( ctx : AudioContext ) {
+		if ( pool.length != 0 ) {
+			return pool.pop();
+		}
+		var node = ctx.createBiquadFilter();
+		node.type = BiquadFilterType.LOWPASS;
+		return node;
+	}
+
+	override function apply(e : LowPass, source : SourceHandle) : Void {
+		if ( source.lowPass == null ) {
+			source.lowPass = get(source.driver.ctx);
+			source.updateDestination();
+		}
+		var min = 40;
+		var max = source.driver.ctx.sampleRate / 2;
+		var octaves = js.lib.Math.log(max / min) / js.lib.Math.LN2;
+		source.lowPass.frequency.value = max * Math.pow(2, octaves * (e.gainHF - 1));
+	}
+
+	override function unbind(e : LowPass, source : SourceHandle) : Void {
+		pool.push(source.lowPass);
+		source.lowPass.disconnect();
+		source.lowPass = null;
+		if ( source.driver != null )
+			source.updateDestination();
+	}
+}
+#end

+ 22 - 0
hxd/snd/webaudio/PitchDriver.hx

@@ -0,0 +1,22 @@
+package hxd.snd.webaudio;
+
+#if (js && !useal)
+import hxd.snd.Driver;
+import hxd.snd.webaudio.AudioTypes;
+import hxd.snd.effect.Pitch;
+
+class PitchDriver extends EffectDriver<Pitch> {
+
+	override function apply(e : Pitch, source : SourceHandle) : Void {
+		if ( source.pitch != e.value ) {
+			source.pitch = e.value;
+			source.applyPitch();
+		}
+	}
+
+	override function unbind(e : Pitch, source : SourceHandle) : Void {
+		source.pitch = 1;
+		source.applyPitch();
+	}
+}
+#end

+ 6 - 1
samples/Sound.hx

@@ -27,6 +27,7 @@ class Sound extends SampleApp {
 
 		var res = if( hxd.res.Sound.supportedFormat(Mp3) || hxd.res.Sound.supportedFormat(OggVorbis) ) hxd.Res.music_loop else null;
 		var pitch = new hxd.snd.effect.Pitch();
+		var lowpass = new hxd.snd.effect.LowPass();
 		if( res != null ) {
 			trace("Playing "+res);
 			music = res.play(true);
@@ -34,6 +35,9 @@ class Sound extends SampleApp {
 			music.onEnd = function() trace("LOOP");
 			// Use effect processing on the channel
 			music.addEffect(pitch);
+			#if hlopenal
+			music.addEffect(lowpass);
+			#end
 		}
 
 		slider = new h2d.Slider(300, 10);
@@ -67,8 +71,9 @@ class Sound extends SampleApp {
 			tf.textAlign = Right;
 			f.addChild(slider);
 			f.addChild(musicPosition);
-			#if hlopenal
 			addSlider("Pitch val", function() { return pitch.value; }, function(v) { pitch.value = v; }, 0, 2);
+			#if hlopenal
+			addSlider("Lowpass gain", function() { return lowpass.gainHF; }, function(v) { lowpass.gainHF = v; }, 0, 1);
 			#end
 		}
 	}