diff --git a/src/lib/libwebaudio.js b/src/lib/libwebaudio.js index b693cc58f0fb2..51150b8d8dc1c 100644 --- a/src/lib/libwebaudio.js +++ b/src/lib/libwebaudio.js @@ -9,25 +9,45 @@ #endif var LibraryWebAudio = { - $EmAudio: {}, - $EmAudioCounter: 0, + $emAudio__deps: ['$HandleAllocator'], + $emAudio: 'new HandleAllocator();', // Call this function from JavaScript to register a Wasm-side handle to an AudioContext that // you have already created manually without calling emscripten_create_audio_context(). // Note: To let that AudioContext be garbage collected later, call the function // emscriptenDestroyAudioContext() to unbind it from Wasm. - $emscriptenRegisterAudioObject__deps: ['$EmAudio', '$EmAudioCounter'], $emscriptenRegisterAudioObject: (object) => { #if ASSERTIONS assert(object, 'Called emscriptenRegisterAudioObject() with a null object handle!'); #endif - EmAudio[++EmAudioCounter] = object; + var id = emAudio.allocate(object); #if WEBAUDIO_DEBUG - console.log(`Registered new WebAudio object ${object} with ID ${EmAudioCounter}`); + dbg(`Registered new WebAudio object ${object} with ID ${id}`); #endif - return EmAudioCounter; + return id; }, +#if ASSERTIONS || WEBAUDIO_DEBUG + $emAudioCheckHandle__internal: true, + $emAudioCheckHandle(handle, methodName, isNode = false) { +#if WEBAUDIO_DEBUG + dbg(`called ${methodName}() with ID ${handle}`); +#endif +#if ASSERTIONS + assert(emAudio.has(handle), `Called ${methodName}() on a nonexisting handle ${handle}`); + var obj = emAudio.get(handle); + if (isNode == 2) { + // Some methods accept either a node or an audio context + assert(obj instanceof window.AudioNode || obj instanceof (window.AudioContext || window.webkitAudioContext), `Called ${methodName}() on a handle ${handle} that is not an AudioNode or an AudioContext, but of type ${typeof obj}`); + } else if (isNode) { + assert(obj instanceof window.AudioNode, `Called ${methodName}() on a handle ${handle} that is not an AudioNode, but of type ${typeof obj}`); + } else { + assert(obj instanceof (window.AudioContext || window.webkitAudioContext), `Called ${methodName}() on a context handle ${handle} that is not an AudioContext, but of type ${typeof obj}`); + } +#endif + }, +#endif + // Call this function from JavaScript to destroy a Wasm-side handle to an AudioContext. // After calling this function, it is no longer possible to reference this AudioContext // from Wasm code - and the GC can reclaim it after all references to it are cleared. @@ -35,12 +55,12 @@ var LibraryWebAudio = { // Call this function from JavaScript to get the Web Audio object corresponding to the given // Wasm handle ID. - $emscriptenGetAudioObject: (objectHandle) => EmAudio[objectHandle], + $emscriptenGetAudioObject: (objectHandle) => emAudio.get(objectHandle), // Performs the work of getting the AudioContext's render quantum size. $emscriptenGetContextQuantumSize: (contextHandle) => { // TODO: in a future release this will be something like: - // return EmAudio[contextHandle].renderQuantumSize || 128; + // return emAudio.get(contextHandle).renderQuantumSize || 128; // It comes two caveats: it needs the hint when generating the context adding to // emscripten_create_audio_context(), and altering the quantum requires a secure // context and fallback implementing. 
Until then we simply use the 1.0 API value: @@ -68,7 +88,7 @@ var LibraryWebAudio = { } : undefined; #if WEBAUDIO_DEBUG - console.log(`Creating new WebAudio context with parameters:`); + dbg(`Creating new WebAudio context with parameters:`); console.dir(opts); #endif @@ -81,61 +101,49 @@ var LibraryWebAudio = { }, emscripten_resume_audio_context_async: (contextHandle, callback, userData) => { + var audio = emAudio.get(contextHandle); function cb(state) { #if WEBAUDIO_DEBUG - console.log(`emscripten_resume_audio_context_async() callback: New audio state="${EmAudio[contextHandle].state}", ID=${state}`); + dbg(`emscripten_resume_audio_context_async() callback: new audio state="${audio.state}" (${state})`); #endif {{{ makeDynCall('viip', 'callback') }}}(contextHandle, state, userData); } #if WEBAUDIO_DEBUG - console.log(`emscripten_resume_audio_context_async() resuming...`); + dbg('emscripten_resume_audio_context_async() resuming...'); #endif - EmAudio[contextHandle].resume().then(() => { cb(1/*running*/) }).catch(() => { cb(0/*suspended*/) }); + audio.resume().then(() => { cb(1/*running*/) }).catch(() => { cb(0/*suspended*/) }); }, emscripten_resume_audio_context_sync: (contextHandle) => { -#if ASSERTIONS - assert(EmAudio[contextHandle], `Called emscripten_resume_audio_context_sync() on a nonexisting context handle ${contextHandle}`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_resume_audio_context_sync() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); -#endif -#if WEBAUDIO_DEBUG - console.log(`AudioContext.resume() on WebAudio context with ID ${contextHandle}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_resume_audio_context_sync'); #endif - EmAudio[contextHandle].resume(); + emAudio.get(contextHandle).resume(); }, emscripten_audio_context_state: (contextHandle) => { -#if ASSERTIONS - assert(EmAudio[contextHandle], `Called emscripten_audio_context_state() on a nonexisting context handle ${contextHandle}`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_audio_context_state() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_audio_context_state'); #endif - return ['suspended', 'running', 'closed', 'interrupted'].indexOf(EmAudio[contextHandle].state); + return ['suspended', 'running', 'closed', 'interrupted'].indexOf(emAudio.get(contextHandle).state); }, emscripten_destroy_audio_context: (contextHandle) => { -#if ASSERTIONS - assert(EmAudio[contextHandle], `Called emscripten_destroy_audio_context() on an already freed context handle ${contextHandle}`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_destroy_audio_context() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); -#endif -#if WEBAUDIO_DEBUG - console.log(`Destroyed WebAudio context with ID ${contextHandle}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_destroy_audio_context'); #endif - EmAudio[contextHandle].suspend(); - delete EmAudio[contextHandle]; + emAudio.get(contextHandle).suspend(); + emAudio.free(contextHandle); }, emscripten_destroy_web_audio_node: (objectHandle) => { -#if 
ASSERTIONS - assert(EmAudio[objectHandle], `Called emscripten_destroy_web_audio_node() on a nonexisting/already freed object handle ${objectHandle}`); - assert(EmAudio[objectHandle].disconnect, `Called emscripten_destroy_web_audio_node() on a handle ${objectHandle} that is not an Web Audio Node, but of type ${typeof EmAudio[objectHandle]}`); -#endif -#if WEBAUDIO_DEBUG - console.log(`Destroyed Web Audio Node with ID ${objectHandle}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(objectHandle, 'emscripten_destroy_web_audio_node', true); #endif // Explicitly disconnect the node from Web Audio graph before letting it GC, // to work around browser bugs such as https://webkit.org/b/222098#c23 - EmAudio[objectHandle].disconnect(); - delete EmAudio[objectHandle]; + emAudio.get(objectHandle).disconnect(); + emAudio.free(objectHandle); }, #if AUDIO_WORKLET @@ -143,17 +151,15 @@ var LibraryWebAudio = { // etc., but the created worklet does. emscripten_start_wasm_audio_worklet_thread_async__deps: [ '$_wasmWorkersID', - '$_EmAudioDispatchProcessorCallback', + '$_emAudioDispatchProcessorCallback', '$stackAlloc', '$stackRestore', '$stackSave'], emscripten_start_wasm_audio_worklet_thread_async: (contextHandle, stackLowestAddress, stackSize, callback, userData) => { -#if ASSERTIONS - assert(contextHandle, `Called emscripten_start_wasm_audio_worklet_thread_async() with a null Web Audio Context handle!`); - assert(EmAudio[contextHandle], `Called emscripten_start_wasm_audio_worklet_thread_async() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_start_wasm_audio_worklet_thread_async() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_start_wasm_audio_worklet_thread_async'); #endif - var audioContext = EmAudio[contextHandle]; + var audioContext = emAudio.get(contextHandle); var audioWorklet = audioContext.audioWorklet; #if ASSERTIONS @@ -166,12 +172,12 @@ var LibraryWebAudio = { #endif #if WEBAUDIO_DEBUG - console.log(`emscripten_start_wasm_audio_worklet_thread_async() adding audioworklet.js...`); + dbg(`emscripten_start_wasm_audio_worklet_thread_async() adding audioworklet.js...`); #endif var audioWorkletCreationFailed = () => { #if ASSERTIONS || WEBAUDIO_DEBUG - console.error(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`); + dbg(`emscripten_start_wasm_audio_worklet_thread_async() addModule() failed!`); #endif {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 0/*EM_FALSE*/, userData); }; @@ -190,7 +196,7 @@ var LibraryWebAudio = { audioWorklet.addModule({{{ wasmWorkerJs }}}).then(() => { #if WEBAUDIO_DEBUG - console.log(`emscripten_start_wasm_audio_worklet_thread_async() addModule() completed`); + dbg(`emscripten_start_wasm_audio_worklet_thread_async() addModule() completed`); #endif #if MIN_FIREFOX_VERSION < 138 || MIN_CHROME_VERSION != TARGET_NOT_SUPPORTED || MIN_SAFARI_VERSION != TARGET_NOT_SUPPORTED @@ -234,13 +240,13 @@ var LibraryWebAudio = { stackLowestAddress, // sb = stack base stackSize, // sz = stack size }); - audioWorklet['port'].onmessage = _EmAudioDispatchProcessorCallback; + audioWorklet['port'].onmessage = _emAudioDispatchProcessorCallback; {{{ makeDynCall('viip', 'callback') }}}(contextHandle, 1/*EM_TRUE*/, userData); }).catch(audioWorkletCreationFailed); }, - 
$_EmAudioDispatchProcessorCallback__deps: ['$getWasmTableEntry'], - $_EmAudioDispatchProcessorCallback: (e) => { + $_emAudioDispatchProcessorCallback__deps: ['$getWasmTableEntry'], + $_emAudioDispatchProcessorCallback: (e) => { var data = e.data; // '_wsc' is short for 'wasm call', trying to use an identifier name that // will never conflict with user code. This is used to call both the 3-param @@ -250,10 +256,8 @@ var LibraryWebAudio = { }, emscripten_create_wasm_audio_worklet_processor_async: (contextHandle, options, callback, userData) => { -#if ASSERTIONS - assert(contextHandle, `Called emscripten_create_wasm_audio_worklet_processor_async() with a null Web Audio Context handle!`); - assert(EmAudio[contextHandle], `Called emscripten_create_wasm_audio_worklet_processor_async() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_processor_async() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_create_wasm_audio_worklet_processor_async'); #endif var processorName = UTF8ToString({{{ makeGetValue('options', C_STRUCTS.WebAudioWorkletProcessorCreateOptions.name, '*') }}}); @@ -282,7 +286,7 @@ var LibraryWebAudio = { console.log(`emscripten_create_wasm_audio_worklet_processor_async() creating a new AudioWorklet processor with name ${processorName}`); #endif - EmAudio[contextHandle].audioWorklet['port'].postMessage({ + emAudio.get(contextHandle).audioWorklet['port'].postMessage({ // Deliberately mangled and short names used here ('_wpn', the 'Worklet // Processor Name' used as a 'key' to verify the message type so as to // not get accidentally mixed with user submitted messages, the remainder @@ -299,10 +303,8 @@ var LibraryWebAudio = { emscripten_create_wasm_audio_worklet_node__deps: ['$emscriptenGetContextQuantumSize'], emscripten_create_wasm_audio_worklet_node: (contextHandle, name, options, callback, userData) => { -#if ASSERTIONS - assert(contextHandle, `Called emscripten_create_wasm_audio_worklet_node() with a null Web Audio Context handle!`); - assert(EmAudio[contextHandle], `Called emscripten_create_wasm_audio_worklet_node() with a nonexisting/already freed Web Audio Context handle ${contextHandle}!`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_create_wasm_audio_worklet_node() on a context handle ${contextHandle} that is not an AudioContext, but of type ${typeof EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_create_wasm_audio_worklet_node'); #endif function readChannelCountArray(heapIndex, numOutputs) { @@ -329,41 +331,37 @@ var LibraryWebAudio = { } : undefined; #if WEBAUDIO_DEBUG - console.log(`Creating AudioWorkletNode "${UTF8ToString(name)}" on context=${contextHandle} with options:`); + dbg(`Creating AudioWorkletNode "${UTF8ToString(name)}" on context=${contextHandle} with options:`); console.dir(opts); #endif - return emscriptenRegisterAudioObject(new AudioWorkletNode(EmAudio[contextHandle], UTF8ToString(name), opts)); + return emscriptenRegisterAudioObject(new AudioWorkletNode(emAudio.get(contextHandle), UTF8ToString(name), opts)); }, #endif // ~AUDIO_WORKLET emscripten_audio_context_quantum_size__deps: ['$emscriptenGetContextQuantumSize'], 
emscripten_audio_context_quantum_size: (contextHandle) => { -#if ASSERTIONS - assert(EmAudio[contextHandle], `Called emscripten_audio_context_quantum_size() with an invalid Web Audio Context handle ${contextHandle}`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_audio_context_quantum_size() on handle ${contextHandle} that is not an AudioContext, but of type ${EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_audio_context_quantum_size'); #endif return emscriptenGetContextQuantumSize(contextHandle); }, emscripten_audio_context_sample_rate: (contextHandle) => { -#if ASSERTIONS - assert(EmAudio[contextHandle], `Called emscripten_audio_context_sample_rate() with an invalid Web Audio Context handle ${contextHandle}`); - assert(EmAudio[contextHandle] instanceof (window.AudioContext || window.webkitAudioContext), `Called emscripten_audio_context_sample_rate() on handle ${contextHandle} that is not an AudioContext, but of type ${EmAudio[contextHandle]}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(contextHandle, 'emscripten_audio_context_sample_rate'); #endif - return EmAudio[contextHandle]['sampleRate']; + return emAudio.get(contextHandle)['sampleRate']; }, emscripten_audio_node_connect: (source, destination, outputIndex, inputIndex) => { - var srcNode = EmAudio[source]; - var dstNode = EmAudio[destination]; -#if ASSERTIONS - assert(srcNode, `Called emscripten_audio_node_connect() with an invalid AudioNode handle ${source}`); - assert(srcNode instanceof window.AudioNode, `Called emscripten_audio_node_connect() on handle ${source} that is not an AudiotNode, but of type ${srcNode}`); - assert(dstNode, `Called emscripten_audio_node_connect() with an invalid AudioNode handle ${destination}!`); - assert(dstNode instanceof (window.AudioContext || window.webkitAudioContext) || dstNode instanceof window.AudioNode, `Called emscripten_audio_node_connect() on handle ${destination} that is not an AudioContext or AudioNode, but of type ${dstNode}`); +#if ASSERTIONS || WEBAUDIO_DEBUG + emAudioCheckHandle(source, 'emscripten_audio_node_connect', 1); + emAudioCheckHandle(destination, 'emscripten_audio_node_connect', 2); #endif + var srcNode = emAudio.get(source); + var dstNode = emAudio.get(destination); #if WEBAUDIO_DEBUG - console.log(`Connecting audio node ID ${source} to audio node ID ${destination} (${srcNode} to ${dstNode})`); + dbg(`Connecting audio node ID ${source} to audio node ID ${destination} (${srcNode} to ${dstNode})`); #endif srcNode.connect(dstNode.destination || dstNode, outputIndex, inputIndex); }, @@ -371,11 +369,11 @@ var LibraryWebAudio = { emscripten_current_thread_is_audio_worklet: () => ENVIRONMENT_IS_AUDIO_WORKLET, emscripten_audio_worklet_post_function_v: (audioContext, funcPtr) => { - (audioContext ? EmAudio[audioContext].audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [] }); // "WaSm Call" + (audioContext ? emAudio.get(audioContext).audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [] }); // "WaSm Call" }, $emscripten_audio_worklet_post_function_1: (audioContext, funcPtr, arg0) => { - (audioContext ? EmAudio[audioContext].audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0] }); // "WaSm Call" + (audioContext ? 
emAudio.get(audioContext).audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0] }); // "WaSm Call" }, emscripten_audio_worklet_post_function_vi__deps: ['$emscripten_audio_worklet_post_function_1'], @@ -389,7 +387,7 @@ var LibraryWebAudio = { }, $emscripten_audio_worklet_post_function_2: (audioContext, funcPtr, arg0, arg1) => { - (audioContext ? EmAudio[audioContext].audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0, arg1] }); // "WaSm Call" + (audioContext ? emAudio.get(audioContext).audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0, arg1] }); // "WaSm Call" }, emscripten_audio_worklet_post_function_vii__deps: ['$emscripten_audio_worklet_post_function_2'], @@ -403,7 +401,7 @@ var LibraryWebAudio = { }, $emscripten_audio_worklet_post_function_3: (audioContext, funcPtr, arg0, arg1, arg2) => { - (audioContext ? EmAudio[audioContext].audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0, arg1, arg2] }); // "WaSm Call" + (audioContext ? emAudio.get(audioContext).audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: [arg0, arg1, arg2] }); // "WaSm Call" }, emscripten_audio_worklet_post_function_viii__deps: ['$emscripten_audio_worklet_post_function_3'], emscripten_audio_worklet_post_function_viii: (audioContext, funcPtr, arg0, arg1, arg2) => { @@ -423,8 +421,13 @@ var LibraryWebAudio = { assert(UTF8ToString(sigPtr)[0] != 'v', 'Do NOT specify the return argument in the signature string for a call to emscripten_audio_worklet_post_function_sig(), just pass the function arguments.'); assert(varargs); #endif - (audioContext ? EmAudio[audioContext].audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: readEmAsmArgs(sigPtr, varargs) }); + (audioContext ? emAudio.get(audioContext).audioWorklet['port'] : port).postMessage({'_wsc': funcPtr, args: readEmAsmArgs(sigPtr, varargs) }); } }; +autoAddDeps(LibraryWebAudio, '$emAudio'); +#if ASSERTIONS || WEBAUDIO_DEBUG +autoAddDeps(LibraryWebAudio, '$emAudioCheckHandle'); +#endif + addToLibrary(LibraryWebAudio); diff --git a/test/codesize/audio_worklet_wasm.expected.js b/test/codesize/audio_worklet_wasm.expected.js index 24730436b4982..a109706e6ccec 100644 --- a/test/codesize/audio_worklet_wasm.expected.js +++ b/test/codesize/audio_worklet_wasm.expected.js @@ -1,4 +1,4 @@ -var m = globalThis.Module || "undefined" != typeof Module ? Module : {}, r = !!globalThis.AudioWorkletGlobalScope, t = "em-ww" == globalThis.name || r, u, z, I, J, G, E, w, X, F, D, C, Y, A, Z; +var m = globalThis.Module || "undefined" != typeof Module ? 
Module : {}, r = !!globalThis.AudioWorkletGlobalScope, t = "em-ww" == globalThis.name || r, u, z, I, J, H, E, w, X, F, D, C, Y, A, Z; function v(a) { u = a; @@ -21,15 +21,15 @@ if (r) { constructor(d) { super(); d = d.processorOptions; - this.v = A.get(d.v); - this.A = d.A; + this.A = A.get(d.A); + this.B = d.B; this.u = d.u; this.s = 4 * this.u; - this.B = Array(Math.min((u.F - 16) / this.s | 0, 64)); + this.v = Array(Math.min((u.F - 16) / this.s | 0, 64)); this.K(); } K() { - for (var d = C(), g = D(this.B.length * this.s) >> 2, e = this.B.length - 1; 0 <= e; e--) this.B[e] = E.subarray(g, g += this.u); + for (var d = C(), g = D(this.v.length * this.s) >> 2, e = this.v.length - 1; 0 <= e; e--) this.v[e] = E.subarray(g, g += this.u); F(d); } static get parameterDescriptors() { @@ -39,29 +39,29 @@ if (r) { var l = d.length, p = g.length, f, q, k = 12 * (l + p), n = 0; for (f of d) n += f.length; n *= this.s; - var H = 0; - for (f of g) H += f.length; - n += H * this.s; - var N = 0; - for (f in e) ++N, k += 8, n += e[f].byteLength; + var G = 0; + for (f of g) G += f.length; + n += G * this.s; + var M = 0; + for (f in e) ++M, k += 8, n += e[f].byteLength; var U = C(), B = k + n + 15 & -16; k = D(B); n = k + (B - n); B = k; for (f of d) { - G[k >> 2] = f.length; - G[k + 4 >> 2] = this.u; - G[k + 8 >> 2] = n; + H[k >> 2] = f.length; + H[k + 4 >> 2] = this.u; + H[k + 8 >> 2] = n; k += 12; for (q of f) E.set(q, n >> 2), n += this.s; } d = k; - for (f = 0; q = e[f++]; ) G[k >> 2] = q.length, G[k + 4 >> 2] = n, k += 8, E.set(q, n >> 2), + for (f = 0; q = e[f++]; ) H[k >> 2] = q.length, H[k + 4 >> 2] = n, k += 8, E.set(q, n >> 2), n += 4 * q.length; e = k; - for (f of g) G[k >> 2] = f.length, G[k + 4 >> 2] = this.u, G[k + 8 >> 2] = n, k += 12, + for (f of g) H[k >> 2] = f.length, H[k + 4 >> 2] = this.u, H[k + 8 >> 2] = n, k += 12, n += this.s * f.length; - if (l = this.v(l, B, p, e, N, d, this.A)) for (f of g) for (q of f) q.set(this.B[--H]); + if (l = this.A(l, B, p, e, M, d, this.B)) for (f of g) for (q of f) q.set(this.v[--G]); F(U); return !!l; } @@ -82,8 +82,8 @@ if (r) { await z; b = b.data; b._boot ? v(b) : b._wpn ? 
(registerProcessor(b._wpn, a(b.H)), port.postMessage({ - _wsc: b.v, - C: [ b.I, 1, b.A ] + _wsc: b.A, + C: [ b.I, 1, b.B ] })) : b._wsc && A.get(b._wsc)(...b.C); }; } @@ -92,7 +92,7 @@ function x() { var a = w.buffer; I = new Uint8Array(a); J = new Int32Array(a); - G = new Uint32Array(a); + H = new Uint32Array(a); E = new Float32Array(a); } @@ -106,14 +106,34 @@ var K = [], L = a => { a = a.data; let c = a._wsc; c && A.get(c)(...a.x); -}, M = a => { +}, N = a => { K.push(a); -}, P = (a, c, b, h) => { - c = O[c]; - O[a].connect(c.destination || c, b, h); -}, O = {}, Q = 0, R = globalThis.TextDecoder && new TextDecoder, S = (a = 0) => { +}; + +function O(a) { + var c = P, b = c.v.pop() || c.s.length; + c.s[b] = a; + return b; +} + +class Q { + s=[ void 0 ]; + v=[]; + get(a) { + return this.s[a]; + } + has(a) { + return void 0 !== this.s[a]; + } +} + +var P = new Q, R = (a, c, b, h) => { + a = P.get(a); + c = P.get(c); + a.connect(c.destination || c, b, h); +}, S = globalThis.TextDecoder && new TextDecoder, T = (a = 0) => { for (var c = I, b = a, h = b + void 0; c[b] && !(b >= h); ) ++b; - if (16 < b - a && c.buffer && R) return R.decode(c.slice(a, b)); + if (16 < b - a && c.buffer && S) return S.decode(c.slice(a, b)); for (h = ""; a < b; ) { var d = c[a++]; if (d & 128) { @@ -126,46 +146,42 @@ var K = [], L = a => { } else h += String.fromCharCode(d); } return h; -}, T = a => { +}, V = a => { if (a) { - var c = G[a >> 2]; + var c = H[a >> 2]; a = { - latencyHint: (c ? S(c) : "") || void 0, - sampleRate: G[a + 4 >> 2] || void 0 + latencyHint: (c ? T(c) : "") || void 0, + sampleRate: H[a + 4 >> 2] || void 0 }; } else a = void 0; - a = new AudioContext(a); - O[++Q] = a; - return Q; -}, V = (a, c, b, h, d) => { + return O(new AudioContext(a)); +}, W = (a, c, b, h, d) => { var g = b ? J[b + 4 >> 2] : 0; if (b) { - var e = J[b >> 2], l = G[b + 8 >> 2], p = g; + var e = J[b >> 2], l = H[b + 8 >> 2], p = g; if (l) { l >>= 2; - for (var f = []; p--; ) f.push(G[l++]); + for (var f = []; p--; ) f.push(H[l++]); l = f; } else l = void 0; b = { numberOfInputs: e, numberOfOutputs: g, outputChannelCount: l, - channelCount: G[b + 12 >> 2] || void 0, + channelCount: H[b + 12 >> 2] || void 0, channelCountMode: [ , "clamped-max", "explicit" ][J[b + 16 >> 2]], channelInterpretation: [ , "discrete" ][J[b + 20 >> 2]], processorOptions: { - v: h, - A: d, + A: h, + B: d, u: 128 } }; } else b = void 0; - a = new AudioWorkletNode(O[a], c ? S(c) : "", b); - O[++Q] = a; - return Q; -}, W = (a, c, b, h) => { - var d = (d = G[c >> 2]) ? S(d) : "", g = J[c + 4 >> 2]; - c = G[c + 8 >> 2]; + return O(new AudioWorkletNode(P.get(a), c ? T(c) : "", b)); +}, aa = (a, c, b, h) => { + var d = (d = H[c >> 2]) ? T(d) : "", g = J[c + 4 >> 2]; + c = H[c + 8 >> 2]; for (var e = [], l = 0; g--; ) e.push({ name: l++, defaultValue: E[c >> 2], @@ -173,19 +189,19 @@ var K = [], L = a => { maxValue: E[c + 8 >> 2], automationRate: (J[c + 12 >> 2] ? 
"k" : "a") + "-rate" }), c += 16; - O[a].audioWorklet.port.postMessage({ + P.get(a).audioWorklet.port.postMessage({ _wpn: d, H: e, I: a, - v: b, - A: h + A: b, + B: h }); -}, aa = () => !1, ba = 1, ca = a => { +}, ba = () => !1, ca = 1, da = a => { a = a.data; var c = a._wsc; c && A.get(c)(...a.C); -}, da = (a, c, b, h, d) => { - var g = O[a], e = g.audioWorklet, l = () => { +}, ea = (a, c, b, h, d) => { + var g = P.get(a), e = g.audioWorklet, l = () => { A.get(h)(a, 0, d); }; if (!e) return l(); @@ -201,22 +217,22 @@ var K = [], L = a => { }); e.port.postMessage({ _boot: 1, - N: ba++, + N: ca++, G: m.wasm, L: w, J: c, F: b }); - e.port.onmessage = ca; + e.port.onmessage = da; A.get(h)(a, 1, d); })).catch(l); }; -function ea(a) { +function fa(a) { let c = document.createElement("button"); c.innerHTML = "Toggle playback"; document.body.appendChild(c); - a = O[a]; + a = P.get(a); c.onclick = () => { "running" != a.state ? a.resume() : a.suspend(); }; @@ -224,13 +240,13 @@ function ea(a) { function y() { Z = { - f: ea, - g: P, - d: T, - h: V, - e: W, - b: aa, - c: da, + f: fa, + g: R, + d: V, + h: W, + e: aa, + b: ba, + c: ea, a: w }; z = WebAssembly.instantiate(m.wasm, { @@ -243,7 +259,7 @@ function y() { C = a.n; Y = a.o; A = a.k; - t ? (Y(u.J, u.F), r || (removeEventListener("message", M), K = K.forEach(L), addEventListener("message", L))) : a.i(); + t ? (Y(u.J, u.F), r || (removeEventListener("message", N), K = K.forEach(L), addEventListener("message", L))) : a.i(); t || X(); })); } diff --git a/test/codesize/test_minimal_runtime_code_size_audio_worklet.json b/test/codesize/test_minimal_runtime_code_size_audio_worklet.json index b2092c1fd2632..e67f1cf0bad5b 100644 --- a/test/codesize/test_minimal_runtime_code_size_audio_worklet.json +++ b/test/codesize/test_minimal_runtime_code_size_audio_worklet.json @@ -1,10 +1,10 @@ { "a.html": 519, "a.html.gz": 357, - "a.js": 4235, - "a.js.gz": 2170, + "a.js": 4394, + "a.js.gz": 2224, "a.wasm": 1329, "a.wasm.gz": 895, - "total": 6083, - "total_gz": 3422 + "total": 6242, + "total_gz": 3476 } diff --git a/tools/unsafe_optimizations.mjs b/tools/unsafe_optimizations.mjs index fb10c04a73db0..383254d46d55f 100755 --- a/tools/unsafe_optimizations.mjs +++ b/tools/unsafe_optimizations.mjs @@ -207,7 +207,7 @@ function optPassMergeVarInitializationAssignments(ast) { } function runOnJsText(js, pretty = false) { - const ast = acorn.parse(js, {ecmaVersion: 2021}); + const ast = acorn.parse(js, {ecmaVersion: 'latest'}); optPassRemoveRedundantOperatorNews(ast);