path: root/contrib/SDL-3.2.8/src/audio/emscripten
Diffstat (limited to 'contrib/SDL-3.2.8/src/audio/emscripten')
-rw-r--r--  contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.c  359
-rw-r--r--  contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.h   33
2 files changed, 392 insertions, 0 deletions
diff --git a/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.c b/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.c
new file mode 100644
index 0000000..55fb5b4
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.c
@@ -0,0 +1,359 @@
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

#ifdef SDL_AUDIO_DRIVER_EMSCRIPTEN

#include "../SDL_sysaudio.h"
#include "SDL_emscriptenaudio.h"

#include <emscripten/emscripten.h>

// just turn off clang-format for this whole file, this INDENT_OFF stuff on
// each EM_ASM section is ugly.
/* *INDENT-OFF* */ // clang-format off

static Uint8 *EMSCRIPTENAUDIO_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_size)
{
    return device->hidden->mixbuf;
}

static bool EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buffer_size)
{
    const int framelen = SDL_AUDIO_FRAMESIZE(device->spec);
    MAIN_THREAD_EM_ASM({
        /* Convert incoming buf pointer to a HEAPF32 offset. */
        #ifdef __wasm64__
        var buf = $0 / 4;
        #else
        var buf = $0 >>> 2;
        #endif

        var SDL3 = Module['SDL3'];
        var numChannels = SDL3.audio_playback.currentPlaybackBuffer['numberOfChannels'];
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_playback.currentPlaybackBuffer['getChannelData'](c);
            if (channelData.length != $1) {
                throw 'Web Audio playback buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            for (var j = 0; j < $1; ++j) {
                channelData[j] = HEAPF32[buf + (j*numChannels + c)];
            }
        }
    }, buffer, buffer_size / framelen);
    return true;
}


static void EMSCRIPTENAUDIO_FlushRecording(SDL_AudioDevice *device)
{
    // Do nothing, the new data will just be dropped.
}

static int EMSCRIPTENAUDIO_RecordDevice(SDL_AudioDevice *device, void *buffer, int buflen)
{
    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        var numChannels = SDL3.audio_recording.currentRecordingBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_recording.currentRecordingBuffer.getChannelData(c);
            if (channelData.length != $1) {
                throw 'Web Audio recording buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }

            if (numChannels == 1) { // fastpath this a little for the common (mono) case.
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (j * 4), channelData[j], 'float');
                }
            } else {
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
                }
            }
        }
    }, buffer, (buflen / sizeof(float)) / device->spec.channels);

    return buflen;
}

static void EMSCRIPTENAUDIO_CloseDevice(SDL_AudioDevice *device)
{
    if (!device->hidden) {
        return;
    }

    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        if ($0) {
            if (SDL3.audio_recording.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_recording.silenceTimer);
            }
            if (SDL3.audio_recording.stream !== undefined) {
                var tracks = SDL3.audio_recording.stream.getAudioTracks();
                for (var i = 0; i < tracks.length; i++) {
                    SDL3.audio_recording.stream.removeTrack(tracks[i]);
                }
            }
            if (SDL3.audio_recording.scriptProcessorNode !== undefined) {
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
                SDL3.audio_recording.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_recording.mediaStreamNode !== undefined) {
                SDL3.audio_recording.mediaStreamNode.disconnect();
            }
            SDL3.audio_recording = undefined;
        } else {
            if (SDL3.audio_playback.scriptProcessorNode != undefined) {
                SDL3.audio_playback.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_playback.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_playback.silenceTimer);
            }
            SDL3.audio_playback = undefined;
        }
        if ((SDL3.audioContext !== undefined) && (SDL3.audio_playback === undefined) && (SDL3.audio_recording === undefined)) {
            SDL3.audioContext.close();
            SDL3.audioContext = undefined;
        }
    }, device->recording);

    SDL_free(device->hidden->mixbuf);
    SDL_free(device->hidden);
    device->hidden = NULL;

    SDL_AudioThreadFinalize(device);
}

EM_JS_DEPS(sdlaudio, "$autoResumeAudioContext,$dynCall");

static bool EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
{
    // based on parts of library_sdl.js

    // create context
    const bool result = MAIN_THREAD_EM_ASM_INT({
        if (typeof(Module['SDL3']) === 'undefined') {
            Module['SDL3'] = {};
        }
        var SDL3 = Module['SDL3'];
        if (!$0) {
            SDL3.audio_playback = {};
        } else {
            SDL3.audio_recording = {};
        }

        if (!SDL3.audioContext) {
            if (typeof(AudioContext) !== 'undefined') {
                SDL3.audioContext = new AudioContext();
            } else if (typeof(webkitAudioContext) !== 'undefined') {
                SDL3.audioContext = new webkitAudioContext();
            }
            if (SDL3.audioContext) {
                if ((typeof navigator.userActivation) === 'undefined') {
                    autoResumeAudioContext(SDL3.audioContext);
                }
            }
        }
        return (SDL3.audioContext !== undefined);
    }, device->recording);

    if (!result) {
        return SDL_SetError("Web Audio API is not available!");
    }

    device->spec.format = SDL_AUDIO_F32; // web audio only supports floats

    // Initialize all variables that we clean on shutdown
    device->hidden = (struct SDL_PrivateAudioData *)SDL_calloc(1, sizeof(*device->hidden));
    if (!device->hidden) {
        return false;
    }

    // limit to native freq
    device->spec.freq = EM_ASM_INT({ return Module['SDL3'].audioContext.sampleRate; });
    device->sample_frames = SDL_GetDefaultSampleFramesFromFreq(device->spec.freq) * 2; // double the buffer size, some browsers need more, and we'll just have to live with the latency.

    SDL_UpdatedAudioDeviceFormat(device);

    if (!device->recording) {
        device->hidden->mixbuf = (Uint8 *)SDL_malloc(device->buffer_size);
        if (!device->hidden->mixbuf) {
            return false;
        }
        SDL_memset(device->hidden->mixbuf, device->silence_value, device->buffer_size);
    }

    if (device->recording) {
        /* The idea is to take the recording media stream, hook it up to an
           audio graph where we can pass it through a ScriptProcessorNode
           to access the raw PCM samples and push them to the SDL app's
           callback. From there, we "process" the audio data into silence
           and forget about it.

           This should, strictly speaking, use MediaRecorder for recording, but
           this API is cleaner to use and better supported, and fires a
           callback whenever there's enough data to fire down into the app.
           The downside is that we are spending CPU time silencing a buffer
           that the audiocontext uselessly mixes into any playback. On the
           upside, both of those things are not only run in native code in
           the browser, they're probably SIMD code, too. MediaRecorder
           feels like it's a pretty inefficient tapdance in similar ways,
           to be honest. */

        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            var have_microphone = function(stream) {
                //console.log('SDL audio recording: we have a microphone! Replacing silence callback.');
                if (SDL3.audio_recording.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_recording.silenceTimer);
                    SDL3.audio_recording.silenceTimer = undefined;
                    SDL3.audio_recording.silenceBuffer = undefined;
                }
                SDL3.audio_recording.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream);
                SDL3.audio_recording.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1);
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
                    if ((SDL3 === undefined) || (SDL3.audio_recording === undefined)) { return; }
                    audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
                    SDL3.audio_recording.currentRecordingBuffer = audioProcessingEvent.inputBuffer;
                    dynCall('ip', $2, [$3]);
                };
                SDL3.audio_recording.mediaStreamNode.connect(SDL3.audio_recording.scriptProcessorNode);
                SDL3.audio_recording.scriptProcessorNode.connect(SDL3.audioContext.destination);
                SDL3.audio_recording.stream = stream;
            };

            var no_microphone = function(error) {
                //console.log('SDL audio recording: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
            };

            // we write silence to the audio callback until the microphone is available (user approves use, etc).
            SDL3.audio_recording.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
            SDL3.audio_recording.silenceBuffer.getChannelData(0).fill(0.0);
            var silence_callback = function() {
                SDL3.audio_recording.currentRecordingBuffer = SDL3.audio_recording.silenceBuffer;
                dynCall('ip', $2, [$3]);
            };

            SDL3.audio_recording.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);

            if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
            } else if (navigator.webkitGetUserMedia !== undefined) {
                navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
            }
        }, device->spec.channels, device->sample_frames, SDL_RecordingAudioThreadIterate, device);
    } else {
        // setup a ScriptProcessorNode
        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            SDL3.audio_playback.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0);
            SDL3.audio_playback.scriptProcessorNode['onaudioprocess'] = function (e) {
                if ((SDL3 === undefined) || (SDL3.audio_playback === undefined)) { return; }
                // if we're actually running the node, we don't need the fake callback anymore, so kill it.
                if (SDL3.audio_playback.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_playback.silenceTimer);
                    SDL3.audio_playback.silenceTimer = undefined;
                    SDL3.audio_playback.silenceBuffer = undefined;
                }
                SDL3.audio_playback.currentPlaybackBuffer = e['outputBuffer'];
                dynCall('ip', $2, [$3]);
            };

            SDL3.audio_playback.scriptProcessorNode['connect'](SDL3.audioContext['destination']);

            if (SDL3.audioContext.state === 'suspended') { // uhoh, autoplay is blocked.
                SDL3.audio_playback.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
                SDL3.audio_playback.silenceBuffer.getChannelData(0).fill(0.0);
                var silence_callback = function() {
                    if ((typeof navigator.userActivation) !== 'undefined') {
                        if (navigator.userActivation.hasBeenActive) {
                            SDL3.audioContext.resume();
                        }
                    }

                    // the buffer that gets filled here just gets ignored, so the app can make progress
                    // and/or avoid flooding audio queues until we can actually play audio.
                    SDL3.audio_playback.currentPlaybackBuffer = SDL3.audio_playback.silenceBuffer;
                    dynCall('ip', $2, [$3]);
                    SDL3.audio_playback.currentPlaybackBuffer = undefined;
                };

                SDL3.audio_playback.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
            }
        }, device->spec.channels, device->sample_frames, SDL_PlaybackAudioThreadIterate, device);
    }

    return true;
}

static bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
{
    bool available, recording_available;

    impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
    impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;
    impl->GetDeviceBuf = EMSCRIPTENAUDIO_GetDeviceBuf;
    impl->PlayDevice = EMSCRIPTENAUDIO_PlayDevice;
    impl->FlushRecording = EMSCRIPTENAUDIO_FlushRecording;
    impl->RecordDevice = EMSCRIPTENAUDIO_RecordDevice;

    impl->OnlyHasDefaultPlaybackDevice = true;

    // technically, this just runs in idle time in the main thread, but it's close enough to a "thread" for our purposes.
    impl->ProvidesOwnCallbackThread = true;

    // check availability
    available = MAIN_THREAD_EM_ASM_INT({
        if (typeof(AudioContext) !== 'undefined') {
            return true;
        } else if (typeof(webkitAudioContext) !== 'undefined') {
            return true;
        }
        return false;
    });

    if (!available) {
        SDL_SetError("No audio context available");
    }

    recording_available = available && MAIN_THREAD_EM_ASM_INT({
        if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
            return true;
        } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
            return true;
        }
        return false;
    });

    impl->HasRecordingSupport = recording_available;
    impl->OnlyHasDefaultRecordingDevice = recording_available;

    return available;
}

AudioBootStrap EMSCRIPTENAUDIO_bootstrap = {
    "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, false, false
};

/* *INDENT-ON* */ // clang-format on

#endif // SDL_AUDIO_DRIVER_EMSCRIPTEN
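
Context for readers (not part of the upstream diff): a minimal sketch of how an application built with Emscripten might exercise this playback path through SDL3's public audio API. The callback name, the 440 Hz tone, the 48 kHz request, and the buffer sizes are illustrative assumptions; SDL_Init, SDL_OpenAudioDeviceStream, SDL_PutAudioStreamData, and SDL_ResumeAudioStreamDevice are real SDL3 calls, and emscripten_set_main_loop is the standard Emscripten main-loop hook.

// Hypothetical usage sketch, not part of SDL: feed a sine tone to the default
// playback device. The Emscripten backend only supports SDL_AUDIO_F32, so the
// app requests float samples up front; SDL converts rates/channels as needed.
#include <SDL3/SDL.h>
#include <emscripten/emscripten.h>

static void SDLCALL feed_audio(void *userdata, SDL_AudioStream *stream,
                               int additional_amount, int total_amount)
{
    static float phase = 0.0f;
    float samples[512];
    while (additional_amount > 0) {
        int n = additional_amount / (int)sizeof(float);
        if (n > 512) {
            n = 512;
        }
        for (int i = 0; i < n; i++) {
            samples[i] = 0.25f * SDL_sinf(phase);
            phase += 2.0f * SDL_PI_F * 440.0f / 48000.0f; // assumed 440 Hz tone at the requested 48 kHz
            if (phase > 2.0f * SDL_PI_F) {
                phase -= 2.0f * SDL_PI_F;
            }
        }
        SDL_PutAudioStreamData(stream, samples, n * (int)sizeof(float));
        additional_amount -= n * (int)sizeof(float);
    }
}

static void main_loop(void)
{
    // Nothing to do per frame; the driver pulls audio via the stream callback.
}

int main(void)
{
    SDL_Init(SDL_INIT_AUDIO);
    const SDL_AudioSpec spec = { .format = SDL_AUDIO_F32, .channels = 1, .freq = 48000 };
    SDL_AudioStream *stream = SDL_OpenAudioDeviceStream(
        SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK, &spec, feed_audio, NULL);
    if (!stream) {
        SDL_Log("Couldn't open audio: %s", SDL_GetError());
        return 1;
    }
    SDL_ResumeAudioStreamDevice(stream); // device streams start paused
    emscripten_set_main_loop(main_loop, 0, 1);
    return 0;
}

Note that on the web, playback may stay silent until the user interacts with the page; the driver above handles that by resuming the AudioContext once navigator.userActivation reports activity.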
diff --git a/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.h b/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.h
new file mode 100644
index 0000000..aaf761b
--- /dev/null
+++ b/contrib/SDL-3.2.8/src/audio/emscripten/SDL_emscriptenaudio.h
@@ -0,0 +1,33 @@
/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

#ifndef SDL_emscriptenaudio_h_
#define SDL_emscriptenaudio_h_

#include "../SDL_sysaudio.h"

struct SDL_PrivateAudioData
{
    Uint8 *mixbuf;
};

#endif // SDL_emscriptenaudio_h_