yaze 0.3.2
Link to the Past ROM Editor
 
wasm_audio.cc
// clang-format off
// wasm_audio.cc - WebAudio Backend Implementation for WASM/Emscripten
// Implements audio output using browser's WebAudio API via Emscripten

#ifdef __EMSCRIPTEN__

#include <emscripten.h>
#include <emscripten/html5.h>
#include <algorithm>
#include <cstring>
#include <iostream>
namespace yaze {
namespace emu {
namespace audio {

// JavaScript functions for WebAudio API interaction
// These are implemented using EM_JS to directly embed JavaScript code

EM_JS(void*, wasm_audio_create_context, (int sample_rate), {
  try {
    // Create AudioContext with specified sample rate
    const AudioContext = window.AudioContext || window.webkitAudioContext;
    if (!AudioContext) {
      console.error("WebAudio API not supported in this browser");
      return 0;
    }

    const ctx = new AudioContext({
      sampleRate: sample_rate,
      latencyHint: 'interactive'
    });

    // Store context in global object for access
    if (!window.yazeAudio) {
      window.yazeAudio = {};
    }

    // Generate unique ID for this context
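    // (Date.now() is a millisecond timestamp, effectively unique here since contexts are created rarely)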
    const contextId = Date.now();
    window.yazeAudio[contextId] = {
      context: ctx,
      processor: null,
      bufferQueue: [],
      isPlaying: false,
      volume: 1.0
    };

    console.log('Created WebAudio context with sample rate:', sample_rate);
    return contextId;
  } catch (e) {
    console.error('Failed to create WebAudio context:', e);
    return 0;
  }
});

EM_JS(void*, wasm_audio_create_processor, (void* context_handle, int buffer_size, int channels), {
  try {
    const audio = window.yazeAudio[context_handle];
    if (!audio || !audio.context) {
      console.error('Invalid audio context handle');
      return 0;
    }

    const ctx = audio.context;

    // Create gain node for volume control
    const gainNode = ctx.createGain();
    gainNode.gain.value = audio.volume;
    audio.gainNode = gainNode;

    // Try AudioWorklet first (modern, better performance)
    // Fall back to ScriptProcessorNode if not available
    const tryAudioWorklet = async () => {
      try {
        // Check if AudioWorklet is supported
        if (typeof AudioWorkletNode === 'undefined' || !ctx.audioWorklet) {
          throw new Error('AudioWorklet not supported');
        }

        // Load the AudioWorklet processor module
        await ctx.audioWorklet.addModule('core/audio_worklet_processor.js');

        // Create the worklet node
        const workletNode = new AudioWorkletNode(ctx, 'snes-audio-processor', {
          numberOfInputs: 0,
          numberOfOutputs: 1,
          outputChannelCount: [channels],
          processorOptions: {
            bufferSize: buffer_size * 4, // Larger ring buffer
            channels: channels
          }
        });

        // Connect worklet -> gain -> destination
        workletNode.connect(gainNode);
        gainNode.connect(ctx.destination);

        // Store worklet reference
        audio.workletNode = workletNode;
        audio.useWorklet = true;
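        // (wasm_audio_queue_samples checks this flag to route samples through the MessagePort instead of the buffer queue)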

        // Handle messages from worklet
        workletNode.port.onmessage = (event) => {
          if (event.data.type === 'status') {
            audio.workletStatus = event.data;
          }
        };

        console.log('[AudioWorklet] Created SNES audio processor with buffer size:', buffer_size);
        return true;
      } catch (e) {
        console.warn('[AudioWorklet] Failed to initialize, falling back to ScriptProcessorNode:', e.message);
        return false;
      }
    };

    // Try AudioWorklet, fall back to ScriptProcessorNode
    tryAudioWorklet().then(success => {
      if (!success) {
        // Fallback: Create ScriptProcessorNode (deprecated but widely supported)
        const processor = ctx.createScriptProcessor(buffer_size, 0, channels);

        // Connect processor -> gain -> destination
        processor.connect(gainNode);
        gainNode.connect(ctx.destination);

        // Store nodes
        audio.processor = processor;
        audio.useWorklet = false;

        // Setup audio processing callback
        processor.onaudioprocess = function(e) {
          const outputBuffer = e.outputBuffer;
          const numChannels = outputBuffer.numberOfChannels;
          const frameCount = outputBuffer.length;

          if (!audio.isPlaying || audio.bufferQueue.length === 0) {
            // Output silence
            for (let ch = 0; ch < numChannels; ch++) {
              const channel = outputBuffer.getChannelData(ch);
              channel.fill(0);
            }
            return;
          }

          // Process queued buffers
          let framesWritten = 0;
          while (framesWritten < frameCount && audio.bufferQueue.length > 0) {
            const buffer = audio.bufferQueue[0];
            const remainingInBuffer = buffer.length - buffer.position;
            const framesToCopy = Math.min(frameCount - framesWritten, remainingInBuffer);

            // Copy samples to output channels
            for (let ch = 0; ch < numChannels; ch++) {
              const outputChannel = outputBuffer.getChannelData(ch);
              for (let i = 0; i < framesToCopy; i++) {
                const sampleIndex = (buffer.position + i) * numChannels + ch;
                outputChannel[framesWritten + i] = buffer.samples[sampleIndex] || 0;
              }
            }

            buffer.position += framesToCopy;
            framesWritten += framesToCopy;

            // Remove buffer if fully consumed
            if (buffer.position >= buffer.length) {
              audio.bufferQueue.shift();
            }
          }

          // Fill remaining with silence if needed
          if (framesWritten < frameCount) {
            for (let ch = 0; ch < numChannels; ch++) {
              const channel = outputBuffer.getChannelData(ch);
              for (let i = framesWritten; i < frameCount; i++) {
                channel[i] = 0;
              }
            }
          }
        };

        console.log('[ScriptProcessor] Created audio processor with buffer size:', buffer_size);
      }
    });

    return context_handle; // Return same handle since processor is stored in audio object
  } catch (e) {
    console.error('Failed to create audio processor:', e);
    return 0;
  }
});

EM_JS(void, wasm_audio_queue_samples, (void* context_handle, float* samples, int frame_count, int channels), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  // Copy samples from WASM memory to JavaScript array
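  // ('samples' is a byte address into the WASM heap; shifting right by 2 converts it to a HEAPF32 index)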
  const totalSamples = frame_count * channels;
  const sampleArray = new Float32Array(totalSamples);
  for (let i = 0; i < totalSamples; i++) {
    sampleArray[i] = HEAPF32[(samples >> 2) + i];
  }

  // Route samples to appropriate backend
  if (audio.useWorklet && audio.workletNode) {
    // AudioWorklet: Send samples via MessagePort (more efficient)
    audio.workletNode.port.postMessage({
      type: 'samples',
      samples: sampleArray,
      frameCount: frame_count
    });
  } else {
    // ScriptProcessorNode: Add to buffer queue
    audio.bufferQueue.push({
      samples: sampleArray,
      length: frame_count,
      position: 0
    });

    // Limit queue size to prevent excessive memory usage
    const maxQueueSize = 32;
    while (audio.bufferQueue.length > maxQueueSize) {
      audio.bufferQueue.shift();
    }
  }
});

EM_JS(void, wasm_audio_play, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return;

  audio.isPlaying = true;

  // Resume context if suspended (due to autoplay policy)
  if (audio.context.state === 'suspended') {
    audio.context.resume().then(() => {
      console.log('Audio context resumed');
    }).catch(e => {
      console.error('Failed to resume audio context:', e);
    });
  }
});

EM_JS(void, wasm_audio_pause, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.isPlaying = false;
});

EM_JS(void, wasm_audio_stop, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.isPlaying = false;
  audio.bufferQueue = [];
});

EM_JS(void, wasm_audio_clear, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.bufferQueue = [];
});

EM_JS(void, wasm_audio_set_volume, (void* context_handle, float volume), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.gainNode) return;

  audio.volume = Math.max(0, Math.min(1, volume));
  audio.gainNode.gain.value = audio.volume;
});

EM_JS(float, wasm_audio_get_volume, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return 1.0;

  return audio.volume;
});

EM_JS(int, wasm_audio_get_queued_frames, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return 0;

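  // Only the ScriptProcessorNode fallback queue is counted here; the AudioWorklet path reports its depth via 'status' messages stored in audio.workletStatus.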
  let total = 0;
  for (const buffer of audio.bufferQueue) {
    total += buffer.length - buffer.position;
  }
  return total;
});

EM_JS(bool, wasm_audio_is_playing, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return false;

  return audio.isPlaying;
});

EM_JS(bool, wasm_audio_is_suspended, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return true;

  return audio.context.state === 'suspended';
});

EM_JS(void, wasm_audio_resume_context, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return;

  if (audio.context.state === 'suspended') {
    audio.context.resume().then(() => {
      console.log('Audio context resumed via user interaction');
    }).catch(e => {
      console.error('Failed to resume audio context:', e);
    });
  }
});

EM_JS(void, wasm_audio_shutdown, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  // Clean up AudioWorklet if used
  if (audio.workletNode) {
    audio.workletNode.port.postMessage({ type: 'clear' });
    audio.workletNode.disconnect();
    audio.workletNode = null;
    audio.useWorklet = false;
    console.log('[AudioWorklet] Processor disconnected');
  }

  // Clean up ScriptProcessorNode if used
  if (audio.processor) {
    audio.processor.disconnect();
    audio.processor = null;
    console.log('[ScriptProcessor] Processor disconnected');
  }

  if (audio.gainNode) {
    audio.gainNode.disconnect();
    audio.gainNode = null;
  }

  if (audio.context) {
    audio.context.close().then(() => {
      console.log('Audio context closed');
    }).catch(e => {
      console.error('Failed to close audio context:', e);
    });
  }

  delete window.yazeAudio[context_handle];
});

// C++ Implementation

WasmAudioBackend::WasmAudioBackend() {
  conversion_buffer_.reserve(kDefaultBufferSize * 2);  // Stereo
  resampling_buffer_.reserve(kDefaultBufferSize * 2);
}

WasmAudioBackend::~WasmAudioBackend() {
  Shutdown();
}

bool WasmAudioBackend::Initialize(const AudioConfig& config) {
  if (initialized_) {
    return true;
  }

  config_ = config;

  // Create WebAudio context
  audio_context_ = reinterpret_cast<void*>(wasm_audio_create_context(config.sample_rate));
  if (!audio_context_) {
    std::cerr << "Failed to create WebAudio context" << std::endl;
    return false;
  }

  // Create script processor for audio output
  script_processor_ = reinterpret_cast<void*>(
      wasm_audio_create_processor(audio_context_, config.buffer_frames, config.channels));
  if (!script_processor_) {
    std::cerr << "Failed to create WebAudio processor" << std::endl;
    wasm_audio_shutdown(audio_context_);
    audio_context_ = nullptr;
    return false;
  }

  initialized_ = true;
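  // Browsers typically start the AudioContext 'suspended' until a user gesture;
  // HandleUserInteraction() resumes it later.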
  context_suspended_ = wasm_audio_is_suspended(audio_context_);

  std::cout << "WasmAudioBackend initialized - Sample rate: " << config.sample_rate
            << " Hz, Channels: " << config.channels
            << ", Buffer: " << config.buffer_frames << " frames" << std::endl;

  return true;
}

void WasmAudioBackend::Shutdown() {
  if (!initialized_) {
    return;
  }

  Stop();

  if (audio_context_) {
    wasm_audio_shutdown(audio_context_);
    audio_context_ = nullptr;
  }

  script_processor_ = nullptr;
  initialized_ = false;

  // Clear buffers
  {
    std::lock_guard<std::mutex> lock(queue_mutex_);
    while (!sample_queue_.empty()) {
      sample_queue_.pop();
    }
  }

  queued_samples_ = 0;
  total_samples_played_ = 0;
}

void WasmAudioBackend::Play() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  playing_ = true;
  wasm_audio_play(audio_context_);

  // Check if context needs resuming (autoplay policy)
  if (context_suspended_) {
    context_suspended_ = wasm_audio_is_suspended(audio_context_);
  }
}

void WasmAudioBackend::Pause() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  playing_ = false;
  wasm_audio_pause(audio_context_);
}

void WasmAudioBackend::Stop() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  playing_ = false;
  wasm_audio_stop(audio_context_);

  // Clear internal queue
  {
    std::lock_guard<std::mutex> lock(queue_mutex_);
    while (!sample_queue_.empty()) {
      sample_queue_.pop();
    }
  }

  queued_samples_ = 0;
  has_underrun_ = false;
}

void WasmAudioBackend::Clear() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  wasm_audio_clear(audio_context_);

  {
    std::lock_guard<std::mutex> lock(queue_mutex_);
    while (!sample_queue_.empty()) {
      sample_queue_.pop();
    }
  }

  queued_samples_ = 0;
}

bool WasmAudioBackend::QueueSamples(const int16_t* samples, int num_samples) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  // Convert 16-bit PCM to float32 for WebAudio
  const int frame_count = num_samples / config_.channels;
  conversion_buffer_.resize(num_samples);

  if (!ConvertToFloat32(samples, conversion_buffer_.data(), num_samples)) {
    return false;
  }

  // Apply volume
  ApplyVolumeToBuffer(conversion_buffer_.data(), num_samples);

  // Queue samples to WebAudio
  wasm_audio_queue_samples(audio_context_, conversion_buffer_.data(),
                           frame_count, config_.channels);

  queued_samples_ += num_samples;
  total_samples_played_ += num_samples;

  return true;
}

bool WasmAudioBackend::QueueSamples(const float* samples, int num_samples) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  // Copy and apply volume
  conversion_buffer_.resize(num_samples);
  std::memcpy(conversion_buffer_.data(), samples, num_samples * sizeof(float));
  ApplyVolumeToBuffer(conversion_buffer_.data(), num_samples);

  const int frame_count = num_samples / config_.channels;
  wasm_audio_queue_samples(audio_context_, conversion_buffer_.data(),
                           frame_count, config_.channels);

  queued_samples_ += num_samples;
  total_samples_played_ += num_samples;

  return true;
}

bool WasmAudioBackend::QueueSamplesNative(const int16_t* samples, int frames_per_channel,
                                          int channels, int native_rate) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  // For now, just use the regular queue function
  // TODO: Implement proper resampling if native_rate != config_.sample_rate
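  // (until then, a native_rate that differs from config_.sample_rate will play back at the wrong speed/pitch)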
  return QueueSamples(samples, frames_per_channel * channels);
}

AudioStatus WasmAudioBackend::GetStatus() const {
  AudioStatus status;

  if (!initialized_ || !audio_context_) {
    return status;
  }

  status.is_playing = wasm_audio_is_playing(audio_context_);
  status.queued_frames = wasm_audio_get_queued_frames(audio_context_);
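  // 2 bytes per sample for INT16, otherwise 4 (float32)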
  status.queued_bytes = status.queued_frames * config_.channels *
                        (config_.format == SampleFormat::INT16 ? 2 : 4);
  status.has_underrun = has_underrun_;

  return status;
}

bool WasmAudioBackend::IsInitialized() const {
  return initialized_;
}

AudioConfig WasmAudioBackend::GetConfig() const {
  return config_;
}

void WasmAudioBackend::SetVolume(float volume) {
  volume_ = std::max(0.0f, std::min(1.0f, volume));

  if (initialized_ && audio_context_) {
    wasm_audio_set_volume(audio_context_, volume_);
  }
}

float WasmAudioBackend::GetVolume() const {
  if (initialized_ && audio_context_) {
    return wasm_audio_get_volume(audio_context_);
  }
  return volume_;
}

void WasmAudioBackend::SetAudioStreamResampling(bool enable, int native_rate, int channels) {
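  // Only records the requested stream parameters; actual resampling is not implemented yet (see QueueSamplesNative).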
  resampling_enabled_ = enable;
  native_rate_ = native_rate;
  native_channels_ = channels;
}

void WasmAudioBackend::HandleUserInteraction() {
  if (initialized_ && audio_context_) {
    wasm_audio_resume_context(audio_context_);
    context_suspended_ = false;
  }
}

bool WasmAudioBackend::IsContextSuspended() const {
  if (initialized_ && audio_context_) {
    return wasm_audio_is_suspended(audio_context_);
  }
  return true;
}

bool WasmAudioBackend::ConvertToFloat32(const int16_t* input, float* output, int num_samples) {
  if (!input || !output) {
    return false;
  }

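  // kInt16ToFloat is presumably 1.0f / 32768.0f, mapping int16 PCM into the [-1.0, 1.0] range WebAudio expects.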
  for (int i = 0; i < num_samples; ++i) {
    output[i] = static_cast<float>(input[i]) * kInt16ToFloat;
  }

  return true;
}

void WasmAudioBackend::ApplyVolumeToBuffer(float* buffer, int num_samples) {
  const float vol = volume_.load();
  if (vol == 1.0f) {
    return;  // No need to apply volume
  }

  for (int i = 0; i < num_samples; ++i) {
    buffer[i] *= vol;
  }
}

}  // namespace audio
}  // namespace emu
}  // namespace yaze

#endif  // __EMSCRIPTEN__