#include <emscripten.h>  // for EM_JS
#include <emscripten/html5.h>

EM_JS(void*, wasm_audio_create_context, (int sample_rate), {
  try {
    const AudioContext = window.AudioContext || window.webkitAudioContext;
    if (!AudioContext) {
      console.error('WebAudio API not supported in this browser');
      return 0;
    }

    const ctx = new AudioContext({
      sampleRate: sample_rate,
      latencyHint: 'interactive'
    });

    // Lazily create the global registry mapping handles to per-context state.
    if (!window.yazeAudio) {
      window.yazeAudio = {};
    }

    const contextId = Date.now();
    window.yazeAudio[contextId] = {
      context: ctx,
      gainNode: null,
      bufferQueue: [],
      isPlaying: false,
      volume: 1.0
    };

    console.log('Created WebAudio context with sample rate:', sample_rate);
    return contextId;
  } catch (e) {
    console.error('Failed to create WebAudio context:', e);
    return 0;
  }
});
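// Note: the registry key is Date.now(), which the C++ side treats as an
// opaque handle; two contexts created within the same millisecond would
// collide. A monotonically increasing counter would be a safer key if
// multiple simultaneous contexts are ever needed.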
EM_JS(void*, wasm_audio_create_processor,
      (void* context_handle, int buffer_size, int channels), {
  try {
    const audio = window.yazeAudio[context_handle];
    if (!audio || !audio.context) {
      console.error('Invalid audio context handle');
      return 0;
    }

    const ctx = audio.context;

    // A gain node between the processor and the destination lets volume
    // changes bypass the processing graph.
    const gainNode = ctx.createGain();
    gainNode.gain.value = audio.volume;
    audio.gainNode = gainNode;

    // Prefer AudioWorklet (runs on the audio rendering thread); fall back to
    // the deprecated ScriptProcessorNode when it is unavailable.
    const tryAudioWorklet = async () => {
      try {
        if (typeof AudioWorkletNode === 'undefined' || !ctx.audioWorklet) {
          throw new Error('AudioWorklet not supported');
        }

        await ctx.audioWorklet.addModule('core/audio_worklet_processor.js');

        const workletNode = new AudioWorkletNode(ctx, 'snes-audio-processor', {
          outputChannelCount: [channels],
          processorOptions: {
            // Placement under processorOptions is reconstructed; the original
            // option layout was partially lost.
            bufferSize: buffer_size * 4
          }
        });

        workletNode.connect(gainNode);
        gainNode.connect(ctx.destination);

        audio.workletNode = workletNode;
        audio.useWorklet = true;

        workletNode.port.onmessage = (event) => {
          if (event.data.type === 'status') {
            audio.workletStatus = event.data;
          }
        };

        console.log('[AudioWorklet] Created SNES audio processor with buffer size:', buffer_size);
        return true;
      } catch (e) {
        console.warn('[AudioWorklet] Failed to initialize, falling back to ScriptProcessorNode:', e.message);
        return false;
      }
    };

    tryAudioWorklet().then(success => {
      if (success) {
        return;
      }

      const processor = ctx.createScriptProcessor(buffer_size, 0, channels);
      processor.connect(gainNode);
      gainNode.connect(ctx.destination);

      audio.processor = processor;
      audio.useWorklet = false;

      processor.onaudioprocess = function(e) {
        const outputBuffer = e.outputBuffer;
        const numChannels = outputBuffer.numberOfChannels;
        const frameCount = outputBuffer.length;

        // Emit silence when paused or when nothing is queued.
        if (!audio.isPlaying || audio.bufferQueue.length === 0) {
          for (let ch = 0; ch < numChannels; ch++) {
            const channel = outputBuffer.getChannelData(ch);
            channel.fill(0);
          }
          return;
        }

        // Drain queued interleaved buffers into the planar output channels.
        let framesWritten = 0;
        while (framesWritten < frameCount && audio.bufferQueue.length > 0) {
          const buffer = audio.bufferQueue[0];
          const remainingInBuffer = buffer.length - buffer.position;
          const framesToCopy = Math.min(frameCount - framesWritten, remainingInBuffer);

          for (let ch = 0; ch < numChannels; ch++) {
            const outputChannel = outputBuffer.getChannelData(ch);
            for (let i = 0; i < framesToCopy; i++) {
              const sampleIndex = (buffer.position + i) * numChannels + ch;
              outputChannel[framesWritten + i] = buffer.samples[sampleIndex] || 0;
            }
          }

          buffer.position += framesToCopy;
          framesWritten += framesToCopy;

          if (buffer.position >= buffer.length) {
            audio.bufferQueue.shift();
          }
        }

        // Zero-fill any remainder so an underrun plays silence, not stale data.
        if (framesWritten < frameCount) {
          for (let ch = 0; ch < numChannels; ch++) {
            const channel = outputBuffer.getChannelData(ch);
            for (let i = framesWritten; i < frameCount; i++) {
              channel[i] = 0;
            }
          }
        }
      };

      console.log('[ScriptProcessor] Created audio processor with buffer size:', buffer_size);
    });

    return context_handle;
  } catch (e) {
    console.error('Failed to create audio processor:', e);
    return 0;
  }
});
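// The worklet module 'core/audio_worklet_processor.js' lives outside this
// file. A minimal sketch of the shape it presumably has, assuming the
// 'samples'/'clear' message types this backend sends (the queueing details
// below are illustrative, not the actual implementation):
//
//   class SnesAudioProcessor extends AudioWorkletProcessor {
//     constructor() {
//       super();
//       this.queue = [];  // pending { samples, frameCount } messages
//       this.port.onmessage = (e) => {
//         if (e.data.type === 'samples') this.queue.push(e.data);
//         else if (e.data.type === 'clear') this.queue = [];
//       };
//     }
//     process(inputs, outputs) {
//       // De-interleave queued samples into outputs[0]; output silence when
//       // the queue is empty. Returning true keeps the node alive.
//       return true;
//     }
//   }
//   registerProcessor('snes-audio-processor', SnesAudioProcessor);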
EM_JS(void, wasm_audio_queue_samples,
      (void* context_handle, float* samples, int frame_count, int channels), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) {
    return;
  }

  // Copy interleaved float samples out of the WASM heap: `samples` is a byte
  // address, so >> 2 converts it to a HEAPF32 (4-byte element) index. The C++
  // buffer may be reused as soon as this call returns.
  const totalSamples = frame_count * channels;
  const sampleArray = new Float32Array(totalSamples);
  for (let i = 0; i < totalSamples; i++) {
    sampleArray[i] = HEAPF32[(samples >> 2) + i];
  }

  if (audio.useWorklet && audio.workletNode) {
    // AudioWorklet path: hand samples to the processor thread via its port.
    audio.workletNode.port.postMessage({
      type: 'samples',  // type name reconstructed; must match the worklet module
      samples: sampleArray,
      frameCount: frame_count
    });
  } else {
    // ScriptProcessor path: append to the pull-based queue drained in
    // onaudioprocess above. The length/position fields are reconstructed from
    // their use in the drain loop.
    audio.bufferQueue.push({
      samples: sampleArray,
      length: frame_count,
      position: 0
    });

    // Bound latency by dropping the oldest buffers when the queue overflows.
    const maxQueueSize = 32;
    while (audio.bufferQueue.length > maxQueueSize) {
      audio.bufferQueue.shift();
    }
  }
});
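// Back-of-envelope latency bound for the ScriptProcessor path: with
// maxQueueSize = 32 and, say, 512-frame buffers at 48 kHz (illustrative
// values), the queue holds at most 32 * 512 / 48000 ≈ 0.34 s of audio before
// the oldest buffers start being dropped.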
EM_JS(void, wasm_audio_play, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return;

  audio.isPlaying = true;

  // Autoplay policy: contexts start suspended until a user gesture.
  if (audio.context.state === 'suspended') {
    audio.context.resume().then(() => {
      console.log('Audio context resumed');
    }).catch(e => {
      console.error('Failed to resume audio context:', e);
    });
  }
});
EM_JS(void, wasm_audio_pause, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.isPlaying = false;
});
EM_JS(void, wasm_audio_stop, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.isPlaying = false;
  audio.bufferQueue = [];
});
EM_JS(void, wasm_audio_clear, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  audio.bufferQueue = [];
});
EM_JS(void, wasm_audio_set_volume, (void* context_handle, float volume), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.gainNode) return;

  audio.volume = Math.max(0, Math.min(1, volume));
  audio.gainNode.gain.value = audio.volume;
});
EM_JS(float, wasm_audio_get_volume, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return 1.0;
  return audio.volume;
});
EM_JS(int, wasm_audio_get_queued_frames, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return 0;

  // Sum the unplayed frames across all queued buffers.
  let total = 0;
  for (const buffer of audio.bufferQueue) {
    total += buffer.length - buffer.position;
  }
  return total;
});
EM_JS(bool, wasm_audio_is_playing, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return false;
  return audio.isPlaying;
});
EM_JS(bool, wasm_audio_is_suspended, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return true;
  return audio.context.state === 'suspended';
});
EM_JS(void, wasm_audio_resume_context, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio || !audio.context) return;

  if (audio.context.state === 'suspended') {
    audio.context.resume().then(() => {
      console.log('Audio context resumed via user interaction');
    }).catch(e => {
      console.error('Failed to resume audio context:', e);
    });
  }
});
EM_JS(void, wasm_audio_shutdown, (void* context_handle), {
  const audio = window.yazeAudio[context_handle];
  if (!audio) return;

  if (audio.workletNode) {
    audio.workletNode.port.postMessage({ type: 'clear' });
    audio.workletNode.disconnect();
    audio.workletNode = null;
    audio.useWorklet = false;
    console.log('[AudioWorklet] Processor disconnected');
  }

  if (audio.processor) {
    audio.processor.disconnect();
    audio.processor = null;
    console.log('[ScriptProcessor] Processor disconnected');
  }

  if (audio.gainNode) {
    audio.gainNode.disconnect();
    audio.gainNode = null;
  }

  if (audio.context) {
    audio.context.close().then(() => {
      console.log('Audio context closed');
    }).catch(e => {
      console.error('Failed to close audio context:', e);
    });
  }

  delete window.yazeAudio[context_handle];
});
WasmAudioBackend::WasmAudioBackend() {
  conversion_buffer_.reserve(kDefaultBufferSize * 2);
  resampling_buffer_.reserve(kDefaultBufferSize * 2);
}

WasmAudioBackend::~WasmAudioBackend() {
  Shutdown();  // destructor body reconstructed; releases the JS-side context
}
bool WasmAudioBackend::Initialize(const AudioConfig& config) {
  config_ = config;

  audio_context_ =
      reinterpret_cast<void*>(wasm_audio_create_context(config.sample_rate));
  if (!audio_context_) {
    std::cerr << "Failed to create WebAudio context" << std::endl;
    return false;
  }

  script_processor_ = reinterpret_cast<void*>(wasm_audio_create_processor(
      audio_context_, config.buffer_frames, config.channels));
  if (!script_processor_) {
    std::cerr << "Failed to create WebAudio processor" << std::endl;
    wasm_audio_shutdown(audio_context_);
    audio_context_ = nullptr;
    return false;
  }

  context_suspended_ = wasm_audio_is_suspended(audio_context_);
  initialized_ = true;

  std::cout << "WasmAudioBackend initialized - Sample rate: " << config.sample_rate
            << " Hz, Channels: " << config.channels
            << ", Buffer: " << config.buffer_frames << " frames" << std::endl;
  return true;
}
void WasmAudioBackend::Shutdown() {
  if (!initialized_) {
    return;
  }

  if (audio_context_) {
    wasm_audio_shutdown(audio_context_);
    audio_context_ = nullptr;
  }

  script_processor_ = nullptr;
  initialized_ = false;

  {
    std::lock_guard<std::mutex> lock(queue_mutex_);
    while (!sample_queue_.empty()) {
      sample_queue_.pop();
    }
  }

  queued_samples_ = 0;
  total_samples_played_ = 0;
}
void WasmAudioBackend::Play() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  wasm_audio_play(audio_context_);

  // Re-check: the context may have been resumed by a user gesture.
  if (context_suspended_) {
    context_suspended_ = wasm_audio_is_suspended(audio_context_);
  }
}
void WasmAudioBackend::Pause() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  wasm_audio_pause(audio_context_);
}
void WasmAudioBackend::Stop() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  wasm_audio_stop(audio_context_);

  std::lock_guard<std::mutex> lock(queue_mutex_);
  while (!sample_queue_.empty()) {
    sample_queue_.pop();
  }

  has_underrun_ = false;
}
void WasmAudioBackend::Clear() {
  if (!initialized_ || !audio_context_) {
    return;
  }

  wasm_audio_clear(audio_context_);

  std::lock_guard<std::mutex> lock(queue_mutex_);
  while (!sample_queue_.empty()) {
    sample_queue_.pop();
  }
}
bool WasmAudioBackend::QueueSamples(const int16_t* samples, int num_samples) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  const int frame_count = num_samples / config_.channels;
  conversion_buffer_.resize(num_samples);

  if (!ConvertToFloat32(samples, conversion_buffer_.data(), num_samples)) {
    return false;
  }

  ApplyVolumeToBuffer(conversion_buffer_.data(), num_samples);

  wasm_audio_queue_samples(audio_context_, conversion_buffer_.data(),
                           frame_count, config_.channels);

  queued_samples_ += num_samples;
  total_samples_played_ += num_samples;
  return true;
}
bool WasmAudioBackend::QueueSamples(const float* samples, int num_samples) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  conversion_buffer_.resize(num_samples);
  std::memcpy(conversion_buffer_.data(), samples, num_samples * sizeof(float));
  ApplyVolumeToBuffer(conversion_buffer_.data(), num_samples);

  const int frame_count = num_samples / config_.channels;
  wasm_audio_queue_samples(audio_context_, conversion_buffer_.data(),
                           frame_count, config_.channels);

  queued_samples_ += num_samples;
  total_samples_played_ += num_samples;
  return true;
}
bool WasmAudioBackend::QueueSamplesNative(const int16_t* samples,
                                          int frames_per_channel, int channels,
                                          int native_rate) {
  if (!initialized_ || !audio_context_) {
    return false;
  }

  // No explicit resampling here; the interleaved samples are forwarded as-is.
  return QueueSamples(samples, frames_per_channel * channels);
}
AudioStatus WasmAudioBackend::GetStatus() const {
  AudioStatus status;

  if (!initialized_ || !audio_context_) {
    return status;
  }

  status.is_playing = wasm_audio_is_playing(audio_context_);
  status.queued_frames = wasm_audio_get_queued_frames(audio_context_);
  status.queued_bytes = status.queued_frames * config_.channels *
                        (config_.format == SampleFormat::INT16 ? 2 : 4);
  status.has_underrun = has_underrun_;
  return status;
}
bool WasmAudioBackend::IsInitialized() const { return initialized_; }
AudioConfig WasmAudioBackend::GetConfig() const { return config_; }
void WasmAudioBackend::SetVolume(float volume) {
  volume_ = std::max(0.0f, std::min(1.0f, volume));

  if (initialized_ && audio_context_) {
    wasm_audio_set_volume(audio_context_, volume_);
  }
}
float WasmAudioBackend::GetVolume() const {
  if (initialized_ && audio_context_) {
    return wasm_audio_get_volume(audio_context_);
  }
  return volume_;
}
void WasmAudioBackend::SetAudioStreamResampling(bool enable, int native_rate,
                                                int channels) {
  resampling_enabled_ = enable;
  native_rate_ = native_rate;
  native_channels_ = channels;
}
void WasmAudioBackend::HandleUserInteraction() {
  if (initialized_ && audio_context_) {
    wasm_audio_resume_context(audio_context_);
    context_suspended_ = false;
  }
}
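// Browsers keep an AudioContext suspended until a user gesture, so the host
// application must call HandleUserInteraction() from an input event. A sketch
// using the html5.h API included above ("#canvas" as the target and passing
// the backend as userData are assumptions about the host application):
//
//   EM_BOOL OnClick(int, const EmscriptenMouseEvent*, void* user_data) {
//     static_cast<WasmAudioBackend*>(user_data)->HandleUserInteraction();
//     return EM_FALSE;  // don't consume the event
//   }
//   ...
//   emscripten_set_click_callback("#canvas", &backend, EM_FALSE, OnClick);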
bool WasmAudioBackend::IsContextSuspended() const {
  if (initialized_ && audio_context_) {
    return wasm_audio_is_suspended(audio_context_);
  }
  return true;  // an uninitialized backend is treated as suspended
}
bool WasmAudioBackend::ConvertToFloat32(const int16_t* input, float* output,
                                        int num_samples) {
  if (!input || !output) {
    return false;
  }

  for (int i = 0; i < num_samples; ++i) {
    output[i] = static_cast<float>(input[i]) * kInt16ToFloat;
  }
  return true;
}
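// kInt16ToFloat is expected to be 1.0f / 32768.0f (defined elsewhere in this
// backend), mapping int16 [-32768, 32767] onto [-1.0, ~0.99997];
// e.g. -32768 -> -1.0 and 16384 -> 0.5.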
void WasmAudioBackend::ApplyVolumeToBuffer(float* buffer, int num_samples) {
  const float vol = volume_.load();
  if (vol >= 1.0f) {
    return;  // unity gain: nothing to do
  }

  for (int i = 0; i < num_samples; ++i) {
    buffer[i] *= vol;
  }
}
EM_JS(void, CallJsAiDriver, (const char* history_json), {
  if (window.yaze && window.yaze.ai && window.yaze.ai.processAgentRequest) {
    window.yaze.ai.processAgentRequest(UTF8ToString(history_json));
  } else {
    console.error('AI Driver not found in window.yaze.ai.processAgentRequest');
  }
});