// src/voice/captureManager.test.ts

import { test } from "bun:test"; import assert from "node:assert/strict"; import { EventEmitter } from "node:events"; import { VoiceSessionManager } from "./voiceSessionManager.ts"; import { createTestSettings } from "../testSettings.ts"; import { BARGE_IN_MIN_SPEECH_MS, CAPTURE_MAX_DURATION_MS, CAPTURE_NEAR_SILENCE_ABORT_MIN_AGE_MS, VOICE_TURN_PROMOTION_MIN_CLIP_MS } from "./voiceSessionManager.constants.ts"; import { getOrCreatePerUserAsrState } from "./voiceAsrBridge.ts"; import type { VoiceSession } from "./voiceSessionTypes.ts";

/** Builds a mono PCM16 little-endian buffer in which every sample holds the same amplitude. */
function makeMonoPcm16(sampleCount: number, amplitude: number) {
  const bytesPerSample = 2;
  const pcm = Buffer.alloc(sampleCount * bytesPerSample);
  for (let offset = 0; offset < pcm.length; offset += bytesPerSample) {
    pcm.writeInt16LE(amplitude, offset);
  }
  return pcm;
}

/** Yields to the microtask queue twice so promise chains queued by event handlers settle. */
async function flushMicrotasks() {
  for (let turn = 0; turn < 2; turn += 1) {
    await Promise.resolve();
  }
}

// Builds a VoiceSessionManager wired entirely to inert doubles:
// - client: a no-op Discord client stub (event wiring ignored, empty caches,
//   a fixed bot user) — TODO confirm the manager only touches these members.
// - store: records every logAction entry into `logs` and serves test settings
//   with voice enabled and the "brain" reply path.
// - llm: reports ASR/TTS readiness and returns a canned generation.
// Returns the manager plus the captured `logs` and `touchCalls` arrays so
// tests can assert on side effects without real Discord/OpenAI traffic.
function createManager() { const logs: Array<Record<string, unknown>> = []; const touchCalls: Array<Record<string, unknown>> = [];

const manager = new VoiceSessionManager({ client: { on() {}, off() {}, channels: { async fetch() { return null; } }, guilds: { cache: new Map() }, users: { cache: new Map() }, user: { id: "bot-user", username: "clanky" } }, store: { logAction(entry: Record<string, unknown>) { logs.push(entry); }, getSettings() { return createTestSettings({ identity: { botName: "clanky" }, voice: { enabled: true, conversationPolicy: { replyPath: "brain" } } }); } }, appConfig: { openaiApiKey: "test-openai-key" }, llm: { isAsrReady() { return true; }, isSpeechSynthesisReady() { return true; }, async generate() { return { text: "ok" }; } }, memory: null });

// Replace activity touching and turn queueing with recording stubs so tests
// can observe the calls without any real turn processing happening.
manager.touchActivity = (guildId, settings) => { touchCalls.push({ guildId, settings }); }; manager.turnProcessor.queueRealtimeTurn = () => true; manager.turnProcessor.queueFileAsrTurn = () => true;

return { manager, logs, touchCalls }; }

// Builds a fully-populated VoiceSession fixture with every timer, counter and
// sub-state seeded to an inert default; `overrides` lets each test replace any
// subset of fields. Fix: the original signature used bare `Partial` with no
// type argument (TS2314 — `Partial<T>` requires one); it is now
// `Partial<VoiceSession>` so overrides are actually type-checked. The trailing
// `as VoiceSession` cast is retained because the literal plus a partial spread
// cannot be proven complete by the compiler.
function createSession(overrides: Partial<VoiceSession> = {}): VoiceSession { const now = Date.now(); return { id: "session-1", guildId: "guild-1", voiceChannelId: "voice-1", textChannelId: "text-1", requestedByUserId: "user-1", mode: "openai_realtime", realtimeProvider: "openai", realtimeInputSampleRateHz: 24_000, realtimeOutputSampleRateHz: 24_000, recentVoiceTurns: [], transcriptTurns: [], modelContextSummary: { generation: null, decider: null }, voxClient: null, realtimeClient: null, startedAt: now - 60_000, lastActivityAt: now - 2_000, maxEndsAt: now + 60_000, inactivityEndsAt: now + 60_000, maxTimer: null, inactivityTimer: null, botTurnResetTimer: null, botTurnOpen: false, bargeInSuppressionUntil: 0, bargeInSuppressedAudioChunks: 0, bargeInSuppressedAudioBytes: 0, lastBotActivityTouchAt: 0, responseFlushTimer: null, responseWatchdogTimer: null, responseDoneGraceTimer: null, botDisconnectTimer: null, lastResponseRequestAt: 0, lastAudioDeltaAt: 0, lastAssistantReplyAt: 0, lastDirectAddressAt: 0, lastDirectAddressUserId: null, musicWakeLatchedUntil: 0, musicWakeLatchedByUserId: null, lastInboundAudioAt: 0, realtimeReplySupersededCount: 0, pendingRealtimeInputBytes: 0, nextResponseRequestId: 1, pendingResponse: null, activeReplyInterruptionPolicy: null, lastRequestedRealtimeUtterance: null, pendingFileAsrTurns: 0, fileAsrTurnDrainActive: false, pendingFileAsrTurnsQueue: [], realtimeTurnDrainActive: false, pendingRealtimeTurns: [], openAiAsrSessions: new Map(), perUserAsrEnabled: false, sharedAsrEnabled: false, openAiSharedAsrState: null, openAiPerUserAsrModel: "", openAiPerUserAsrLanguage: "", openAiPerUserAsrPrompt: "", realtimePendingToolCalls: new Map(), realtimeToolCallExecutions: new Map(), realtimeToolResponseDebounceTimer: null, realtimeCompletedToolCallIds: new Map(), lastRealtimeAssistantAudioItemId: null, lastRealtimeAssistantAudioItemContentIndex: 0, lastRealtimeAssistantAudioItemReceivedMs: 0, realtimeToolDefinitions: [], lastRealtimeToolHash: "", 
lastRealtimeToolRefreshAt: 0, lastRealtimeToolCallerUserId: null, awaitingToolOutputs: false, toolCallEvents: [], mcpStatus: [], toolMusicTrackCatalog: new Map(), memoryWriteWindow: [], voiceCommandState: null, musicQueueState: { tracks: [], cursor: -1, version: 0 }, assistantOutput: { phase: "idle", reason: "idle", phaseEnteredAt: now, lastSyncedAt: now, requestId: null, ttsPlaybackState: "idle", ttsBufferedSamples: 0, lastTrigger: "test_seed" }, thoughtLoopTimer: null, thoughtLoopBusy: false, nextThoughtAt: 0, lastThoughtAttemptAt: 0, lastThoughtSpokenAt: 0, userCaptures: new Map(), streamWatch: { active: false, targetUserId: null, requestedByUserId: null, channelId: null, startedAt: 0, commentPending: false, lastFrameAt: 0, lastCommentaryAt: 0, lastCommentaryNote: null, lastMemoryRecapAt: 0, lastMemoryRecapText: null, lastMemoryRecapDurableSaved: false, lastMemoryRecapReason: null, latestFrameAt: 0, latestFrameMimeType: null, latestFrameDataBase64: null, acceptedFrameCountInWindow: 0, frameWindowStartedAt: 0, lastNoteAt: 0, lastNoteProvider: null, lastNoteModel: null, noteEntries: [], ingestedFrameCount: 0 }, music: { phase: "idle", active: false, ducked: false, pauseReason: null, startedAt: 0, stoppedAt: 0, provider: null, source: null, lastTrackId: null, lastTrackTitle: null, lastTrackArtists: [], lastTrackUrl: null, lastQuery: null, lastRequestedByUserId: null, lastRequestText: null, lastCommandAt: 0, lastCommandReason: null, pendingQuery: null, pendingPlatform: "auto", pendingResults: [], pendingRequestedByUserId: null, pendingRequestedAt: 0 }, soundboard: { playCount: 0, lastPlayedAt: 0 }, latencyStages: [], membershipEvents: [], baseVoiceInstructions: "", lastRealtimeInstructions: "", lastRealtimeInstructionsAt: 0, realtimeInstructionRefreshTimer: null, realtimeTurnContextRefreshState: { pending: false, lastStartedAt: 0, lastCompletedAt: 0, lastSkippedReason: null }, settingsSnapshot: createTestSettings({ identity: { botName: "clanky" }, voice: { enabled: 
true, transcription: { enabled: true }, conversationPolicy: { replyPath: "brain" } } }), cleanupHandlers: [], ending: false, deferredVoiceActions: {}, deferredVoiceActionTimers: {}, ...overrides } as VoiceSession; }

/** Forces the per-user ASR bridge state for `userId` into the "ready" phase with a stub realtime client. */
function seedReadyPerUserAsr(manager: VoiceSessionManager, session: VoiceSession, userId: string) {
  const asrState = getOrCreatePerUserAsrState(session, userId);
  assert.ok(asrState);
  const stubClient = {
    ws: { readyState: 1 },
    clearInputAudioBuffer() {},
    appendInputAudioPcm() {},
    commitInputAudioBuffer() {},
  };
  asrState.phase = "ready";
  asrState.client = stubClient;
  return asrState;
}

/** Replaces the session's interrupt-decision map with a single committed "interrupt" entry for the given utterance. */
function recordCommittedInterruptDecision(session: VoiceSession, utteranceId: number, source = "test_interrupt") {
  const entry = {
    transcript: "",
    decision: "interrupt",
    decidedAt: Date.now(),
    source,
    burstId: 1,
  };
  const decisions = new Map();
  decisions.set(utteranceId, entry);
  session.interruptDecisionsByUtteranceId = decisions;
}

// Promotion via server VAD requires BOTH the bridge's speech-started utterance
// id to match the capture's utterance id AND the capture's local signal to
// clear the activity/peak thresholds; either one alone must yield null.
test("resolveCaptureTurnPromotionReason requires matching server VAD utterance id and local thresholds", () => { const { manager } = createManager(); const session = createSession(); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1"); const capture = { userId: "speaker-1", asrUtteranceId: 9, bytesSent: Math.ceil((24_000 * 2 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), signalSampleCount: 24_000, signalActiveSampleCount: 600, signalPeakAbs: 1024, signalSumSquares: 0 };

// Mismatched utterance id (VAD fired for 8, capture is 9): no promotion.
asrState.speechDetectedUtteranceId = 8; asrState.speechDetectedAt = Date.now(); assert.equal(manager.resolveCaptureTurnPromotionReason({ session, capture }), null);

// Matching id but weakened local signal (few active samples, low peak): still null.
asrState.speechDetectedUtteranceId = 9; capture.signalActiveSampleCount = 100; capture.signalPeakAbs = 200; assert.equal(manager.resolveCaptureTurnPromotionReason({ session, capture }), null);

// Matching id with the strong local signal restored: promoted as server_vad_confirmed.
capture.signalActiveSampleCount = 600; capture.signalPeakAbs = 1024; assert.equal(manager.resolveCaptureTurnPromotionReason({ session, capture }), "server_vad_confirmed"); });

// With no server VAD signal at all (asrUtteranceId: 0, no seeded ASR state),
// a capture with a very strong local signal (high active-sample count, peak,
// and energy) must still be promoted on local evidence alone.
test("resolveCaptureTurnPromotionReason allows strong local promotion without server VAD", () => { const { manager } = createManager(); const session = createSession(); const capture = { userId: "speaker-1", asrUtteranceId: 0, bytesSent: Math.ceil((24_000 * 2 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), signalSampleCount: 24_000, signalActiveSampleCount: 4_200, signalPeakAbs: 4096, signalSumSquares: 24_000 * 1024 * 1024 };

assert.equal(manager.resolveCaptureTurnPromotionReason({ session, capture }), "strong_local_audio"); });

// When the realtime per-user ASR bridge and transcript-overlap interrupts are
// both active, raw inbound audio must NOT trigger interruptBotSpeechForBargeIn
// while the bot is speaking live — interruption is left to transcript bursts.
test("startInboundCapture leaves live bot interruption to transcript bursts when realtime ASR bridge is active", async () => { const { manager } = createManager(); manager.shouldUsePerUserTranscription = () => true; manager.shouldUseTranscriptOverlapInterrupts = () => true; const interruptCalls: Array<Record<string, unknown>> = []; manager.interruptBotSpeechForBargeIn = (args) => { interruptCalls.push(args); return true; };

// Session seeded mid-reply: assistant phase speaking_live, an open bot turn,
// and a pendingResponse that already received audio 2s ago.
const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const now = Date.now(); const session = createSession({ mode: "openai_realtime", realtimeInputSampleRateHz: 24_000, botTurnOpen: true, botTurnOpenAt: now - 2_500, assistantOutput: { phase: "speaking_live", reason: "bot_audio_live", phaseEnteredAt: now - 1_000, lastSyncedAt: now - 1_000, requestId: 12, ttsPlaybackState: "playing", ttsBufferedSamples: 24_000, lastTrigger: "test_seed" }, pendingResponse: { requestId: 12, requestedAt: now - 3_000, source: "voice_reply", handlingSilence: false, audioReceivedAt: now - 2_000, interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, utteranceText: "still talking", latencyContext: null, userId: "speaker-1", retryCount: 0, hardRecoveryAttempted: false }, voxClient }); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

// First chunk is long/loud enough to exceed the barge-in speech minimum.
const firstChunk = makeMonoPcm16( Math.ceil((24_000 * (BARGE_IN_MIN_SPEECH_MS + 50)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", firstChunk); await flushMicrotasks();

// Capture was created and promoted on local audio, but no barge-in fired.
const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); assert.equal(capture.asrUtteranceId, asrState.utterance.id); assert.equal(capture.promotionReason, "strong_local_audio"); assert.equal(interruptCalls.length, 0);

// Even after server VAD confirms the same utterance id...
asrState.speechDetectedUtteranceId = capture.asrUtteranceId; asrState.speechDetectedAt = Date.now();

// ...a follow-up audio chunk still must not invoke the barge-in interrupt.
const followupChunk = makeMonoPcm16(Math.ceil((24_000 * 120) / 1000), 3000); voxClient.emit("userAudio", "speaker-1", followupChunk); await flushMicrotasks();

assert.equal(interruptCalls.length, 0); });

// While the bot's reply is still buffered (not yet audible) and the provider's
// speech_started event has not arrived, a promoted local capture should arm a
// pending overlap interrupt (logged as voice_interrupt_speech_started_pending
// with eventType local_capture_overlap) rather than interrupting immediately.
test("startInboundCapture arms a local pending overlap interrupt before provider speech_started arrives", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; manager.shouldUseTranscriptOverlapInterrupts = () => true; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; voxClient.getTtsBufferDepthSamples = () => 18_000; voxClient.getTtsPlaybackState = () => "buffered"; const now = Date.now(); const session = createSession({ mode: "openai_realtime", realtimeInputSampleRateHz: 24_000, assistantOutput: { phase: "speaking_buffered", reason: "bot_audio_buffered", phaseEnteredAt: now - 1_200, lastSyncedAt: now - 200, requestId: 14, ttsPlaybackState: "buffered", ttsBufferedSamples: 18_000, lastTrigger: "test_seed" }, activeReplyInterruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, voxClient }); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

const firstChunk = makeMonoPcm16( Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", firstChunk); await flushMicrotasks();

// Promoted locally; no provider speech_started log should exist yet.
const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); assert.equal(capture.asrUtteranceId, asrState.utterance.id); assert.equal(capture.promotionReason, "strong_local_audio"); assert.equal( logs.some((entry) => entry?.content === "openai_realtime_asr_speech_started"), false );

// The pending-interrupt log carries the capture's utterance id, the local
// overlap event type, and the initial deferral reason.
const pendingLog = logs.find((entry) => entry?.content === "voice_interrupt_speech_started_pending"); assert.ok(pendingLog); assert.equal(pendingLog?.metadata?.utteranceId, capture.asrUtteranceId); assert.equal(pendingLog?.metadata?.eventType, "local_capture_overlap"); assert.equal(pendingLog?.metadata?.initialReason, "insufficient_capture_bytes"); });

// A provisional capture whose age exceeds the early-abort window and whose
// audio is near silence (amplitude 64) must be dropped, removing it from
// userCaptures and logging reason near_silence_early_abort.
test("startInboundCapture aborts near-silence captures once they age past the early-abort window", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => false; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient });

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

// Backdate the capture past the minimum abort age so the next weak chunk triggers the drop.
const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); capture.startedAt = Date.now() - CAPTURE_NEAR_SILENCE_ABORT_MIN_AGE_MS - 25;

const weakPcm = makeMonoPcm16(24_000, 64); voxClient.emit("userAudio", "speaker-1", weakPcm); await flushMicrotasks();

assert.equal(session.userCaptures.has("speaker-1"), false); const droppedLog = logs.find((entry) => entry?.content === "voice_turn_dropped_provisional_capture"); assert.ok(droppedLog); assert.equal(droppedLog?.metadata?.reason, "near_silence_early_abort"); });

// The capture manager schedules a max-duration timer; firing it manually must
// finalize the capture with reason max_duration. globalThis.setTimeout /
// clearTimeout are patched to record scheduled timers instead of running them,
// and restored in the finally block.
test("startInboundCapture max duration timer finalizes long captures", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => false; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ mode: "openai_realtime", voxClient }); const scheduled: Array<{ delay: number; callback: () => void; cleared: boolean }> = []; const originalSetTimeout = globalThis.setTimeout; const originalClearTimeout = globalThis.clearTimeout;

// Fake timers: record every scheduled callback/delay; clearTimeout just flags the record.
globalThis.setTimeout = ((callback: TimerHandler, delay?: number) => { const record = { delay: Number(delay || 0), callback: callback as () => void, cleared: false }; scheduled.push(record); // eslint-disable-next-line no-restricted-syntax return record as unknown as ReturnType; }) as typeof setTimeout; globalThis.clearTimeout = ((handle: ReturnType) => { // eslint-disable-next-line no-restricted-syntax const record = handle as unknown as { cleared?: boolean }; record.cleared = true; }) as typeof clearTimeout;

try { manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot }); const strongPcm = makeMonoPcm16(Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks();

// Locate the timer scheduled with the max-capture delay and fire it by hand.
const maxTimer = scheduled.find((entry) => entry.delay === CAPTURE_MAX_DURATION_MS);
assert.ok(maxTimer);
maxTimer.callback();

assert.equal(session.userCaptures.has("speaker-1"), false);
const finalizedLog = logs.find((entry) => entry?.content === "voice_turn_finalized");
assert.ok(finalizedLog);
assert.equal(finalizedLog?.metadata?.reason, "max_duration");

} finally { globalThis.setTimeout = originalSetTimeout; globalThis.clearTimeout = originalClearTimeout; } });

// If an interrupted assistant reply has a committed "interrupt" decision for
// the capture's utterance but the ASR turn comes back empty, the session must
// hand the situation to the voice brain via fireVoiceRuntimeEvent with source
// unclear_empty_asr_bridge_turn and an "unclear words" transcript.
test("startInboundCapture hands empty interrupted ASR turns back to the voice brain", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const runtimeEvents = []; manager.fireVoiceRuntimeEvent = async (payload) => { runtimeEvents.push(payload); return true; }; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, interruptedAssistantReply: { utteranceText: "no, wait, one more thing", interruptedByUserId: "speaker-1", interruptedAt: Date.now() - 200, source: "barge_in_interrupt", interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" } } }); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

// Commit an interrupt decision for the live utterance, end the audio, then
// wait 2.6s of real time for the empty-turn grace window to elapse —
// presumably matching a ~2.5s internal timeout; confirm against the manager.
const strongPcm = makeMonoPcm16( Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks(); const utteranceId = Math.max( 0, Number(session.userCaptures.get("speaker-1")?.asrUtteranceId || asrState.utterance?.id || 0) ); assert.ok(utteranceId > 0); recordCommittedInterruptDecision(session, utteranceId); voxClient.emit("userAudioEnd", "speaker-1"); await new Promise((resolve) => setTimeout(resolve, 2600));

assert.equal(runtimeEvents.length, 1); assert.equal(runtimeEvents[0]?.source, "unclear_empty_asr_bridge_turn"); assert.match(String(runtimeEvents[0]?.transcript || ""), /interrupted you, but their words were unclear/i); assert.equal( logs.some((entry) => entry?.content === "openai_realtime_asr_bridge_empty_dropped"), true ); assert.equal( logs.some((entry) => entry?.content === "voice_interrupt_unclear_turn_handoff_requested"), true ); });

// Counterpart to the handoff test: with an interrupted reply but NO committed
// interrupt decision recorded, the empty ASR turn must not reach the voice
// brain; the handoff is skipped with skipReason missing_committed_interrupt_turn
// while the empty turn itself is still dropped.
test("startInboundCapture does not hand empty ASR turns back to the voice brain without a committed interrupt", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const runtimeEvents = []; manager.fireVoiceRuntimeEvent = async (payload) => { runtimeEvents.push(payload); return true; }; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, interruptedAssistantReply: { utteranceText: "no, wait, one more thing", interruptedByUserId: "speaker-1", interruptedAt: Date.now() - 200, source: "barge_in_interrupt", interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" } } }); seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

// Same audio flow as the handoff test, but no recordCommittedInterruptDecision call.
const strongPcm = makeMonoPcm16( Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks(); voxClient.emit("userAudioEnd", "speaker-1"); await new Promise((resolve) => setTimeout(resolve, 2600));

assert.equal(runtimeEvents.length, 0); assert.equal( logs.some((entry) => entry?.content === "voice_interrupt_unclear_turn_handoff_requested"), false ); const skippedLog = logs.find((entry) => entry?.content === "voice_interrupt_unclear_turn_handoff_skipped"); assert.equal(Boolean(skippedLog), true); assert.equal(skippedLog?.metadata?.skipReason, "missing_committed_interrupt_turn"); assert.equal( logs.some((entry) => entry?.content === "openai_realtime_asr_bridge_empty_dropped"), true ); });

// A pendingResponse that has not yet produced audio (audioReceivedAt: 0) must
// survive a server-VAD-confirmed capture: no cancelActiveResponse call and no
// superseded log, even though the capture is promoted.
test("server-vad-confirmed capture does not cancel a pending pre-audio normal reply", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const cancelCalls: boolean[] = []; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, realtimeClient: { cancelActiveResponse() { cancelCalls.push(true); return true; } }, pendingResponse: { requestId: 7, requestedAt: Date.now(), source: "realtime:speech_1", handlingSilence: false, audioReceivedAt: 0, interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, utteranceText: "yo", latencyContext: null, userId: null, retryCount: 0, hardRecoveryAttempted: false } }); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1");

// Pre-confirm server VAD for the current utterance before any audio arrives.
manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot }); asrState.speechDetectedUtteranceId = asrState.utterance.id; asrState.speechDetectedAt = Date.now();

const strongPcm = makeMonoPcm16(Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks();

const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); assert.ok(Number(capture.promotedAt || 0) > 0); assert.equal(capture.promotionReason, "server_vad_confirmed"); assert.ok(session.pendingResponse); assert.equal(cancelCalls.length, 0); assert.equal( logs.some((entry) => entry?.content === "voice_preplay_reply_superseded_for_user_speech"), false ); });

// Same invariant as above but via local-only promotion: without server VAD
// confirmation, a strong_local_audio promotion must not cancel the pending
// pre-audio reply either.
test("promoting a local-only strong-audio capture does not cancel pending pre-audio reply before server VAD confirmation", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const cancelCalls: boolean[] = []; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, realtimeClient: { cancelActiveResponse() { cancelCalls.push(true); return true; } }, pendingResponse: { requestId: 7, requestedAt: Date.now(), source: "realtime:speech_1", handlingSilence: false, audioReceivedAt: 0, interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, utteranceText: "yo", latencyContext: null, userId: null, retryCount: 0, hardRecoveryAttempted: false } }); seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

const strongPcm = makeMonoPcm16( Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks();

const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); assert.ok(Number(capture.promotedAt || 0) > 0); assert.equal(capture.promotionReason, "strong_local_audio"); assert.ok(session.pendingResponse); assert.equal(cancelCalls.length, 0); assert.equal( logs.some((entry) => entry?.content === "voice_preplay_reply_superseded_for_user_speech"), false ); });

// Emitting two strong chunks against a pending pre-audio reply must produce
// exactly ONE voice_barge_in_gate log (deduplicated), denying the barge-in
// with reason pending_response_pre_audio and the expected metadata.
test("startInboundCapture logs one voice_barge_in_gate for a promoted capture overlapping a pending pre-audio reply", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, pendingResponse: { requestId: 7, requestedAt: Date.now(), source: "realtime:speech_1", handlingSilence: false, audioReceivedAt: 0, interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, utteranceText: "yo", latencyContext: null, userId: null, retryCount: 0, hardRecoveryAttempted: false } }); seedReadyPerUserAsr(manager, session, "speaker-1");

manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot });

// Two consecutive strong chunks, flushed separately, to probe the dedupe path.
const strongPcm = makeMonoPcm16( Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000 ); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks(); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks();

const bargeGateLogs = logs.filter((entry) => entry?.content === "voice_barge_in_gate"); assert.equal(bargeGateLogs.length, 1); assert.equal(bargeGateLogs[0]?.userId, "speaker-1"); assert.equal(bargeGateLogs[0]?.metadata?.allow, false); assert.equal(bargeGateLogs[0]?.metadata?.reason, "pending_response_pre_audio"); assert.equal(bargeGateLogs[0]?.metadata?.stage, "barge_in"); assert.equal(bargeGateLogs[0]?.metadata?.sessionId, "session-1"); });

// With an active tool_followup voice-command state and a pending pre-audio
// tool-followup response, a server-VAD-confirmed capture must leave both
// intact (no cancel, state preserved) and evaluateVoiceReplyDecision must
// deny a new reply for the speaker's transcript.
test( "server-vad-confirmed capture leaves pending pre-audio tool followup intact", async () => { const { manager, logs } = createManager(); manager.shouldUsePerUserTranscription = () => true; const cancelCalls: boolean[] = []; const voxClient = new EventEmitter(); voxClient.subscribeUser = () => {}; const session = createSession({ voxClient, realtimeClient: { cancelActiveResponse() { cancelCalls.push(true); return true; } }, voiceCommandState: { userId: "speaker-1", domain: "tool", intent: "tool_followup", startedAt: Date.now(), expiresAt: Date.now() + 10_000 }, pendingResponse: { requestId: 8, requestedAt: Date.now(), source: "tool_call_followup", handlingSilence: false, audioReceivedAt: 0, interruptionPolicy: { assertive: true, scope: "speaker", allowedUserId: "speaker-1" }, utteranceText: "which one did you want?", latencyContext: null, userId: "speaker-1", retryCount: 0, hardRecoveryAttempted: false } }); const asrState = seedReadyPerUserAsr(manager, session, "speaker-1");

// Confirm server VAD for the live utterance before the strong chunk arrives.
manager.captureManager.startInboundCapture({ session, userId: "speaker-1", settings: session.settingsSnapshot }); asrState.speechDetectedUtteranceId = asrState.utterance.id; asrState.speechDetectedAt = Date.now();

const strongPcm = makeMonoPcm16(Math.ceil((24_000 * (VOICE_TURN_PROMOTION_MIN_CLIP_MS + 40)) / 1000), 3000); voxClient.emit("userAudio", "speaker-1", strongPcm); await flushMicrotasks();

const capture = session.userCaptures.get("speaker-1"); assert.ok(capture); assert.equal(capture.promotionReason, "server_vad_confirmed"); assert.ok(session.pendingResponse); assert.equal(cancelCalls.length, 0); assert.equal(manager.ensureVoiceCommandState(session)?.intent, "tool_followup");

const decision = await manager.evaluateVoiceReplyDecision({ session, userId: "speaker-1", settings: session.settingsSnapshot, transcript: "yeah do that" });

assert.equal(decision.allow, false); assert.equal( logs.some((entry) => entry?.content === "voice_preplay_reply_superseded_for_user_speech"), false ); });