feat: implement WebSocket server with VoiceAgent for real-time voice interaction

- Added a new WebSocket server implementation in `ws-server-2.ts` that utilizes the `VoiceAgent` for handling voice interactions.
- Integrated weather and time tools using the `ai` library for enhanced responses.
- Refactored existing `ws-server.ts` to streamline the connection handling and event logging.
- Enhanced `VoiceAgent` to support streaming speech generation with improved chunk handling and interruption capabilities.
- Introduced new event listeners for better logging and handling of speech-related events.
- Added graceful shutdown handling for the WebSocket server.
This commit is contained in:
Bijit Mondal
2026-02-13 17:16:12 +05:30
parent c1cd705d49
commit 6510232655
6 changed files with 1749 additions and 174 deletions

View File

@@ -1,140 +1,131 @@
import "dotenv/config";
import { WebSocketServer, WebSocket } from "ws";
import { readFile } from "fs/promises";
import { existsSync } from "fs";
import { WebSocketServer } from "ws";
import { VoiceAgent } from "../src";
import { tool } from "ai";
import { z } from "zod";
import { openai } from "@ai-sdk/openai";
// ── Server address configuration ───────────────────────────────────────
// Resolve the listen address from VOICE_WS_ENDPOINT; an unset or empty
// variable falls back to the local development endpoint.
const endpoint = process.env.VOICE_WS_ENDPOINT || "ws://localhost:8080";
const url = new URL(endpoint);
const { hostname, port: rawPort } = url;
// URL.port is "" when no explicit port is present, so default to 8080.
const port = Number(rawPort || 8080);
// Likewise default the host when the URL yields an empty hostname.
const host = hostname || "localhost";
// Message types for type safety
/**
 * Common envelope for every socket message; `type` is the string
 * discriminant that selects a concrete variant (see AgentMessage below).
 */
interface BaseMessage {
type: string;
}
// ── Tools (same as demo.ts) ────────────────────────────────────────────
/**
 * Mock weather lookup: echoes the location back with a pseudo-random
 * temperature (62–82) and a randomly chosen sky condition.
 */
const weatherTool = tool({
  description: "Get the weather in a location",
  inputSchema: z.object({
    location: z.string().describe("The location to get the weather for"),
  }),
  execute: async ({ location }) => {
    const skies = ["sunny", "cloudy", "rainy", "partly cloudy"];
    const skyIndex = Math.floor(Math.random() * skies.length);
    const degrees = 72 + Math.floor(Math.random() * 21) - 10;
    return { location, temperature: degrees, conditions: skies[skyIndex] };
  },
});
/** Incremental assistant text streamed token-by-token from the agent. */
interface TextDeltaMessage extends BaseMessage {
type: "text_delta";
text: string;
}
/** Notification that the agent invoked a tool with the given input. */
interface ToolCallMessage extends BaseMessage {
type: "tool_call";
toolName: string;
toolCallId: string;
input: unknown;
}
/** Output produced for a previously announced tool call. */
interface ToolResultMessage extends BaseMessage {
type: "tool_result";
toolName: string;
toolCallId: string;
result: unknown;
}
/** TTS audio payload; `data` is base64-encoded bytes in `format`. */
interface AudioMessage extends BaseMessage {
type: "audio";
data: string; // base64 encoded
format: string;
}
/** Final summary emitted once a full response has been delivered. */
interface ResponseCompleteMessage extends BaseMessage {
type: "response_complete";
text: string;
toolCalls: Array<{ toolName: string; toolCallId: string; input: unknown }>;
toolResults: Array<{ toolName: string; toolCallId: string; output: unknown }>;
}
/** Discriminated union of every message handled by the switch below. */
type AgentMessage =
| TextDeltaMessage
| ToolCallMessage
| ToolResultMessage
| AudioMessage
| ResponseCompleteMessage;
/** Reports the server's current local time and IANA timezone name. */
const timeTool = tool({
  description: "Get the current time",
  inputSchema: z.object({}),
  execute: async () => {
    const now = new Date();
    return {
      time: now.toLocaleTimeString(),
      timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
    };
  },
});
// ── WebSocket server ───────────────────────────────────────────────────
// Bind to the host/port parsed from VOICE_WS_ENDPOINT above.
const wss = new WebSocketServer({ port, host });
wss.on("listening", () => {
// NOTE(review): the next two lines appear to be the old and new versions
// of the same log statement left interleaved by a diff — confirm that
// only one should remain in the live file.
console.log(`[ws-server] 🚀 listening on ${endpoint}`);
console.log(`[ws-server] listening on ${endpoint}`);
console.log("[ws-server] Waiting for connections...\n");
});
// NOTE(review): the two lines below are duplicate handler headers — the
// old diff side typed the socket explicitly, the new side relies on
// inference. Only one can be live; confirm against the repository.
wss.on("connection", (socket: WebSocket) => {
wss.on("connection", (socket) => {
console.log("[ws-server] ✓ client connected");
// Per-connection accumulators used by the message switch below.
let streamingText = "";
let audioChunks: Buffer[] = [];
// Send a sample transcript to test text pipeline end-to-end.
// Fires once, 500 ms after the client connects.
setTimeout(() => {
console.log("[ws-server] -> Sending test transcript...");
socket.send(
JSON.stringify({
type: "transcript",
text: "What is the weather in Berlin?",
}),
);
}, 500);
// Dispatch incoming agent messages by their `type` discriminant.
// NOTE(review): this handler belongs to the old diff side; its closing
// `});` is not visible in this hunk — verify brace balance in the file.
socket.on("message", async (data) => {
try {
const msg = JSON.parse(data.toString()) as AgentMessage;
switch (msg.type) {
case "text_delta":
// Real-time streaming text from the agent
streamingText += msg.text;
process.stdout.write(msg.text);
break;
case "tool_call":
console.log(`\n[ws-server] 🛠️ Tool call: ${msg.toolName}`);
console.log(` Input: ${JSON.stringify(msg.input)}`);
break;
case "tool_result":
console.log(`[ws-server] 🛠️ Tool result: ${msg.toolName}`);
console.log(` Result: ${JSON.stringify(msg.result)}`);
break;
case "audio":
// Handle audio response from TTS
// Decode the base64 payload and buffer it for this response.
const audioBuffer = Buffer.from(msg.data, "base64");
audioChunks.push(audioBuffer);
console.log(
`[ws-server] 🔊 Received audio: ${audioBuffer.length} bytes (${msg.format})`,
);
// Optionally save audio to file for testing
// await writeFile(`output_${Date.now()}.${msg.format}`, audioBuffer);
break;
case "response_complete":
console.log("\n[ws-server] ✅ Response complete");
console.log(` Text length: ${msg.text.length}`);
console.log(` Tool calls: ${msg.toolCalls.length}`);
console.log(` Tool results: ${msg.toolResults.length}`);
// Reset for next response
streamingText = "";
audioChunks = [];
break;
default:
console.log("[ws-server] <- Unknown message:", msg);
}
} catch {
// Non-JSON payloads are logged raw (truncated to 100 chars).
console.log("[ws-server] <- raw", data.toString().substring(0, 100));
}
// Create a fresh VoiceAgent per connection
// Composes an OpenAI chat model with transcription (STT) and speech (TTS)
// models, plus the two demo tools declared above.
const agent = new VoiceAgent({
model: openai("gpt-4o"),
transcriptionModel: openai.transcription("whisper-1"),
speechModel: openai.speech("gpt-4o-mini-tts"),
instructions: `You are a helpful voice assistant.
Keep responses concise and conversational since they will be spoken aloud.
Use tools when needed to provide accurate information.`,
voice: "alloy",
speechInstructions: "Speak in a friendly, natural conversational tone.",
outputFormat: "mp3",
// NOTE(review): chunk sizes presumably bound the text length per TTS
// request — confirm units/semantics against the VoiceAgent docs.
streamingSpeech: {
minChunkSize: 40,
maxChunkSize: 180,
parallelGeneration: true,
maxParallelRequests: 2,
},
tools: {
getWeather: weatherTool,
getTime: timeTool,
},
});
// NOTE(review): the `socket.on("close", ...)` header below is an old diff
// line whose body was replaced by the agent event wiring; its closing
// brace placement cannot be verified from this hunk — check the file.
socket.on("close", () => {
// Wire agent events to server logs
agent.on("text", (msg: { role: string; text: string }) => {
const prefix = msg.role === "user" ? "👤 User" : "🤖 Assistant";
console.log(`[ws-server] ${prefix}: ${msg.text}`);
});
// Stream assistant text deltas straight to stdout (no newline per delta).
agent.on("chunk:text_delta", ({ text }: { text: string }) => {
process.stdout.write(text);
});
agent.on("chunk:tool_call", ({ toolName }: { toolName: string }) => {
console.log(`\n[ws-server] 🛠️ Tool call: ${toolName}`);
});
agent.on("tool_result", ({ name, result }: { name: string; result: unknown }) => {
console.log(`[ws-server] 🛠️ Tool result (${name}):`, JSON.stringify(result));
});
// Speech lifecycle logging: start, completion, and interruption.
agent.on("speech_start", () => console.log("[ws-server] 🔊 Speech started"));
agent.on("speech_complete", () => console.log("[ws-server] 🔊 Speech complete"));
agent.on("speech_interrupted", ({ reason }: { reason: string }) =>
console.log(`[ws-server] ⏸️ Speech interrupted: ${reason}`),
);
agent.on("audio_chunk", ({ chunkId, format, uint8Array }: { chunkId: number; format: string; uint8Array: Uint8Array }) => {
console.log(`[ws-server] 🔊 Audio chunk #${chunkId}: ${uint8Array.length} bytes (${format})`);
});
agent.on("transcription", ({ text, language }: { text: string; language?: string }) => {
console.log(`[ws-server] 📝 Transcription (${language || "unknown"}): ${text}`);
});
agent.on("audio_received", ({ size }: { size: number }) => {
console.log(`[ws-server] 🎤 Audio received: ${(size / 1024).toFixed(1)} KB`);
});
// Reasoning deltas share stdout with text deltas.
agent.on("chunk:reasoning_delta", ({ text }: { text: string }) => {
process.stdout.write(text);
});
agent.on("warning", (msg: string) => {
console.log(`[ws-server] ⚠️ Warning: ${msg}`);
});
agent.on("speech_chunk_queued", ({ id, text }: { id: number; text: string }) => {
console.log(`[ws-server] 🔊 Queued speech chunk #${id}: ${text.substring(0, 50)}...`);
});
agent.on("error", (err: Error) => console.error("[ws-server] ❌ Error:", err.message));
agent.on("disconnected", () => {
console.log("[ws-server] ✗ client disconnected\n");
});
socket.on("error", (error) => {
console.error("[ws-server] Error:", error.message);
});
// Hand the accepted socket to the agent — this is the key line.
// The agent will listen for "transcript", "audio", "interrupt" messages
// and send back "text_delta", "audio_chunk", "response_complete", etc.
agent.handleSocket(socket);
});
// Graceful shutdown
@@ -146,24 +137,4 @@ process.on("SIGINT", () => {
});
});
// Helper function to simulate sending audio to the agent
/**
 * Reads an audio file from disk and pushes it over the socket as a
 * base64-encoded "audio" message. Logs and returns without sending
 * anything when the file does not exist.
 */
async function simulateAudioInput(socket: WebSocket, audioPath: string) {
  if (!existsSync(audioPath)) {
    console.log(`[ws-server] Audio file not found: ${audioPath}`);
    return;
  }
  const bytes = await readFile(audioPath);
  console.log(`[ws-server] -> Sending audio: ${audioPath} (${bytes.length} bytes)`);
  const payload = { type: "audio", data: bytes.toString("base64") };
  socket.send(JSON.stringify(payload));
}
// Export for use as a module
// NOTE(review): both export statements below are present in this hunk —
// the old side also exported simulateAudioInput. Only one export list can
// be live; confirm against the repository before relying on either.
export { wss, simulateAudioInput };
export { wss };