Compare commits

..

1 Commits

Author SHA1 Message Date
snyk-bot
5c502371b3 fix: package.json & package-lock.json to reduce vulnerabilities
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-UNDICI-8641354
2025-01-22 08:48:33 +00:00
16 changed files with 9145 additions and 1468 deletions

View File

@@ -100,7 +100,6 @@ router.post('/',
...(req.body?.application_sid && {'X-Application-Sid': req.body.application_sid}),
...(restDial.fromHost && {'X-Preferred-From-Host': restDial.fromHost}),
...(record_all_calls && {'X-Record-All-Calls': recordOutputFormat}),
...(target.proxy && {'X-SIP-Proxy': target.proxy}),
...target.headers
};

View File

@@ -1084,12 +1084,6 @@ class CallSession extends Emitter {
api_key: credential.api_key
};
}
else if ('voxist' === vendor) {
return {
speech_credential_sid: credential.speech_credential_sid,
api_key: credential.api_key
};
}
else if ('whisper' === vendor) {
return {
api_key: credential.api_key,
@@ -1799,66 +1793,62 @@ Duration=${duration} `
async updateCall(opts, callSid) {
this.logger.debug(opts, 'CallSession:updateCall');
try {
if (opts.call_status) {
return this._lccCallStatus(opts);
}
if (opts.call_hook || opts.child_call_hook) {
return await this._lccCallHook(opts);
}
if (opts.listen_status || opts.stream_status) {
await this._lccListenStatus(opts);
}
if (opts.transcribe_status) {
await this._lccTranscribeStatus(opts);
}
else if (opts.mute_status) {
await this._lccMuteStatus(opts.mute_status === 'mute', callSid);
}
else if (opts.conf_hold_status) {
await this._lccConfHoldStatus(opts);
}
else if (opts.conf_mute_status) {
await this._lccConfMuteStatus(opts);
}
else if (opts.sip_request) {
const res = await this._lccSipRequest(opts, callSid);
return {status: res.status, reason: res.reason};
} else if (opts.dtmf) {
await this._lccDtmf(opts, callSid);
}
else if (opts.record) {
await this.notifyRecordOptions(opts.record);
}
else if (opts.tag) {
return this._lccTag(opts);
}
else if (opts.conferenceParticipantAction) {
return this._lccConferenceParticipantAction(opts.conferenceParticipantAction);
}
else if (opts.dub) {
return this._lccDub(opts.dub, callSid);
}
else if (opts.boostAudioSignal) {
return this._lccBoostAudioSignal(opts, callSid);
}
else if (opts.media_path) {
return this._lccMediaPath(opts.media_path, callSid);
}
else if (opts.llm_tool_output) {
return this._lccToolOutput(opts.tool_call_id, opts.llm_tool_output, callSid);
}
else if (opts.llm_update) {
return this._lccLlmUpdate(opts.llm_update, callSid);
}
if (opts.call_status) {
return this._lccCallStatus(opts);
}
if (opts.call_hook || opts.child_call_hook) {
return await this._lccCallHook(opts);
}
if (opts.listen_status || opts.stream_status) {
await this._lccListenStatus(opts);
}
if (opts.transcribe_status) {
await this._lccTranscribeStatus(opts);
}
else if (opts.mute_status) {
await this._lccMuteStatus(opts.mute_status === 'mute', callSid);
}
else if (opts.conf_hold_status) {
await this._lccConfHoldStatus(opts);
}
else if (opts.conf_mute_status) {
await this._lccConfMuteStatus(opts);
}
else if (opts.sip_request) {
const res = await this._lccSipRequest(opts, callSid);
return {status: res.status, reason: res.reason};
} else if (opts.dtmf) {
await this._lccDtmf(opts, callSid);
}
else if (opts.record) {
await this.notifyRecordOptions(opts.record);
}
else if (opts.tag) {
return this._lccTag(opts);
}
else if (opts.conferenceParticipantAction) {
return this._lccConferenceParticipantAction(opts.conferenceParticipantAction);
}
else if (opts.dub) {
return this._lccDub(opts.dub, callSid);
}
else if (opts.boostAudioSignal) {
return this._lccBoostAudioSignal(opts, callSid);
}
else if (opts.media_path) {
return this._lccMediaPath(opts.media_path, callSid);
}
else if (opts.llm_tool_output) {
return this._lccToolOutput(opts.tool_call_id, opts.llm_tool_output, callSid);
}
else if (opts.llm_update) {
return this._lccLlmUpdate(opts.llm_update, callSid);
}
// whisper may be the only thing we are asked to do, or it may be that
// we are doing a whisper after having muted, paused recording, etc.
if (opts.whisper) {
return this._lccWhisper(opts, callSid);
}
} catch (err) {
this.logger.info({err, opts, callSid}, 'CallSession:updateCall - error updating call');
// whisper may be the only thing we are asked to do, or it may be that
// we are doing a whisper after having muted, paused recording, etc.
if (opts.whisper) {
return this._lccWhisper(opts, callSid);
}
}
@@ -2494,7 +2484,7 @@ Duration=${duration} `
res.send(200, {body: this.ep.local.sdp});
}
else {
if (this.currentTask && this.currentTask.name === TaskName.Dial && this.currentTask.isOnHoldEnabled) {
if (this.currentTask.name === TaskName.Dial && this.currentTask.isOnHoldEnabled) {
this.logger.info('onholdMusic reINVITE after media has been released');
await this.currentTask.handleReinviteAfterMediaReleased(req, res);
} else {

View File

@@ -187,20 +187,18 @@ class TaskConfig extends Task {
: cs.speechRecognizerVendor;
cs.speechRecognizerLabel = this.recognizer.label === 'default'
? cs.speechRecognizerLabel : this.recognizer.label;
cs.speechRecognizerLanguage = this.recognizer.language !== undefined && this.recognizer.language !== 'default'
cs.speechRecognizerLanguage = this.recognizer.language !== 'default'
? this.recognizer.language
: cs.speechRecognizerLanguage;
//fallback
cs.fallbackSpeechRecognizerVendor = this.recognizer.fallbackVendor !== undefined &&
this.recognizer.fallbackVendor !== 'default'
cs.fallbackSpeechRecognizerVendor = this.recognizer.fallbackVendor !== 'default'
? this.recognizer.fallbackVendor
: cs.fallbackSpeechRecognizerVendor;
cs.fallbackSpeechRecognizerLabel = this.recognizer.fallbackLabel === 'default' ?
cs.fallbackSpeechRecognizerLabel :
this.recognizer.fallbackLabel;
cs.fallbackSpeechRecognizerLanguage = this.recognizer.fallbackLanguage !== undefined &&
this.recognizer.fallbackLanguage !== 'default'
cs.fallbackSpeechRecognizerLanguage = this.recognizer.fallbackLanguage !== 'default'
? this.recognizer.fallbackLanguage
: cs.fallbackSpeechRecognizerLanguage;

View File

@@ -230,10 +230,10 @@ class TaskDial extends Task {
try {
await this.epOther.play(this.dialMusic);
} catch (err) {
this.logger.error(err, `TaskDial:exec error playing dialMusic ${this.dialMusic}`);
this.logger.error(err, `TaskDial:exec error playing ${this.dialMusic}`);
await sleepFor(1000);
}
} while (!this.killed && !this.bridged && this._mediaPath === MediaPath.FullMedia);
} while (!this.killed || !this.bridged);
})();
}
}

View File

@@ -11,7 +11,6 @@ const {
NvidiaTranscriptionEvents,
JambonzTranscriptionEvents,
AssemblyAiTranscriptionEvents,
VoxistTranscriptionEvents,
VadDetection,
VerbioTranscriptionEvents,
SpeechmaticsTranscriptionEvents
@@ -525,17 +524,6 @@ class TaskGather extends SttTask {
this._onVendorConnectFailure.bind(this, cs, ep));
break;
case 'voxist':
this.bugname = `${this.bugname_prefix}voxist_transcribe`;
this.addCustomEventListener(ep, VoxistTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(
ep, VoxistTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, VoxistTranscriptionEvents.Error, this._onVendorError.bind(this, cs, ep));
this.addCustomEventListener(ep, VoxistTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
break;
case 'speechmatics':
this.bugname = `${this.bugname_prefix}speechmatics_transcribe`;
this.addCustomEventListener(
@@ -636,13 +624,6 @@ class TaskGather extends SttTask {
this._asrTimer = setTimeout(() => {
this.logger.debug('_startAsrTimer - asr timer went off');
const evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
/* special case for speechmatics - keep listening if we dont have any transcripts */
if (this.vendor === 'speechmatics' && this._bufferedTranscripts.length === 0) {
this.logger.debug('Gather:_startAsrTimer - speechmatics, no transcripts yet, keep listening');
this._startAsrTimer();
return;
}
this._resolve(this._bufferedTranscripts.length > 0 ? 'speech' : 'timeout', evt);
}, this.asrTimeout);
this.logger.debug(`_startAsrTimer: set for ${this.asrTimeout}ms`);
@@ -785,16 +766,10 @@ class TaskGather extends SttTask {
this.logger.debug('Gather:_onTranscription - got UtteranceEnd event from deepgram but no buffered transcripts');
}
else {
const utteranceTime = evt.last_word_end;
if (utteranceTime && this._dgTimeOfLastUnprocessedWord && utteranceTime < this._dgTimeOfLastUnprocessedWord) {
this.logger.debug('Gather:_onTranscription - got UtteranceEnd with unprocessed words, continue listening');
}
else {
this.logger.debug('Gather:_onTranscription - got UtteranceEnd from deepgram, return buffered transcript');
evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._bufferedTranscripts = [];
this._resolve('speech', evt);
}
this.logger.debug('Gather:_onTranscription - got UtteranceEnd event from deepgram, return buffered transcript');
evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._bufferedTranscripts = [];
this._resolve('speech', evt);
}
return;
}
@@ -805,7 +780,7 @@ class TaskGather extends SttTask {
evt = this.normalizeTranscription(evt, this.vendor, 1, this.language,
this.shortUtterance, this.data.recognizer.punctuation);
//this.logger.debug({evt, bugname, finished, vendor: this.vendor}, 'Gather:_onTranscription normalized transcript');
this.logger.debug({evt, bugname, finished, vendor: this.vendor}, 'Gather:_onTranscription normalized transcript');
if (evt.alternatives.length === 0) {
this.logger.info({evt}, 'TaskGather:_onTranscription - got empty transcript, continue listening');
@@ -813,6 +788,8 @@ class TaskGather extends SttTask {
}
const confidence = evt.alternatives[0].confidence;
const minConfidence = this.data.recognizer?.minConfidence;
this.logger.debug({evt},
`TaskGather:_onTranscription - confidence (${confidence}), minConfidence (${minConfidence})`);
if (confidence && minConfidence && confidence < minConfidence) {
this.logger.info({evt},
'TaskGather:_onTranscription - Transcript confidence ' +
@@ -928,21 +905,8 @@ class TaskGather extends SttTask {
if (originalEvent.is_final && evt.alternatives[0].transcript !== '') {
this.logger.debug({evt}, 'Gather:_onTranscription - buffering a completed (partial) deepgram transcript');
this._bufferedTranscripts.push(evt);
this._dgTimeOfLastUnprocessedWord = null;
}
if (evt.alternatives[0].transcript === '') {
emptyTranscript = true;
}
else if (!originalEvent.is_final) {
/* Deepgram: we have unprocessed words-save last word end time so we can later compare to UtteranceEnd */
const words = originalEvent.channel.alternatives[0].words;
if (words?.length > 0) {
this._dgTimeOfLastUnprocessedWord = words.slice(-1)[0].end;
this.logger.debug(
`TaskGather:_onTranscription - saving word end time: ${this._dgTimeOfLastUnprocessedWord}`);
}
}
if (evt.alternatives[0].transcript === '') emptyTranscript = true;
}
if (!emptyTranscript) {
@@ -1212,7 +1176,7 @@ class TaskGather extends SttTask {
if (this.parentTask) this.parentTask.emit('stt-low-confidence', evt);
else {
this.emit('stt-low-confidence', evt);
returnedVerbs = await this.performAction({speech:evt, reason: 'stt-low-confidence'});
returnedVerbs = await this.performAction({reason: 'stt-low-confidence'});
}
}
} catch (err) { /*already logged error*/ }

View File

@@ -3,7 +3,6 @@ const {TaskPreconditions} = require('../../utils/constants');
const TaskLlmOpenAI_S2S = require('./llms/openai_s2s');
const TaskLlmVoiceAgent_S2S = require('./llms/voice_agent_s2s');
const TaskLlmUltravox_S2S = require('./llms/ultravox_s2s');
const TaskLlmElevenlabs_S2S = require('./llms/elevenlabs_s2s');
class TaskLlm extends Task {
constructor(logger, opts) {
@@ -55,10 +54,6 @@ class TaskLlm extends Task {
llm = new TaskLlmUltravox_S2S(this.logger, this.data, this);
break;
case 'elevenlabs':
llm = new TaskLlmElevenlabs_S2S(this.logger, this.data, this);
break;
default:
throw new Error(`Unsupported vendor ${this.vendor} for LLM`);
}

View File

@@ -1,302 +0,0 @@
const Task = require('../../task');
const TaskName = 'Llm_Elevenlabs_s2s';
const {LlmEvents_Elevenlabs} = require('../../../utils/constants');
const {request} = require('undici');
const ClientEvent = 'client.event';
const SessionDelete = 'session.delete';
const elevenlabs_server_events = [
'conversation_initiation_metadata',
'user_transcript',
'agent_response',
'client_tool_call'
];
/**
 * Expand any wildcard entries (e.g. "conversation.*") in a list of event
 * names into the concrete Elevenlabs server events that share the prefix.
 * Non-wildcard entries pass through unchanged, in order.
 */
const expandWildcards = (events) => {
  const expanded = [];
  for (const evt of events) {
    if (evt.endsWith('.*')) {
      const prefix = evt.slice(0, -2); // Remove the wildcard ".*"
      for (const serverEvent of elevenlabs_server_events) {
        if (serverEvent.startsWith(prefix)) expanded.push(serverEvent);
      }
    } else {
      expanded.push(evt);
    }
  }
  return expanded;
};
/**
 * Bridges a call endpoint to the Elevenlabs conversational-AI (speech-to-speech)
 * websocket API. Constructed by the parent `llm` verb task, which supplies
 * vendor/auth and owns the otel span; this task drives the freeswitch module
 * via `uuid_elevenlabs_s2s` api commands and relays server events to webhooks.
 */
class TaskLlmElevenlabs_S2S extends Task {
  /**
   * @param {Object} logger - logger instance
   * @param {Object} opts - verb payload (exposed as this.data by Task)
   * @param {Task} parentTask - enclosing llm task; provides vendor, auth, hooks
   * @throws {Error} if auth.agent_id is missing
   */
  constructor(logger, opts, parentTask) {
    super(logger, opts, parentTask);
    this.parent = parentTask;
    this.vendor = this.parent.vendor;
    this.auth = this.parent.auth;

    const {agent_id, api_key} = this.auth || {};
    if (!agent_id) throw new Error('auth.agent_id is required for Elevenlabs S2S');
    this.agent_id = agent_id;
    this.api_key = api_key;

    this.actionHook = this.data.actionHook;
    this.eventHook = this.data.eventHook;
    this.toolHook = this.data.toolHook;

    const {
      conversation_initiation_client_data,
      input_sample_rate = 16000,
      output_sample_rate = 16000
    } = this.data.llmOptions;
    this.conversation_initiation_client_data = conversation_initiation_client_data;
    this.input_sample_rate = input_sample_rate;
    this.output_sample_rate = output_sample_rate;

    // reported to the actionHook when the task completes
    this.results = {
      completionReason: 'normal conversation end'
    };

    /**
     * only one of these will have items,
     * if includeEvents, then these are the events to include
     * if excludeEvents, then these are the events to exclude
     */
    this.includeEvents = [];
    this.excludeEvents = [];

    /* default to all events if user did not specify */
    this._populateEvents(this.data.events || elevenlabs_server_events);

    // delegate listener bookkeeping to the parent so cleanup happens in one place
    this.addCustomEventListener = parentTask.addCustomEventListener.bind(parentTask);
    this.removeCustomEventListeners = parentTask.removeCustomEventListeners.bind(parentTask);
  }

  get name() { return TaskName; }

  /**
   * Resolve the websocket host/path for the conversation.
   * Without an api_key, returns the public (unsigned) endpoint; with one,
   * requests a signed url from the Elevenlabs REST API.
   * @returns {Promise<{host: string, path: string}>}
   * @throws {Error} if the signed-url request does not return 200 with a signed_url
   */
  async getSignedUrl() {
    if (!this.api_key) {
      return {
        host: 'api.elevenlabs.io',
        path: `/v1/convai/conversation?agent_id=${this.agent_id}`,
      };
    }
    const {statusCode, body} = await request(
      `https://api.elevenlabs.io/v1/convai/conversation/get_signed_url?agent_id=${this.agent_id}`, {
        method: 'GET',
        headers: {
          'xi-api-key': this.api_key
        },
      }
    );
    const data = await body.json();
    if (statusCode !== 200 || !data?.signed_url) {
      this.logger.error({statusCode, data}, 'Elevenlabs Error registering call');
      throw new Error(`Elevenlabs Error registering call: ${data.message}`);
    }
    const url = new URL(data.signed_url);
    return {
      host: url.hostname,
      path: url.pathname + url.search,
    };
  }

  /**
   * Invoke the freeswitch uuid_elevenlabs_s2s api with pipe-delimited args.
   * @throws {Error} if the module does not reply +OK
   */
  async _api(ep, args) {
    const res = await ep.api('uuid_elevenlabs_s2s', `^^|${args.join('|')}`);
    if (!res.body?.startsWith('+OK')) {
      // Error() takes a message string; the previous form passed an object and
      // produced "[object Object]" instead of the failure details
      throw new Error(`Error calling uuid_elevenlabs_s2s (args: ${args.join('|')}): ${res.body}`);
    }
  }

  /** Run the conversation until done, then report results to the parent's actionHook. */
  async exec(cs, {ep}) {
    await super.exec(cs);

    await this._startListening(cs, ep);

    await this.awaitTaskDone();

    /* note: the parent llm verb started the span, which is why this is necessary */
    await this.parent.performAction(this.results);
    this._unregisterHandlers();
  }

  /** Tear down: best-effort session.delete toward the module, then signal done. */
  async kill(cs) {
    super.kill(cs);
    this._api(cs.ep, [cs.ep.uuid, SessionDelete])
      .catch((err) => this.logger.info({err}, 'TaskLlmElevenlabs_S2S:kill - error deleting session'));
    this.notifyTaskDone();
  }

  /**
   * Send function call output to the Elevenlabs server in the form of conversation.item.create
   * per https://elevenlabs.io/docs/conversational-ai/api-reference/conversational-ai/websocket
   */
  async processToolOutput(ep, tool_call_id, rawData) {
    try {
      const {data} = rawData;
      this.logger.debug({tool_call_id, data}, 'TaskLlmElevenlabs_S2S:processToolOutput');

      if (!data.type || data.type !== 'client_tool_result') {
        this.logger.info({data},
          'TaskLlmElevenlabs_S2S:processToolOutput - invalid tool output, must be client_tool_result');
      }
      else {
        await this._api(ep, [ep.uuid, ClientEvent, JSON.stringify(data)]);
      }
    } catch (err) {
      this.logger.info({err}, 'TaskLlmElevenlabs_S2S:processToolOutput');
    }
  }

  /**
   * Send a session.update to the Elevenlabs server
   * Note: creating and deleting conversation items also supported as well as interrupting the assistant
   */
  async processLlmUpdate(ep, data, _callSid) {
    // Elevenlabs has no session.update equivalent; log and drop
    this.logger.debug({data, _callSid}, 'TaskLlmElevenlabs_S2S:processLlmUpdate, ignored');
  }

  /** Register event handlers then ask the module to open the websocket session. */
  async _startListening(cs, ep) {
    this._registerHandlers(ep);

    try {
      const {host, path} = await this.getSignedUrl();
      const args = [ep.uuid, 'session.create', this.input_sample_rate, this.output_sample_rate, host, path];
      await this._api(ep, args);
    } catch (err) {
      this.logger.error({err}, 'TaskLlmElevenlabs_S2S:_startListening');
      this.notifyTaskDone();
    }
  }

  /**
   * Send a client event (JSON payload) to the Elevenlabs server.
   * @returns {Promise<boolean>} true on success, false if the api call failed
   */
  async _sendClientEvent(ep, obj) {
    let ok = true;
    this.logger.debug({obj}, 'TaskLlmElevenlabs_S2S:_sendClientEvent');
    try {
      const args = [ep.uuid, ClientEvent, JSON.stringify(obj)];
      await this._api(ep, args);
    } catch (err) {
      ok = false;
      this.logger.error({err}, 'TaskLlmElevenlabs_S2S:_sendClientEvent - Error');
    }
    return ok;
  }

  /** After connect, send conversation_initiation_client_data if configured; end task on failure. */
  async _sendInitialMessage(ep) {
    if (this.conversation_initiation_client_data) {
      if (!await this._sendClientEvent(ep, {
        type: 'conversation_initiation_client_data',
        conversation_initiation_client_data: this.conversation_initiation_client_data
      })) {
        this.notifyTaskDone();
      }
    }
  }

  _registerHandlers(ep) {
    this.addCustomEventListener(ep, LlmEvents_Elevenlabs.Connect, this._onConnect.bind(this, ep));
    this.addCustomEventListener(ep, LlmEvents_Elevenlabs.ConnectFailure, this._onConnectFailure.bind(this, ep));
    this.addCustomEventListener(ep, LlmEvents_Elevenlabs.Disconnect, this._onDisconnect.bind(this, ep));
    this.addCustomEventListener(ep, LlmEvents_Elevenlabs.ServerEvent, this._onServerEvent.bind(this, ep));
  }

  _unregisterHandlers() {
    this.removeCustomEventListeners();
  }

  _onError(ep, evt) {
    this.logger.info({evt}, 'TaskLlmElevenlabs_S2S:_onError');
    this.notifyTaskDone();
  }

  _onConnect(ep) {
    this.logger.debug('TaskLlmElevenlabs_S2S:_onConnect');
    this._sendInitialMessage(ep);
  }

  _onConnectFailure(_ep, evt) {
    this.logger.info(evt, 'TaskLlmElevenlabs_S2S:_onConnectFailure');
    this.results = {completionReason: 'connection failure'};
    this.notifyTaskDone();
  }

  _onDisconnect(_ep, evt) {
    // fixed copy-paste: this previously logged the _onConnectFailure tag
    this.logger.info(evt, 'TaskLlmElevenlabs_S2S:_onDisconnect');
    this.results = {completionReason: 'disconnect from remote end'};
    this.notifyTaskDone();
  }

  /**
   * Handle an event from the Elevenlabs server: surface errors, dispatch
   * client_tool_call to the toolHook, and forward the event to the eventHook
   * when the include/exclude filters allow it.
   */
  async _onServerEvent(ep, evt) {
    let endConversation = false;
    const type = evt.type;
    this.logger.info({evt}, 'TaskLlmElevenlabs_S2S:_onServerEvent');

    if (type === 'error') {
      endConversation = true;
      this.results = {
        completionReason: 'server error',
        error: evt.error
      };
    }

    /* tool calls */
    else if (type === 'client_tool_call') {
      this.logger.debug({evt}, 'TaskLlmElevenlabs_S2S:_onServerEvent - function_call');
      if (!this.toolHook) {
        this.logger.warn({evt}, 'TaskLlmElevenlabs_S2S:_onServerEvent - no toolHook defined!');
      }
      else {
        const {client_tool_call} = evt;
        const {tool_name: name, tool_call_id: call_id, parameters: args} = client_tool_call;
        try {
          await this.parent.sendToolHook(call_id, {name, args});
        } catch (err) {
          this.logger.info({err, evt}, 'TaskLlmElevenlabs_S2S - error calling function');
          this.results = {
            completionReason: 'client error calling function',
            error: err
          };
          endConversation = true;
        }
      }
    }

    /* check whether we should notify on this event */
    if (this.includeEvents.length > 0 ? this.includeEvents.includes(type) : !this.excludeEvents.includes(type)) {
      this.parent.sendEventHook(evt)
        .catch((err) => this.logger.info({err},
          'TaskLlmElevenlabs_S2S:_onServerEvent - error sending event hook'));
    }

    if (endConversation) {
      this.logger.info({results: this.results},
        'TaskLlmElevenlabs_S2S:_onServerEvent - ending conversation due to error');
      this.notifyTaskDone();
    }
  }

  /**
   * Build the include/exclude event filters from the user-supplied list.
   * 'all' switches to exclusion mode, where '-name' entries (wildcards allowed)
   * are excluded; otherwise the listed names (wildcards expanded) are included.
   */
  _populateEvents(events) {
    if (events.includes('all')) {
      /* work by excluding specific events */
      const exclude = events
        .filter((evt) => evt.startsWith('-'))
        .map((evt) => evt.slice(1));
      if (exclude.length === 0) this.includeEvents = elevenlabs_server_events;
      else this.excludeEvents = expandWildcards(exclude);
    }
    else {
      /* work by including specific events */
      const include = events
        .filter((evt) => !evt.startsWith('-'));
      this.includeEvents = expandWildcards(include);
    }
    this.logger.debug({
      includeEvents: this.includeEvents,
      excludeEvents: this.excludeEvents
    }, 'TaskLlmElevenlabs_S2S:_populateEvents');
  }
}
module.exports = TaskLlmElevenlabs_S2S;

View File

@@ -213,7 +213,7 @@ class TaskSay extends TtsTask {
ep.once('playback-start', (evt) => {
this.logger.debug({evt}, 'Say got playback-start');
if (this.otelSpan) {
this._addStreamingTtsAttributes(this.otelSpan, evt, vendor);
this._addStreamingTtsAttributes(this.otelSpan, evt);
this.otelSpan.end();
this.otelSpan = null;
if (evt.variable_tts_cache_filename) {
@@ -240,7 +240,6 @@ class TaskSay extends TtsTask {
language,
voice,
engine,
model: this.model || this.model_id,
text
}).catch((err) => this.logger.info({err}, 'Error adding file to cache'));
}
@@ -308,7 +307,7 @@ class TaskSay extends TtsTask {
this.notifyTaskDone();
}
_addStreamingTtsAttributes(span, evt, vendor) {
_addStreamingTtsAttributes(span, evt) {
const attrs = {'tts.cached': false};
for (const [key, value] of Object.entries(evt)) {
if (key.startsWith('variable_tts_')) {
@@ -322,9 +321,6 @@ class TaskSay extends TtsTask {
.replace('elevenlabs_', 'elevenlabs.');
if (spanMapping[newKey]) newKey = spanMapping[newKey];
attrs[newKey] = value;
if (key === 'variable_tts_time_to_first_byte_ms' && value) {
this.cs.srf.locals.stats.histogram('tts.response_time', value, [`vendor:${vendor}`]);
}
}
}
delete attrs['cache_filename']; //no value in adding this to the span

View File

@@ -13,7 +13,6 @@ const {
JambonzTranscriptionEvents,
TranscribeStatus,
AssemblyAiTranscriptionEvents,
VoxistTranscriptionEvents,
VerbioTranscriptionEvents,
SpeechmaticsTranscriptionEvents
} = require('../utils/constants.json');
@@ -301,17 +300,6 @@ class TaskTranscribe extends SttTask {
this._onVendorConnectFailure.bind(this, cs, ep, channel));
break;
case 'voxist':
this.bugname = `${this.bugname_prefix}voxist_transcribe`;
this.addCustomEventListener(ep, VoxistTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
this.addCustomEventListener(ep,
VoxistTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, VoxistTranscriptionEvents.Error, this._onVendorError.bind(this, cs, ep));
this.addCustomEventListener(ep, VoxistTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep, channel));
break;
case 'speechmatics':
this.bugname = `${this.bugname_prefix}speechmatics_transcribe`;
this.addCustomEventListener(

View File

@@ -143,16 +143,16 @@ class TtsTask extends Task {
`No text-to-speech service credentials for ${vendor} with labels: ${label} have been configured`);
}
/* parse Nuance voices into name and model */
let model;
if (vendor === 'nuance' && voice) {
const arr = /([A-Za-z-]*)\s+-\s+(enhanced|standard)/.exec(voice);
if (arr) {
voice = arr[1];
this.model = arr[2];
model = arr[2];
}
} else if (vendor === 'deepgram') {
this.model = voice;
model = voice;
}
this.model_id = credentials.model_id;
/* allow for microsoft custom region voice and api_key to be specified as an override */
if (vendor === 'microsoft' && this.options.deploymentId) {
@@ -215,8 +215,7 @@ class TtsTask extends Task {
// If vendor is changed from the previous one, then reset the cache_speech_handles flag
//cs.currentTtsVendor = vendor;
if (!preCache && !this._disableTracing)
this.logger.info({vendor, language, voice, model: this.model}, 'TaskSay:exec');
if (!preCache && !this._disableTracing) this.logger.info({vendor, language, voice, model}, 'TaskSay:exec');
try {
if (!credentials) {
writeAlerts({
@@ -251,7 +250,7 @@ class TtsTask extends Task {
language,
voice,
engine,
model: this.model,
model,
salt,
credentials,
options: this.options,

View File

@@ -149,12 +149,6 @@
"ConnectFailure": "assemblyai_transcribe::connect_failed",
"Connect": "assemblyai_transcribe::connect"
},
"VoxistTranscriptionEvents": {
"Transcription": "voxist_transcribe::transcription",
"Error": "voxist_transcribe::error",
"ConnectFailure": "voxist_transcribe::connect_failed",
"Connect": "voxist_transcribe::connect"
},
"VadDetection": {
"Detection": "vad_detect:detection"
},
@@ -182,13 +176,6 @@
"Disconnect": "openai_s2s::disconnect",
"ServerEvent": "openai_s2s::server_event"
},
"LlmEvents_Elevenlabs": {
"Error": "error",
"Connect": "elevenlabs_s2s::connect",
"ConnectFailure": "elevenlabs_s2s::connect_failed",
"Disconnect": "elevenlabs_s2s::disconnect",
"ServerEvent": "elevenlabs_s2s::server_event"
},
"LlmEvents_VoiceAgent": {
"Error": "error",
"Connect": "voice_agent_s2s::connect",

View File

@@ -122,10 +122,6 @@ const speechMapper = (cred) => {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
}
else if ('voxist' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
}
else if ('whisper' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;

View File

@@ -30,7 +30,6 @@ const stickyVars = {
'DEEPGRAM_SPEECH_TIER',
'DEEPGRAM_SPEECH_MODEL',
'DEEPGRAM_SPEECH_ENABLE_SMART_FORMAT',
'DEEPGRAM_SPEECH_ENABLE_NO_DELAY',
'DEEPGRAM_SPEECH_ENABLE_AUTOMATIC_PUNCTUATION',
'DEEPGRAM_SPEECH_PROFANITY_FILTER',
'DEEPGRAM_SPEECH_REDACT',
@@ -45,8 +44,7 @@ const stickyVars = {
'DEEPGRAM_SPEECH_VAD_TURNOFF',
'DEEPGRAM_SPEECH_TAG',
'DEEPGRAM_SPEECH_MODEL_VERSION',
'DEEPGRAM_SPEECH_FILLER_WORDS',
'DEEPGRAM_SPEECH_KEYTERMS',
'DEEPGRAM_SPEECH_FILLER_WORDS'
],
aws: [
'AWS_VOCABULARY_NAME',
@@ -107,9 +105,6 @@ const stickyVars = {
'ASSEMBLYAI_API_KEY',
'ASSEMBLYAI_WORD_BOOST'
],
voxist: [
'VOXIST_API_KEY',
],
speechmatics: [
'SPEECHMATICS_API_KEY',
'SPEECHMATICS_HOST',
@@ -522,25 +517,6 @@ const normalizeAssemblyAi = (evt, channel, language) => {
};
};
/**
 * Normalize a raw Voxist transcription event into the common jambonz
 * transcript shape. Voxist supplies no per-alternative confidence, so a
 * fixed 1.00 is reported; the untouched raw event is carried under vendor.evt.
 */
const normalizeVoxist = (evt, channel, language) => {
  // deep-copy so later mutation of the normalized result cannot alter the raw event
  const rawEvent = JSON.parse(JSON.stringify(evt));
  const alternative = {
    confidence: 1.00,
    transcript: evt.text,
  };
  return {
    language_code: language,
    channel_tag: channel,
    is_final: evt.type === 'final',
    alternatives: [alternative],
    vendor: {
      name: 'voxist',
      evt: rawEvent
    }
  };
};
const normalizeSpeechmatics = (evt, channel, language) => {
const copy = JSON.parse(JSON.stringify(evt));
const is_final = evt.message === 'AddTranscript';
@@ -591,8 +567,6 @@ module.exports = (logger) => {
return normalizeCobalt(evt, channel, language);
case 'assemblyai':
return normalizeAssemblyAi(evt, channel, language, shortUtterance);
case 'voxist':
return normalizeVoxist(evt, channel, language);
case 'verbio':
return normalizeVerbio(evt, channel, language);
case 'speechmatics':
@@ -731,8 +705,6 @@ module.exports = (logger) => {
//azureSttEndpointId overrides sttCredentials.custom_stt_endpoint
...(rOpts.azureSttEndpointId &&
{AZURE_SERVICE_ENDPOINT_ID: rOpts.azureSttEndpointId}),
...(azureOptions.speechRecognitionMode &&
{AZURE_RECOGNITION_MODE: azureOptions.speechRecognitionMode}),
};
}
else if ('nuance' === vendor) {
@@ -806,8 +778,6 @@ module.exports = (logger) => {
{DEEPGRAM_SPEECH_ENABLE_AUTOMATIC_PUNCTUATION: 1},
...(deepgramOptions.smartFormatting) &&
{DEEPGRAM_SPEECH_ENABLE_SMART_FORMAT: 1},
...(deepgramOptions.noDelay) &&
{DEEPGRAM_SPEECH_ENABLE_NO_DELAY: 1},
...(deepgramOptions.profanityFilter) &&
{DEEPGRAM_SPEECH_PROFANITY_FILTER: 1},
...(deepgramOptions.redact) &&
@@ -845,9 +815,7 @@ module.exports = (logger) => {
...(deepgramOptions.version) &&
{DEEPGRAM_SPEECH_MODEL_VERSION: deepgramOptions.version},
...(deepgramOptions.fillerWords) &&
{DEEPGRAM_SPEECH_FILLER_WORDS: deepgramOptions.fillerWords},
...((Array.isArray(deepgramOptions.keyterms) && deepgramOptions.keyterms.length > 0) &&
{DEEPGRAM_SPEECH_KEYTERMS: deepgramOptions.keyterms.join(',')})
{DEEPGRAM_SPEECH_FILLER_WORDS: deepgramOptions.fillerWords}
};
}
else if ('soniox' === vendor) {
@@ -956,13 +924,6 @@ module.exports = (logger) => {
{ASSEMBLYAI_WORD_BOOST: JSON.stringify(rOpts.hints)})
};
}
else if ('voxist' === vendor) {
opts = {
...opts,
...(sttCredentials.api_key) &&
{VOXIST_API_KEY: sttCredentials.api_key},
};
}
else if ('verbio' === vendor) {
const {verbioOptions = {}} = rOpts;
opts = {

View File

@@ -4,50 +4,37 @@ const {
TtsStreamingEvents,
TtsStreamingConnectionStatus
} = require('../utils/constants');
const MAX_CHUNK_SIZE = 1800;
const HIGH_WATER_BUFFER_SIZE = 1000;
const LOW_WATER_BUFFER_SIZE = 200;
const TIMEOUT_RETRY_MSECS = 3000;
const isWhitespace = (str) => /^\s*$/.test(str);
/**
* Each queue item is an object:
* - { type: 'text', value: '…' } for text tokens.
* - { type: 'flush' } for a flush command.
*/
class TtsStreamingBuffer extends Emitter {
constructor(cs) {
super();
this.cs = cs;
this.logger = cs.logger;
// Use an array to hold our structured items.
this.queue = [];
// Track total number of characters in text items.
this.bufferedLength = 0;
this.tokens = '';
this.eventHandlers = [];
this._isFull = false;
this._connectionStatus = TtsStreamingConnectionStatus.NotConnected;
this._flushPending = false;
this.timer = null;
// Record the last time the text buffer was updated.
this.lastUpdateTime = 0;
}
get isEmpty() {
return this.queue.length === 0;
}
get size() {
return this.bufferedLength;
return this.tokens.length === 0;
}
get isFull() {
return this._isFull;
}
get size() {
return this.tokens.length;
}
get ep() {
return this.cs?.ep;
}
@@ -55,8 +42,7 @@ class TtsStreamingBuffer extends Emitter {
async start() {
assert.ok(
this._connectionStatus === TtsStreamingConnectionStatus.NotConnected,
'TtsStreamingBuffer:start already started, or has failed'
);
'TtsStreamingBuffer:start already started, or has failed');
this.vendor = this.cs.getTsStreamingVendor();
if (!this.vendor) {
@@ -69,9 +55,9 @@ class TtsStreamingBuffer extends Emitter {
this._connectionStatus = TtsStreamingConnectionStatus.Connecting;
try {
if (this.eventHandlers.length === 0) this._initHandlers(this.ep);
await this._api(this.ep, [this.ep.uuid, 'connect']);
await this._api(this.ep, [this.ep.uuid, 'connect']);
} catch (err) {
this.logger.info({ err }, 'TtsStreamingBuffer:start Error connecting to TTS streaming');
this.logger.info({err}, 'TtsStreamingBuffer:start Error connecting to TTS streaming');
this._connectionStatus = TtsStreamingConnectionStatus.Failed;
}
}
@@ -81,319 +67,204 @@ class TtsStreamingBuffer extends Emitter {
this.removeCustomEventListeners();
if (this.ep) {
this._api(this.ep, [this.ep.uuid, 'close'])
.catch((err) =>
this.logger.info({ err }, 'TtsStreamingBuffer:stop Error closing TTS streaming')
);
.catch((err) => this.logger.info({err}, 'TtsStreamingBuffer:kill Error closing TTS streaming'));
}
this.timer = null;
this.queue = [];
this.bufferedLength = 0;
this.tokens = '';
this._connectionStatus = TtsStreamingConnectionStatus.NotConnected;
}
/**
* Buffer new text tokens.
* Add tokens to the buffer and start feeding them to the endpoint if necessary.
*/
// NOTE(review): this method appears to interleave lines from two revisions
// (a queue-of-items buffer vs. a single `this.tokens` string buffer).  The
// duplicated statements flagged below leave the method syntactically
// unbalanced as written — reconcile against version control before editing.
async bufferTokens(tokens) {
// Reject outright once the vendor connection has failed.
if (this._connectionStatus === TtsStreamingConnectionStatus.Failed) {
this.logger.info('TtsStreamingBuffer:bufferTokens TTS streaming connection failed, rejecting request');
return { status: 'failed', reason: `connection to ${this.vendor} failed` };
}
// Drop pure-whitespace tokens when nothing is buffered yet.
if (0 === this.bufferedLength && isWhitespace(tokens)) {
this.logger.debug({tokens}, 'TtsStreamingBuffer:bufferTokens discarded whitespace tokens');
return { status: 'ok' };
// NOTE(review): unreachable (follows a return); appears to be the other
// revision's failure-return for the connection-failed branch above.
return {status: 'failed', reason: `connection to ${this.vendor} failed`};
}
const displayedTokens = tokens.length <= 40 ? tokens : tokens.substring(0, 40);
const totalLength = tokens.length;
// NOTE(review): the next two `if` headers appear to be the same high-water
// check from the two revisions (queue-based vs. string-based).
if (this.bufferedLength + totalLength > HIGH_WATER_BUFFER_SIZE) {
/* if we crossed the high water mark, reject the request */
if (this.tokens.length + totalLength > HIGH_WATER_BUFFER_SIZE) {
this.logger.info(
`TtsStreamingBuffer throttling: buffer is full, rejecting request to buffer ${totalLength} tokens`
);
// NOTE(review): stray duplicate of the template literal above; as written it
// is an unmatched `);` fragment.
`TtsStreamingBuffer throttling: buffer is full, rejecting request to buffer ${totalLength} tokens`);
if (!this._isFull) {
this._isFull = true;
this.emit(TtsStreamingEvents.Pause);
}
return { status: 'failed', reason: 'full' };
// NOTE(review): duplicate of the return above from the other revision.
return {status: 'failed', reason: 'full'};
}
this.logger.debug(
`TtsStreamingBuffer:bufferTokens "${displayedTokens}" (length: ${totalLength})`
// NOTE(review): duplicate debug text from the other revision.
`TtsStreamingBuffer:bufferTokens "${displayedTokens}" (length: ${totalLength}), starting? ${this.isEmpty}`
);
// NOTE(review): both revisions' buffering steps are present below — a queue
// push plus bufferedLength bump, and a string append to this.tokens.
this.queue.push({ type: 'text', value: tokens });
this.bufferedLength += totalLength;
// Update the last update time each time new text is buffered.
this.lastUpdateTime = Date.now();
this.tokens += (tokens || '');
await this._feedQueue();
return { status: 'ok' };
await this._feedTokens();
return {status: 'ok'};
}
/**
* Insert a flush command. If no text is queued, flush immediately.
* Otherwise, append a flush marker so that all text preceding it will be sent
* (regardless of sentence boundaries) before the flush is issued.
*/
// NOTE(review): interleaved revisions — both a queue/_flushPending variant and
// a size/_pendingFlush variant appear below; braces do not balance as written.
flush() {
this.logger.debug('TtsStreamingBuffer:flush');
if (this._connectionStatus === TtsStreamingConnectionStatus.Connecting) {
this.logger.debug('TtsStreamingBuffer:flush TTS stream is not quite ready - wait for connect');
if (this.queue.length === 0 || this.queue[this.queue.length - 1].type !== 'flush') {
this.queue.push({ type: 'flush' });
}
this._flushPending = true;
return;
}
else if (this._connectionStatus === TtsStreamingConnectionStatus.Connected) {
// NOTE(review): the next two `if` headers appear to be the same emptiness
// check from the two revisions (isEmpty vs. size === 0).
if (this.isEmpty) {
if (this.size === 0) {
this._doFlush();
}
else {
if (this.queue[this.queue.length - 1].type !== 'flush') {
this.queue.push({ type: 'flush' });
this.logger.debug('TtsStreamingBuffer:flush added flush marker to queue');
}
/* we have tokens queued, so flush after they have been sent */
this._pendingFlush = true;
}
}
else {
this.logger.debug(
`TtsStreamingBuffer:flush TTS stream is not connected, status: ${this._connectionStatus}`
);
}
}
clear() {
this.logger.debug('TtsStreamingBuffer:clear');
if (this._connectionStatus !== TtsStreamingConnectionStatus.Connected) return;
clearTimeout(this.timer);
this._api(this.ep, [this.ep.uuid, 'clear']).catch((err) =>
this.logger.info({ err }, 'TtsStreamingBuffer:clear Error clearing TTS streaming')
);
this.queue = [];
this.bufferedLength = 0;
this._api(this.ep, [this.ep.uuid, 'clear'])
.catch((err) => this.logger.info({err}, 'TtsStreamingBuffer:clear Error clearing TTS streaming'));
this.tokens = '';
this.timer = null;
this._isFull = false;
}
/**
* Process the queue in two phases.
*
* Phase 1: Look for flush markers. When a flush marker is found (even if not at the very front),
* send all text tokens that came before it immediately (ignoring sentence boundaries)
* and then send the flush command. Repeat until there are no flush markers left.
*
* Phase 2: With the remaining queue (now containing only text items), accumulate text
* up to MAX_CHUNK_SIZE and use sentence-boundary logic to determine a chunk.
* Then, remove the exact tokens (or portions thereof) that were consumed.
* Send tokens to the TTS engine in sentence chunks for best playout
*/
// NOTE(review): two revisions of this routine appear interleaved below — an
// older queue-based _feedQueue and a newer string-based _feedTokens.  The two
// method headers sit back-to-back and braces do not balance as written;
// reconcile against version control before editing.
async _feedQueue(handlingTimeout = false) {
this.logger.debug({ queue: this.queue }, 'TtsStreamingBuffer:_feedQueue');
// NOTE(review): second method header, from the other revision.
async _feedTokens(handlingTimeout = false) {
this.logger.debug({tokens: this.tokens}, '_feedTokens');
try {
if (!this.cs.isTtsStreamOpen || !this.ep) {
this.logger.debug('TtsStreamingBuffer:_feedQueue TTS stream is not open or no endpoint available');
return;
/* are we in a state where we can feed tokens to the TTS? */
// NOTE(review): duplicate stream-open guard from the other revision.
if (!this.cs.isTtsStreamOpen || !this.ep || !this.tokens) {
this.logger.debug('TTS stream is not open or no tokens to send');
return this.tokens?.length || 0;
}
if (
this._connectionStatus === TtsStreamingConnectionStatus.NotConnected ||
this._connectionStatus === TtsStreamingConnectionStatus.Failed
) {
this.logger.debug('TtsStreamingBuffer:_feedQueue TTS stream is not connected');
// NOTE(review): duplicate not-connected guard from the other revision.
if (this._connectionStatus === TtsStreamingConnectionStatus.NotConnected ||
this._connectionStatus === TtsStreamingConnectionStatus.Failed) {
this.logger.debug('TtsStreamingBuffer:_feedTokens TTS stream is not connected');
return;
}
// --- Phase 1: Process flush markers ---
// Process any flush marker that isnt in the very first position.
let flushIndex = this.queue.findIndex((item, idx) => item.type === 'flush' && idx > 0);
while (flushIndex !== -1) {
let flushText = '';
// Accumulate all text tokens preceding the flush marker.
for (let i = 0; i < flushIndex; i++) {
if (this.queue[i].type === 'text') {
flushText += this.queue[i].value;
}
}
// Remove those text items.
for (let i = 0; i < flushIndex; i++) {
const item = this.queue.shift();
if (item.type === 'text') {
this.bufferedLength -= item.value.length;
}
}
// Remove the flush marker (now at the front).
if (this.queue.length > 0 && this.queue[0].type === 'flush') {
this.queue.shift();
}
// Immediately send all accumulated text (ignoring sentence boundaries).
if (flushText.length > 0) {
const modifiedFlushText = flushText.replace(/\n\n/g, '\n \n');
try {
await this._api(this.ep, [this.ep.uuid, 'send', modifiedFlushText]);
} catch (err) {
this.logger.info({ err, flushText }, 'TtsStreamingBuffer:_feedQueue Error sending TTS chunk');
}
}
// Send the flush command.
await this._doFlush();
flushIndex = this.queue.findIndex((item, idx) => item.type === 'flush' && idx > 0);
}
// If a flush marker is at the very front, process it.
while (this.queue.length > 0 && this.queue[0].type === 'flush') {
this.queue.shift();
await this._doFlush();
}
// --- Phase 2: Process remaining text tokens ---
if (this.queue.length === 0) {
this._removeTimer();
// NOTE(review): the other revision's connecting-state guard resumes here,
// leaving the `if` just above without a visible close brace.
if (this._connectionStatus === TtsStreamingConnectionStatus.Connecting) {
this.logger.debug('TtsStreamingBuffer:_feedTokens TTS stream is not ready, waiting for connect');
return;
}
// Accumulate contiguous text tokens (from the front) up to MAX_CHUNK_SIZE.
let combinedText = '';
for (const item of this.queue) {
if (item.type !== 'text') break;
combinedText += item.value;
if (combinedText.length >= MAX_CHUNK_SIZE) break;
}
if (combinedText.length === 0) {
this._removeTimer();
return;
}
/* must send at least one sentence */
// NOTE(review): duplicate limit/chunkEnd computations from the two revisions
// (this.tokens vs. combinedText); `const limit`/`let chunkEnd` are redeclared.
const limit = Math.min(MAX_CHUNK_SIZE, this.tokens.length);
let chunkEnd = findSentenceBoundary(this.tokens, limit);
const limit = Math.min(MAX_CHUNK_SIZE, combinedText.length);
let chunkEnd = findSentenceBoundary(combinedText, limit);
if (chunkEnd <= 0) {
if (handlingTimeout) {
chunkEnd = findWordBoundary(combinedText, limit);
/* on a timeout we've left some tokens sitting around, so be more aggressive now in sending them */
chunkEnd = findWordBoundary(this.tokens, limit);
if (chunkEnd <= 0) {
this.logger.debug('TtsStreamingBuffer:_feedTokens: no word boundary found');
this._setTimerIfNeeded();
return;
}
} else {
}
else {
/* if we just received tokens, we wont send unless we have at least a full sentence */
this.logger.debug('TtsStreamingBuffer:_feedTokens: no sentence boundary found');
this._setTimerIfNeeded();
return;
}
}
const chunk = combinedText.slice(0, chunkEnd);
// Now we iterate over the queue items
// and deduct their lengths until we've accounted for chunkEnd characters.
let remaining = chunkEnd;
let tokensProcessed = 0;
for (let i = 0; i < this.queue.length; i++) {
const token = this.queue[i];
if (token.type !== 'text') break;
if (remaining >= token.value.length) {
remaining -= token.value.length;
tokensProcessed = i + 1;
} else {
// Partially consumed token: update its value to remove the consumed part.
token.value = token.value.slice(remaining);
tokensProcessed = i;
remaining = 0;
break;
}
}
// Remove the fully consumed tokens from the front of the queue.
this.queue.splice(0, tokensProcessed);
this.bufferedLength -= chunkEnd;
// NOTE(review): re-declaration of `chunk` — the other revision's consume step.
const chunk = this.tokens.slice(0, chunkEnd);
this.tokens = this.tokens.slice(chunkEnd);
/* freeswitch looks for sequence of 2 newlines to determine end of message, so insert a space */
const modifiedChunk = chunk.replace(/\n\n/g, '\n \n');
this.logger.debug(`TtsStreamingBuffer:_feedQueue sending chunk to tts: ${modifiedChunk}`);
await this._api(this.ep, [this.ep.uuid, 'send', modifiedChunk]);
this.logger.debug(`TtsStreamingBuffer:_feedTokens: sent ${chunk.length}, remaining: ${this.tokens.length}`);
try {
await this._api(this.ep, [this.ep.uuid, 'send', modifiedChunk]);
} catch (err) {
this.logger.info({ err, chunk }, 'TtsStreamingBuffer:_feedQueue Error sending TTS chunk');
// NOTE(review): pending-flush and low-water resume handling from both
// revisions appears below (_pendingFlush / _isFull vs. isFull / tokens).
if (this._pendingFlush) {
this._doFlush();
this._pendingFlush = false;
}
if (this._isFull && this.bufferedLength <= LOW_WATER_BUFFER_SIZE) {
this.logger.info('TtsStreamingBuffer throttling: buffer is no longer full - resuming');
if (this.isFull && this.tokens.length <= LOW_WATER_BUFFER_SIZE) {
this.logger.info('TtsStreamingBuffer throttling: TTS streaming buffer is no longer full - resuming');
this._isFull = false;
this.emit(TtsStreamingEvents.Resume);
}
return this._feedQueue();
} catch (err) {
this.logger.info({ err }, 'TtsStreamingBuffer:_feedQueue Error sending TTS chunk');
this.queue = [];
this.bufferedLength = 0;
this.logger.info({err}, 'TtsStreamingBuffer:_feedTokens Error sending TTS chunk');
this.tokens = '';
}
return;
}
async _api(ep, args) {
const apiCmd = `uuid_${this.vendor.startsWith('custom:') ? 'custom' : this.vendor}_tts_streaming`;
const res = await ep.api(apiCmd, `^^|${args.join('|')}`);
if (!res.body?.startsWith('+OK')) {
this.logger.info({ args }, `Error calling ${apiCmd}: ${res.body}`);
this.logger.info({args}, `Error calling ${apiCmd}: ${res.body}`);
throw new Error(`Error calling ${apiCmd}: ${res.body}`);
}
}
_onConnectFailure(vendor) {
this.logger.info(`streaming tts connection failed to ${vendor}`);
this._connectionStatus = TtsStreamingConnectionStatus.Failed;
this.tokens = '';
this.emit(TtsStreamingEvents.ConnectFailure, {vendor});
}
_doFlush() {
return this._api(this.ep, [this.ep.uuid, 'flush'])
.then(() => this.logger.debug('TtsStreamingBuffer:_doFlush sent flush command'))
.catch((err) =>
this.logger.info(
{ err },
`TtsStreamingBuffer:_doFlush Error flushing TTS streaming: ${JSON.stringify(err)}`
)
);
this._api(this.ep, [this.ep.uuid, 'flush'])
.catch((err) => this.logger.info({err},
`TtsStreamingBuffer:_doFlush Error flushing TTS streaming: ${JSON.stringify(err)}`));
}
// NOTE(review): interleaved revisions — duplicate log lines and two guarded
// feed calls; the first `if` below is never closed, so braces don't balance.
async _onConnect(vendor) {
this.logger.info(`TtsStreamingBuffer:_onConnect streaming tts connection made to ${vendor} successful`);
// NOTE(review): duplicate log line from the other revision.
this.logger.info(`streaming tts connection made to ${vendor}`);
this._connectionStatus = TtsStreamingConnectionStatus.Connected;
if (this.queue.length > 0) {
await this._feedQueue();
// NOTE(review): duplicate guard/feed from the other revision.
if (this.tokens.length > 0) {
await this._feedTokens();
}
// Honor a flush requested while we were still connecting.
if (this._flushPending) {
this.flush();
this._flushPending = false;
}
}
// NOTE(review): second definition of _onConnectFailure in this class (an
// earlier, tokens-based one appears above); in a JS class body the later
// definition silently overrides the earlier — exactly one should be kept.
_onConnectFailure(vendor) {
// Mark the connection failed, drop the queued text, and notify listeners.
this.logger.info(`TtsStreamingBuffer:_onConnectFailure streaming tts connection failed to ${vendor}`);
this._connectionStatus = TtsStreamingConnectionStatus.Failed;
this.queue = [];
this.bufferedLength = 0;
this.emit(TtsStreamingEvents.ConnectFailure, { vendor });
}
// NOTE(review): interleaved revisions — two `if` headers (bufferedLength vs.
// tokens.length) and a missing closing brace as written.
_setTimerIfNeeded() {
if (this.bufferedLength > 0 && !this.timer) {
this.logger.debug({queue: this.queue},
`TtsStreamingBuffer:_setTimerIfNeeded setting timer because ${this.bufferedLength} buffered`);
// NOTE(review): duplicate guard from the other revision.
if (this.tokens.length > 0 && !this.timer) {
// Arm a retry so leftover text is eventually sent even without a boundary.
this.timer = setTimeout(this._onTimeout.bind(this), TIMEOUT_RETRY_MSECS);
}
}
_removeTimer() {
if (this.timer) {
this.logger.debug('TtsStreamingBuffer:_removeTimer clearing timer');
clearTimeout(this.timer);
this.timer = null;
}
}
// Timer callback: no sentence boundary arrived in time, so feed what we have.
_onTimeout() {
this.logger.debug('TtsStreamingBuffer:_onTimeout Timeout waiting for sentence boundary');
// Check if new text has been added since the timer was set.
const now = Date.now();
if (now - this.lastUpdateTime < TIMEOUT_RETRY_MSECS) {
this.logger.debug('TtsStreamingBuffer:_onTimeout New text received recently; postponing flush.');
this._setTimerIfNeeded();
return;
}
this.logger.info('TtsStreamingBuffer:_onTimeout');
this.timer = null;
// NOTE(review): feed calls from both revisions are present — _feedQueue and
// _feedTokens would each run here; only one should survive reconciliation.
this._feedQueue(true);
this._feedTokens(true);
}
_onTtsEmpty(vendor) {
this.emit(TtsStreamingEvents.Empty, { vendor });
this.emit(TtsStreamingEvents.Empty, {vendor});
}
addCustomEventListener(ep, event, handler) {
this.eventHandlers.push({ ep, event, handler });
this.eventHandlers.push({ep, event, handler});
ep.addCustomEventListener(event, handler);
}
@@ -403,6 +274,7 @@ class TtsStreamingBuffer extends Emitter {
_initHandlers(ep) {
[
// DH: add other vendors here as modules are added
'deepgram',
'cartesia',
'elevenlabs',
@@ -421,21 +293,23 @@ class TtsStreamingBuffer extends Emitter {
}
/**
 * Return the index just past the last sentence boundary occurring before
 * `limit`, or -1 if none.  A boundary is '.', '!' or '?' followed by
 * whitespace/end-of-string, or a double newline.  Boundaries with no real
 * content before them, or immediately preceded by a digit (e.g. "3.14"),
 * are ignored.
 *
 * NOTE(review): the original span contained both diff sides of this function
 * interleaved (identical code, differing only in comments), which redeclared
 * `precedingText` and broke the `if` condition; this is the deduped form.
 */
const findSentenceBoundary = (text, limit) => {
  const sentenceEndRegex = /[.!?](?=\s|$)|\n\n/g;
  let lastSentenceBoundary = -1;
  let match;
  while ((match = sentenceEndRegex.exec(text)) && match.index < limit) {
    const precedingText = text.slice(0, match.index).trim();
    if (precedingText.length > 0) { // only boundaries with actual content before them
      if (
        match[0] === '\n\n' || // a double newline always ends a sentence
        (match.index === 0 || !/\d$/.test(text[match.index - 1])) // don't break after a digit
      ) {
        lastSentenceBoundary = match.index + (match[0] === '\n\n' ? 2 : 1); // include the boundary
      }
    }
  }
  return lastSentenceBoundary;
};
@@ -443,6 +317,7 @@ const findWordBoundary = (text, limit) => {
const wordBoundaryRegex = /\s+/g;
let lastWordBoundary = -1;
let match;
while ((match = wordBoundaryRegex.exec(text)) && match.index < limit) {
lastWordBoundary = match.index;
}

9681
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -31,9 +31,9 @@
"@jambonz/http-health-check": "^0.0.1",
"@jambonz/mw-registrar": "^0.2.7",
"@jambonz/realtimedb-helpers": "^0.8.8",
"@jambonz/speech-utils": "^0.2.3",
"@jambonz/speech-utils": "^0.2.1",
"@jambonz/stats-collector": "^0.1.10",
"@jambonz/verb-specifications": "^0.0.97",
"@jambonz/verb-specifications": "^0.0.94",
"@jambonz/time-series": "^0.2.13",
"@opentelemetry/api": "^1.8.0",
"@opentelemetry/exporter-jaeger": "^1.23.0",
@@ -60,7 +60,7 @@
"short-uuid": "^5.1.0",
"sinon": "^17.0.1",
"to-snake-case": "^1.0.0",
"undici": "^6.20.0",
"undici": "^6.21.1",
"uuid-random": "^1.3.2",
"verify-aws-sns-signature": "^0.1.0",
"ws": "^8.18.0",