Compare commits

...

150 Commits

Author SHA1 Message Date
surajshivakumar
a6dcc9170b removed: duplicate line 2024-07-28 17:18:27 -04:00
surajshivakumar
34693b7e77 early media call termination log --removed faulty log msg 2024-07-26 18:16:08 -04:00
surajshivakumar
2439e225a0 early media call termination log 2024-07-26 18:07:14 -04:00
surajshivakumar
76c2be1d07 early media logging patch 2024-07-25 15:36:04 -04:00
rammohan-kore
4b4807e4cf Check the confidence levels of a transcript with minConfidence (#808)
* https://github.com/jambonz/jambonz-feature-server/issues/807

* feat/807: Using minConfidence from recognizer settings

* feat/807: new reason stt-min-confidence-error

* feat/807: sending stt-min-confidence instead of stt-min-confidence-error

* feat/807: sending stt-low-confidence instead of stt-min-confidence-error

* feat/807 - removed ? for this.data
2024-07-25 12:22:42 -04:00
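The minConfidence feature above gates low-scoring transcripts. A minimal sketch of that gate, assuming a typical transcript-handler shape (the handler and field names are illustrative; only minConfidence and the stt-low-confidence reason come from the commit):

```js
// Illustrative gate for minConfidence (#808); handler and field names assumed,
// the stt-low-confidence reason comes from the commit bullets above.
function onTranscript(evt, recognizer, task) {
  const alt = evt.alternatives?.[0];
  if (!alt) return;
  if (recognizer.minConfidence && alt.confidence < recognizer.minConfidence) {
    // below threshold: report a distinct reason instead of low-quality text
    task.resolve({reason: 'stt-low-confidence', confidence: alt.confidence});
    return;
  }
  task.resolve({reason: 'speechDetected', transcript: alt.transcript});
}
```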
Vinod Dharashive
9a3c731389 Update transcription-utils.js (#802) 2024-07-24 15:20:26 -04:00
Dave Horton
edd8f20642 fix for #826 race condition in say (#827)
* fix for #826 race condition in say

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip
2024-07-24 12:56:03 -04:00
Hoan Luu Huu
ee24041cba Allow joining conference as muted (#821)
* allow entering conference as muted

* allow entering conference as muted
2024-07-20 12:31:25 -04:00
Hoan Luu Huu
83f7abcd89 Kick member out of conference (#820) 2024-07-20 12:11:36 -04:00
Hoan Luu Huu
c9194168d2 support restDial.referhook (#812)
* support restDial.referhook

* support restDial.referhook

* wip
2024-07-19 10:22:29 -04:00
Hoan Luu Huu
83191487cf fix config.transcribe should not override config.transcribe.recognizer (#817) 2024-07-19 07:26:49 -04:00
Hoan Luu Huu
65ef4e6d64 fix conference in feature server cluster join, leave, end events are … (#803)
* fix conference in feature server cluster join, leave, end events are missing original data

* wip
2024-07-12 08:36:43 -06:00
Hoan Luu Huu
ddb4719220 Merge pull request #806 from jambonz/feat/fd_269
support disable/enable listen DTMF in prompt
2024-07-11 20:09:40 +07:00
Quan HL
f514a65f63 support disable/enable listen DTMF in prompt 2024-07-10 08:37:02 -06:00
Hoan Luu Huu
5ccea65b7f stt/tts label can be empty, should not assign application level label… (#804)
* stt/tts label can be empty, should not assign application level label as default value

* wip
2024-07-10 08:36:00 -06:00
Dave Horton
8672152873 fix for #765 (#785) 2024-06-28 09:05:05 -04:00
Dave Horton
425b88f930 fix: package.json & package-lock.json to reduce vulnerabilities (#792)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-UNDICI-7361667

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-06-28 09:04:25 -04:00
Dave Horton
111976bea5 bug: clear asr timer when gather resolves with timeout (#788) 2024-06-28 08:54:36 -04:00
Dave Horton
ec6d7b3f42 persistent connection for custom stt vendors in transcribe (#794) 2024-06-28 08:33:10 -04:00
Vinod Dharashive
5e1b826da4 Aws polly engine fix (#789)
* Aws polly engine fix  

engine parameter was  not able to change using synthesizer

* WIP

code correction and set default engine to Neural

* WIP

* WIP

Updated  tts-task.js

* WIP
2024-06-25 13:29:28 -04:00
Dave Horton
be9c3406c1 fix bug where play incorrectly plays again after response received (#786)
* fix bug where play incorrectly plays again after response received

* wip

* fix race condition where bot delay audio kicks off the same instant we receive commands
2024-06-25 12:25:55 -04:00
Dave Horton
2f3ef1654a fix: package.json & package-lock.json to reduce vulnerabilities (#787)
The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-JS-WS-7266574

Co-authored-by: snyk-bot <snyk-bot@snyk.io>
2024-06-24 15:07:55 -04:00
Hoan Luu Huu
0baa080a1e update getAwsAuthToken to use parameters in an object (#784)
* update getAwsAuthToken to use parameters in an object

* wip

* update speech utils
2024-06-15 08:11:31 -04:00
Dave Horton
f5cbd26c9f update to speech-utils with support for JAMBONES_DISABLE_AZURE_TTS_STREAMING (#776) 2024-06-14 09:31:28 -04:00
Dave Horton
d9fd82fa60 major refactor and simplification of actionHookDelay feature (#771)
* major refactor and simplification of actionHookDelay feature

* wip for #765

* wip

* testing

* wip

* added validity checks for actionHookDelay properties

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* fix bug where config happens before endpoint is established

* wip

* hangup and clear ws connection if nogiveuptimer expires

* wip

* wip

* wip
2024-06-14 09:24:26 -04:00
Dave Horton
76a3aa7f42 send end of utterance events if using deepgram, interim events are enabled, and utterance_end_ms option is set (#772) (#782) 2024-06-13 13:18:32 -04:00
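A hedged sketch of the condition described in the commit above; Deepgram's UtteranceEnd message type is real, but the option and emitter names here are assumptions:

```js
// Sketch: forward end-of-utterance events only under the three conditions in
// the commit above; option and event names are illustrative.
function onDeepgramMessage(msg, opts, session) {
  const eligible =
    opts.vendor === 'deepgram' &&
    opts.interim === true &&                     // interim events enabled
    Number.isInteger(opts.utterance_end_ms);     // utterance_end_ms option set
  if (eligible && msg.type === 'UtteranceEnd') {
    session.emit('end_of_utterance', {last_word_end: msg.last_word_end});
  }
}
```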
Hoan Luu Huu
cafe149bdf fix wrong vad notification to background bargein (#781)
* fix wrong vad notification to background bargein

* wip

* wip

* reset bargeinHandled on every reset
2024-06-12 10:52:53 -04:00
Anton Voylenko
9969e39e7e set valid terminatedBy for rest call (#779) 2024-06-08 17:39:40 -04:00
Hoan Luu Huu
8eea212df2 Fix/verbio stt (#770)
* fix verbio stt normalization

* wip
2024-06-01 07:38:36 -04:00
Hoan Luu Huu
e8e356ea3a update speech util version to fix verbio cache (#766) 2024-05-31 06:42:22 -04:00
Hoan Luu Huu
c5e19bf775 support verbio speech (#757)
* support verbio speech

* wip

* wip

* wip

* update speech utils

* update verb specification
2024-05-29 07:55:46 -04:00
Hoan Luu Huu
498dd64025 support mod_vad_detect (#762)
* support mod_vad_detect

* wip

* update verb spec and drachtio fsmrf

* Update example-voicemail-greetings.json (#761)

Update voicemail english greetings

* wip

* stopvad if playdone

---------

Co-authored-by: Vinod Dharashive <vdharashive@gmail.com>
2024-05-29 07:31:59 -04:00
Dave Horton
24b6d2464b update speech-utils and fsmrf (#764) 2024-05-28 18:24:51 -04:00
Dave Horton
cd5421120f fix race condition with filler noise and also play filler noise when idle and waiting for commands (#763) 2024-05-28 12:45:29 -04:00
Hoan Luu Huu
d7c3a4a632 support mod_custom_tts (#731) 2024-05-28 12:30:25 -04:00
Hoan Luu Huu
c53ad89154 support direct call to conference (#746)
* support direct call to conference

* wip

* wip

* wip
2024-05-28 10:30:52 -04:00
Vinod Dharashive
10b98630d3 Update example-voicemail-greetings.json (#761)
Update voicemail english greetings
2024-05-27 21:13:48 -04:00
Dave Horton
d132bdb92b fix gather race condition (#759) 2024-05-22 14:03:15 -04:00
Hoan Luu Huu
6be3fd9b64 say verb should not print speech credentials in log when tts stream API is used (#756) 2024-05-21 08:38:18 -04:00
Dave Horton
844b0cb05d log endpoint uuid for cross referencing with freeswitch logs 2024-05-20 11:04:15 -04:00
Dave Horton
c0b56d4fc6 per email from microsoft, do not restart STT connection when we get a no audio event (#754) 2024-05-17 11:19:01 -04:00
Dave Horton
d27de284e7 update to drachtio-srf@4.5.35 (#750) 2024-05-09 08:32:52 -04:00
Hoan Luu Huu
5e97847a2f fix fs keep looping forever if there is no fallback TTS (#749) 2024-05-09 06:15:57 -04:00
Hoan Luu Huu
17c379df47 update stats collector version (#744) 2024-05-06 20:06:04 -04:00
Hoan Luu Huu
e7bc0b0737 fix deadlock in say verb while waiting for playback-stop when the say verb is killed (#742) 2024-05-05 08:12:29 -04:00
Dave Horton
dfe623e78a Fix/google race condition gather (#743)
* lint

* logging

* wip
2024-05-03 12:53:26 -04:00
Dave Horton
56b8f0623b limit utterance_end_ms to (1000,5000) per discussion with Deepgram (#740) 2024-05-02 13:19:14 -04:00
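The clamp itself is simple; a sketch under the assumption it is applied wherever recognizer options are normalized:

```js
// Illustrative clamp per the commit above: keep utterance_end_ms within [1000, 5000] ms.
const clampUtteranceEndMs = (ms) => Math.min(5000, Math.max(1000, ms));
```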
Hoan Luu Huu
7bcbab5b74 feat tts stream fallback (#736)
* feat tts stream fallback

* wip

* wip

* wip

* wip

* wip

* wip

* fix review comment
2024-05-02 08:43:41 -04:00
Hoan Luu Huu
44e6a3513d support speech aws polly by role_arn (#729)
* support speech aws polly by role_arn

* support aws stt assume role

* wip

* update speech utils version
2024-05-02 07:59:21 -04:00
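The role_arn support above follows the standard AWS assume-role pattern. A sketch of that pattern with AWS SDK v3 (the actual wiring lives in @jambonz/speech-utils; the function and session name here are illustrative):

```js
// Sketch of the assume-role pattern; the real wiring lives in @jambonz/speech-utils.
const {STSClient, AssumeRoleCommand} = require('@aws-sdk/client-sts');
const {PollyClient} = require('@aws-sdk/client-polly');

async function pollyClientFromRole(roleArn, region) {
  const sts = new STSClient({region});
  const {Credentials} = await sts.send(new AssumeRoleCommand({
    RoleArn: roleArn,
    RoleSessionName: 'jambonz-tts'   // session name is illustrative
  }));
  // use the temporary credentials for Polly synthesis
  return new PollyClient({
    region,
    credentials: {
      accessKeyId: Credentials.AccessKeyId,
      secretAccessKey: Credentials.SecretAccessKey,
      sessionToken: Credentials.SessionToken
    }
  });
}
```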
Dave Horton
fad16144b9 update undici and ws (#739) 2024-05-01 14:20:43 -04:00
Dave Horton
6523a861c0 fix asr error notify wrong vendor name (#728) (#738)
Co-authored-by: Hoan Luu Huu <110280845+xquanluu@users.noreply.github.com>
2024-05-01 13:48:23 -04:00
Dave Horton
cff67f5e4c dial race where caller hangs up while dial is starting (#737) 2024-05-01 13:38:59 -04:00
Dave Horton
c77bd84e0e we should restart asr timer after a partial transcript (#735) 2024-04-30 14:53:08 -04:00
Dave Horton
3cd7a619ad ignore transcriptions from previous turns of conversation (#734) 2024-04-30 08:21:27 -04:00
Dave Horton
59cf02bd04 wait for session:reconnect ack to send queued msgs (#723) (#732)
Co-authored-by: Hoan Luu Huu <110280845+xquanluu@users.noreply.github.com>
2024-04-25 11:22:15 -04:00
Dave Horton
a18d55e9ab minor fix for leaving coach mode in conferencing 2024-04-22 12:46:34 -04:00
Dave Horton
d474b9d604 Feat/advanced conferencing features (#730)
* update drachtio-fsmrf and fixes to setCoachMode

* wip

* wip

* wip

* wip

* wip

* update gh actions
2024-04-22 11:00:05 -04:00
Dave Horton
8d2b60c284 minor 2024-04-21 09:51:05 -04:00
Dave Horton
9cf9d4f587 Fix/0.8.5 cherries (#724)
* kill play task if bot responds with verbs while actionHook delay is enabled (#712)

* kill play task if bot responds with verbs while actionHook delay is enabled

* fix actionHook delay continuing even after the bot has already responded with verbs

* wip

* wip

* wip

* gather hangs if listenDuringPrompt = false and say/play task throws exception (#717)

* merge fix for Support ASR TTS fallback (#713)

---------

Co-authored-by: Hoan Luu Huu <110280845+xquanluu@users.noreply.github.com>
2024-04-17 11:01:21 -04:00
Dave Horton
bd002ede48 ignore google errors with error_code 0 2024-04-16 20:06:26 -04:00
Dave Horton
1a2aa91973 proper fix for precache (#721)
* proper fix for precache

* wip
2024-04-15 16:25:12 -04:00
Dave Horton
e322b7d8d3 be more cautious about pre-caching prompts; in particular, a Config verb will not give us time to precache so avoid in that scenario (#720) 2024-04-15 15:38:10 -04:00
Hoan Luu Huu
7da11df88e default DEEPGRAM_SPEECH_UTTERANCE_END_MS is 1000 (#719) 2024-04-14 19:39:07 -04:00
Hoan Luu Huu
09cf1345f6 tts span for whisper (#718)
* tts span for whisper

* support deepgram tts span

* support playht tts span

* support rimelabs tts span

* wip
2024-04-14 09:14:49 -04:00
Dave Horton
2595f527ff gather: fix bug where empty deepgram transcript saved incorrectly 2024-04-13 09:59:02 -04:00
Dave Horton
1d77c0cd20 bugfix: bargein after the first when config bargein with sticky=true fails 2024-04-12 20:08:21 -04:00
Hoan Luu Huu
9eab81268b support mod_rimelabs_tts (#716)
* support mod_rimelabs_tts

* update speech utils
2024-04-12 07:28:45 -04:00
Dave Horton
ecf3d140d6 fix #714 (#715) 2024-04-10 16:23:22 -04:00
Hoan Luu Huu
4a52be9171 support mod_playht_tts (#711)
* support mod_playht_tts

* update speech utils version
2024-04-08 10:21:54 -04:00
Dave Horton
9b722ae36d update deps (#709)
* update deps

* version
2024-04-07 18:22:31 -04:00
Dave Horton
370b046fac update to speech utils with azure 1.36.0 2024-04-07 12:16:35 -04:00
Hoan Luu Huu
fca391c32e support bidirectionalAudioSampleRate in listen verb (#695)
* support bidirectionalAudioSampleRate in listen verb

* wip

* update verb spec and drachtio fsmrf

* fix listen failing testcase

* fix review comment

* update freeswitch test image

* update freeswitch test image

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2024-04-06 13:20:01 -04:00
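A hedged example of the listen verb using the new sample-rate option; the nested property layout and values are assumptions based on the commit title:

```js
// Hedged example: listen verb with a distinct sample rate for audio returned
// over the websocket. Property layout is an assumption from the commit title.
const listen = {
  verb: 'listen',
  url: 'wss://example.com/audio',     // your websocket audio endpoint
  sampleRate: 8000,                   // rate of audio streamed to the server
  bidirectionalAudio: {
    enabled: true,
    sampleRate: 16000                 // rate of audio the server sends back
  }
};
```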
Dave Horton
043860c4a3 update to speech utils supporting deepgram tts (#708) 2024-04-06 12:35:03 -04:00
Dave Horton
a021ee3112 update undici (#707) 2024-04-05 17:23:21 -04:00
Dave Horton
8999c85a71 Fixes/ws testing dh (#704)
* fixes from testing with translator app

* more updates

* linting

* update gh actions to node 20

* add support for google v2 preconfigured recognizer

* add support for google voice activity events

* update to speech-utils@0.0.45

* update speech-utils to support caching azure tts

* transcribe must buffer transcripts for channel 1 and 2 separately

* further fix for accumulating transcripts

* linting

* deepgram sends transcripts with empty alternatives array

* fix deepgram returning an empty array
2024-04-03 14:30:49 -04:00
Hoan Luu Huu
72147a8110 support google v2 enableVoiceActivityEvents (#703)
* support google v2 enableVoiceActivityEvents

* support google v2 enableVoiceActivityEvents
2024-04-02 10:14:54 -04:00
Hoan Luu Huu
93d0e41e31 support google version 2 (#699)
* support google version 2

* update new parameters for google v2
2024-04-02 07:33:22 -04:00
Hoan Luu Huu
5b1d8a8ff3 Feat/ambient sounds (#678)
* initial support for coaching mode in conference

* wip

* wip

* add support for answer verb

* wip

* wip

* wip

* wip

* wip

* updates to rename option to dub

* wip

* wip

* wip

* update verb-specs

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* add option to boost audio signal in main channel

* wip

* wip

* wip

* wip

* wip

* wip

* for now, bypass use of streaming apis when generating tts audio for dub tracks

* add nested dub to dial

* wip

* add support for filler noise

* kill filler noise when gather killed

* wip

* wip

* while using sayOnTrack, we have to enclose the say command in double quotes

* disableTtsStreaming = false

* allow transcribe of b leg only on dial verb

* dub.say can either be text or object like say verb with text and synthesizer

* remove loop for sayOnTrack

* update speech-utils

* fixes for testing transcribe verb and support for dub and boostAudioSignal in lcc commands

* add dial.boostAudioSignal

* fix bug where session-level recognizer settings incorrectly overwrite verb-level settings

* update verb specs

* update dial to support array of dub verbs

* fix bug setting gain

* lint

* wip

* update speech-utils

* use new endpoint methods for mod_dub

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2024-03-23 16:23:57 -04:00
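The PR above introduces a dub option for ambient audio tracks. A hedged sketch of such verbs, with field names taken from the PR bullets and the exact schema assumed (per the bullet "dub.say can either be text or object like say verb with text and synthesizer"):

```js
// Hedged sketch of dub verbs; only the say-as-text-or-object behavior is
// documented in the PR bullets above, the remaining fields are assumptions.
const dubAmbient = {
  verb: 'dub',
  action: 'addTrack',
  track: 'ambient',
  play: 'https://example.com/office-background.mp3'   // background audio source
};

const dubWhisper = {
  verb: 'dub',
  action: 'addTrack',
  track: 'whisper',
  // dub.say can be plain text or an object like the say verb:
  say: {text: 'This call may be recorded', synthesizer: {vendor: 'default'}}
};
```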
Dave Horton
ec58232b61 Fix/replace application issue (#692)
* fix scenario where ws replace application from gather while awaiting command and no tasks on execution stack

* lint

* remove some debug logging
2024-03-23 16:14:16 -04:00
Hoan Luu Huu
65c241bcd1 gather verb should clean up dtmf listener even when dtmfBargein=false (#686) 2024-03-23 16:01:41 -04:00
Hoan Luu Huu
75b6f89e0c add log to get more detail for AMD issue (#687)
* add log to check issue

* update drachtio-fsmrf 3.0.38
2024-03-21 09:14:32 -04:00
Hoan Luu Huu
b80d39d205 fix asrtimer always returns vendor=deepgram (#682) 2024-03-13 12:57:55 -04:00
Hoan Luu Huu
40f70e3531 update speech utils version 0.0.63 (#681) 2024-03-12 09:12:18 -04:00
Hoan Luu Huu
1914b88af9 support azure language id mode (#674) 2024-03-12 08:35:01 -04:00
Hoan Luu Huu
c946a5d14d fix actionHookDelay feature is not working properly if there is no de… (#679)
* fix actionHookDelayAction when no actions are defined

* terminated by jambonz for giveuptimeout
2024-03-12 08:33:03 -04:00
Hoan Luu Huu
878578fe0f Fix/issue 676 (#680)
* fix bargein is not working

* fix bargein is not working
2024-03-11 08:46:38 -04:00
Hoan Luu Huu
9b3be6c0b9 allow custom header on pause, resume recording (#670)
* allow custom header on pause, resume recording

* fix review comments
2024-03-05 18:01:32 -05:00
Hoan Luu Huu
4ae661daea remove unnecessary code for cleanup disableBotMode (#673) 2024-03-04 18:03:32 -05:00
Dave Horton
dbd3b59901 fix #666 2024-02-26 09:39:49 -05:00
Hoan Luu Huu
06b066a3f2 update speech util to support whisper stream (#657)
* update speech util to support whisper stream

* minor editing of span attributes

* more span attrs cleanup

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2024-02-22 14:17:29 -05:00
Dave Horton
fc3655c9bd fixes for confirm session (#663)
* fixes for confirm session

* allow empty dialconfirm array
2024-02-22 12:33:35 -05:00
dependabot[bot]
1b5f801830 Bump undici from 5.26.2 to 5.28.3 (#647)
Bumps [undici](https://github.com/nodejs/undici) from 5.26.2 to 5.28.3.
- [Release notes](https://github.com/nodejs/undici/releases)
- [Commits](https://github.com/nodejs/undici/compare/v5.26.2...v5.28.3)

---
updated-dependencies:
- dependency-name: undici
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-22 10:13:19 -05:00
Dave Horton
d0ebe3f99f fix possible undefined reference in precache audio (#662)
* fix possible undefined reference in precache audio

* fix parsing of JAMBONES_EAGERLY_PRE_CACHE_AUDIO
2024-02-22 07:58:41 -05:00
Dave Horton
51a379998f fix #655 (#658)
* fix #655

* fix race condition
2024-02-22 07:46:53 -05:00
dependabot[bot]
c2ae42a456 Bump ip from 1.1.8 to 1.1.9 (#660)
Bumps [ip](https://github.com/indutny/node-ip) from 1.1.8 to 1.1.9.
- [Commits](https://github.com/indutny/node-ip/compare/v1.1.8...v1.1.9)

---
updated-dependencies:
- dependency-name: ip
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-02-22 07:31:01 -05:00
Hoan Luu Huu
c187685054 feat actionHook delay action (#470)
* feat actionHook delay action

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip
2024-02-20 21:09:19 -05:00
Hoan Luu Huu
81234a583c support update record from application ws connection (#645) 2024-02-19 07:53:39 -05:00
Hoan Luu Huu
206849fa25 create outbound dial from webhook ws (#581)
* wip, create outbound dial from webhook ws

* wip, create outbound dial from webhook ws

* clean
2024-02-13 07:58:39 -05:00
Dave Horton
662b6d3d95 fix: elevenlabs caching with streaming 2024-02-12 21:08:41 -05:00
Anton Voylenko
5c070597cf tag outdial session (#643) 2024-02-12 13:16:43 -05:00
Dave Horton
42be9ff1ca update to speech-utils with env to disable elevenlabs streaming (default is on) 2024-02-12 12:49:45 -05:00
Dave Horton
f0533c881b deepgram gather: if both endpointing and utterance_end_ms are set (bu… (#644)
* deepgram gather: if both endpointing and utterance_end_ms are set (but not continuous asr) return either when we get speech_final or UtteranceEnd. This is the belt-and-suspenders approach deepgram is recommending

* include verb id in action hook if one was provided in the verb set

* minor
2024-02-12 12:32:43 -05:00
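A sketch of the "belt-and-suspenders" resolution described in the first bullet above; the message shapes follow Deepgram's API, the task fields are illustrative:

```js
// Sketch of resolving on whichever signal arrives first (names illustrative):
// a final transcript flagged speech_final, or a Deepgram UtteranceEnd event.
function maybeResolveGather(msg, task) {
  if (task.resolved || task.isContinuousAsr) return;  // only when continuous asr is off
  const speechFinal = msg.type === 'Results' && msg.speech_final === true;
  const utteranceEnd = msg.type === 'UtteranceEnd';
  if (speechFinal || utteranceEnd) {
    task.resolved = true;
    task.resolve({reason: 'speechDetected', transcript: task.bufferedTranscript});
  }
}
```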
Hoan Luu Huu
c894369a13 fix pause resume background transcribe (#586)
* fix pause resume background transcribe

* fix review comments
2024-02-12 10:38:07 -05:00
Dave Horton
565478cc0a #573 address race condition in pause/resume recording (#584) 2024-02-12 10:26:34 -05:00
Hoan Luu Huu
cdd25ca33d Fix/gather timeout (#594)
* fix gather verb timeout does not work

* wip

* wip

* wip

* wip

* fix review comments
2024-02-12 10:13:02 -05:00
Markus Frindt
ef2306e558 Improve Deepgram default model by language (#641)
Co-authored-by: Markus Frindt <m.frindt@cognigy.com>
2024-02-12 09:53:14 -05:00
Dave Horton
9c33a790bd update to latest speech-utils (#639) 2024-02-08 15:54:45 -05:00
Dave Horton
9f9a9ec598 initial changes for deepgram on-prem (#636)
* initial changes for deepgram on-prem

* typo

* fixes for selecting deepgram model

* update some property names

* wip

* wip

* wip
2024-02-07 14:21:05 -05:00
Dave Horton
75566bb268 bump to start 0.8.6 2024-02-07 08:51:05 -05:00
Hoan Luu Huu
a55f81676b Tts/elevenlabs streaming (#629)
* update to fsmrf with fix

* changes to support elevenlabs tts streaming

* say: add vendor data to span

* bug: tts spans must include cached property

* add env for JAMBONES_USE_FREESWITCH_TIMER_FD

* fix bug in prev commit

* wip

* linting

* wip - caching files generated by streaming tts

* wip caching

* cleanup some logs

* handle tts streaming failure, write alert

* update node version dependency

* set timerfd on outbound call scenarios

* default model to nova-2-phonecall when using deepgram

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2024-02-07 08:49:36 -05:00
Hoan Luu Huu
48a81072e8 fix gather should not play audio if gather already resolved (#638) 2024-02-06 07:42:44 -05:00
Hoan Luu Huu
74ede31cd3 fix ws reconnect does not send verb:hook data (#633) 2024-01-31 07:20:57 -08:00
Anton Voylenko
048229f019 fix(dequeue): retrieve by callsid (#630) 2024-01-31 07:06:08 -08:00
Hoan Luu Huu
71e266ae32 Merge pull request #632 from jambonz/fix/issue_631
fix default gather input is digits and gather dtmf should not require speech
2024-01-31 12:01:36 +07:00
Quan HL
5b607693dc fix default gather input is digits and gather dtmf should not require speech 2024-01-31 11:46:29 +07:00
Dave Horton
0491c5ce25 minor logging changes 2024-01-27 12:59:23 -05:00
Vinod Dharashive
a7fa2f95dd Change regex to have fqdn and IP (#625) 2024-01-25 09:13:30 -05:00
Dave Horton
901e412343 fix bug where final transcript with finished header results in timeout (#624) 2024-01-25 08:48:22 -05:00
Dave Horton
e57c7ba90a fix for #627 (#628) 2024-01-25 08:46:57 -05:00
Hoan Luu Huu
b867395d87 fix adulting call does not send status callback on hangup (#623) 2024-01-23 07:12:43 -05:00
Hoan Luu Huu
1a80910f91 fix pause transcribe cannot close transcription on 2nd leg (#621) 2024-01-18 11:21:25 -05:00
Hoan Luu Huu
5d4f25622d fixed call hangup while call is awaiting a new task and received ws command (#619)
* fixed call hangup while call is awaiting a new task and received ws command

* wip
2024-01-18 11:12:50 -05:00
Dave Horton
aabf37e269 update db-helpers 2024-01-17 13:23:21 -05:00
Hoan Luu Huu
b45275789b verbhook on ws connection should be ended in next redirect command (#616)
* verbhook on ws connection should be ended in next redirect command

* wip

* wip

* minor change for readability

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2024-01-17 12:37:03 -05:00
Dave Horton
6d5ef6a215 gather: don't resolve if deepgram sends final/empty transcript with no transcripts previously buffered (#618) 2024-01-17 10:59:37 -05:00
Hoan Luu Huu
b423a51638 feat: allow update azure endpoint ID from recognizer property (#612) 2024-01-17 07:34:02 -05:00
Hoan Luu Huu
b4ff2ea702 fix onholdHook (#540)
* fix onholdHook

* wip

* wip

* wip

* wip

* adding more debug log

* wip

* wip

* wip
2024-01-15 08:34:45 -05:00
Dave Horton
f22d66dfd6 set default deepgram model by language and task (gather vs transcribe) (#610)
* set default deepgram model by language and task (gather vs transcribe)

* wip
2024-01-14 10:38:14 -05:00
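A hedged sketch of the defaulting logic named in the commit above; nova-2-phonecall is named elsewhere in this changelog, the other model choices are assumptions:

```js
// Hedged sketch: pick a default Deepgram model from language and task.
// nova-2-phonecall appears elsewhere in this changelog; other choices are assumptions.
function defaultDeepgramModel(language, task) {
  if (language.startsWith('en')) {
    return task === 'gather' ? 'nova-2-phonecall' : 'nova-2';
  }
  return 'nova-2-general';  // assumed fallback for other languages
}
```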
Dave Horton
09a83e3a31 Feature/precache audio (#609)
* wip

* fix for establishing vendor etc

* more fixes

* avoid a pre-caching attempt if synth settings change
2024-01-13 12:51:25 -05:00
dependabot[bot]
d3d494191f Bump follow-redirects from 1.15.2 to 1.15.4 (#603)
Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.15.2 to 1.15.4.
- [Release notes](https://github.com/follow-redirects/follow-redirects/releases)
- [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.15.2...v1.15.4)

---
updated-dependencies:
- dependency-name: follow-redirects
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-10 09:24:36 -05:00
Paulo Telles
859e816a8e issue #605 (#606)
Co-authored-by: p.souza <p.souza@cognigy.com>
2024-01-10 09:23:09 -05:00
Hoan Luu Huu
29bbcf1be0 add user-agent to http and ws requestor (#602)
* add user-agent to http and ws requestor

* wip

* fix review comment
2024-01-10 08:54:46 -05:00
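A minimal sketch of the user-agent change; JAMBONES_HTTP_USER_AGENT_HEADER is real (it appears in the config diff further below), while the helper itself is illustrative:

```js
// Sketch: send the configurable User-Agent on webhook requests.
// JAMBONES_HTTP_USER_AGENT_HEADER comes from the config diff further below;
// the helper itself is illustrative.
const UA = process.env.JAMBONES_HTTP_USER_AGENT_HEADER || 'jambonz';

async function postWebhook(url, payload) {
  return fetch(url, {
    method: 'POST',
    headers: {'Content-Type': 'application/json', 'User-Agent': UA},
    body: JSON.stringify(payload)
  });
}
```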
Dave Horton
6f6d7a06b0 Revert "Fix/dual legs transcribe race condition (#593)" (#600)
This reverts commit 9d70ed96a1.
2024-01-09 08:23:13 -05:00
Anton Voylenko
a2ba80a9a3 Snake case customer data for refer (#598)
* update envs

* fix refer customer data

* use data from function
2024-01-08 19:15:08 -05:00
Hoan Luu Huu
9d70ed96a1 Fix/dual legs transcribe race condition (#593)
* fs only stops one of the bugnames when transcribe is used for dual legs

* wip

* fix review comment

* wip

* wip
2024-01-06 19:12:51 -05:00
Hoan Luu Huu
8173a306f7 fix stt default vendor cannot be mapped to correct value (#588) 2024-01-04 07:34:30 -05:00
Hoan Luu Huu
2e69630544 fix siprec to remap sdp based on participant label (#587)
* fix siprec to remap sdp based on participant label

* fix
2024-01-03 11:10:31 -05:00
Hoan Luu Huu
15829139c1 fix hangup headers (#583)
* fix hangup headers

* no need for callback

* fix test failure

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2023-12-28 14:59:59 -05:00
Dave Horton
2c48083c26 fix to be more precise about removing custom event handlers so that w… (#580)
* fix to be more precise about removing custom event handlers so that when we stop a gather we don't also inadvertently stop a background transcribe as well

* test fixes

* fix: endpointing=false was being ignored for Deepgram
2023-12-28 11:00:27 -05:00
Hoan Luu Huu
9d8291f892 Transcribe background task (#576)
* first draft

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* update verb-specification

* fix comment reviews

* provide bugname when stopping transcription, otherwise it will continue

---------

Co-authored-by: Dave Horton <daveh@beachdognet.com>
2023-12-26 21:50:51 -05:00
Hoan Luu Huu
3e8474867f support deepgram (#579)
* support deepgram

* update speech utils
2023-12-26 07:46:35 -05:00
Hoan Luu Huu
9eb315ecd6 fix config and stt task for altLanguages (#575)
* fix config and stt task for altLanguages

* clear freeswitch channel var when altLanguages is empty list
2023-12-25 22:21:34 -05:00
Hoan Luu Huu
2ec1460b4e nuance transcribe should have utteranceDetectionMode=multiple (#574)
* nuance transcribe should have utteranceDetectionMode=multiple

* nuance transcribe should have utteranceDetectionMode=multiple
2023-12-20 18:44:08 -05:00
Hoan Luu Huu
e30782ea7b fix riva transcribe issue (#570) 2023-12-20 10:25:17 -05:00
Hoan Luu Huu
83c1c07eb0 fix enqueue waithook on ws holding the session (#572)
* fix enqueue waithook on ws holding the session

* wip
2023-12-19 09:42:20 -05:00
Dave Horton
47fbc1a4a4 allow custom speech with no auth token (#571) 2023-12-18 14:51:34 -05:00
Dave Horton
7474a359a4 update to drachtio-fsmrf@3.0.33 2023-12-18 14:28:25 -05:00
Hoan Luu Huu
30977b309c punctuation for microsoft (#566)
* punctuation for microsoft

* wip
2023-12-18 08:38:05 -05:00
Hoan Luu Huu
bcb4bf43bf fix altLanguages (#567)
* fix altLanguages

* adding testcase
2023-12-16 08:35:09 -05:00
Dave Horton
077460d0e2 Feat/multiset envs (#569)
* update to drachtio-fsmrf@3.0.31

* fix prev commits
2023-12-15 15:39:15 -05:00
50 changed files with 8568 additions and 9344 deletions


@@ -6,10 +6,10 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 18
node-version: 20
- run: npm ci
- run: npm run jslint
- run: docker pull drachtio/sipp

.gitignore

@@ -42,3 +42,4 @@ ecosystem.config.js
test/credentials/*.json
run-tests.sh
run-coverage.sh
.vscode

.vscode/launch.json

@@ -1,17 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Launch Program",
"program": "${workspaceFolder}/test/index.js",
"env": {
"NODE_ENV": "test"
}
}
]
}


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021 Drachtio Communications Services, LLC
Copyright (c) 2018-2024 FirstFive8, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

app.js

@@ -109,7 +109,7 @@ const disconnect = () => {
httpServer?.on('close', resolve);
httpServer?.close();
srf.disconnect();
srf.locals.mediaservers.forEach((ms) => ms.disconnect());
srf.locals.mediaservers?.forEach((ms) => ms.disconnect());
});
};


@@ -9,7 +9,112 @@
"can't take your call",
"will get back to you",
"I'll get back to you",
"we are unable"
"we are unable",
"Unable to take your call now",
"I'll reply soon",
"I'll call back",
"I'll reach out to you as soon as possible",
"Leave a message",
"Away from phone",
"Not available now",
"I'll return call",
"On another call",
"Currently on another call",
"I will return call later",
"Busy please leave message",
"Message will be returned promptly",
"Currently unavailable to answer",
"Planning to return your call soon",
"Apologies for missing your call",
"Not by the phone at the moment",
"Expecting to return your call",
"Currently not accessible",
"Intend to call back",
"Appreciate your patience!",
"Engaged in another conversation",
"I Will respond promptly",
"Kindly leave a message",
"Currently occupied leave a message",
"Unfortunately unable to answer right now",
"Occupied at the moment",
"Not present leave a message",
"Regrettably unavailable kindly leave a message",
"Will ensure a prompt response to your message",
"Currently engaged",
"Will return your call at the earliest opportunity",
"Your message will receive my prompt attention",
"I'll respond as soon as I can",
"Your message is important please leave it after the beep",
"Away from the phone at the moment",
"Unable to answer right now",
"Engaged in another task",
"Not by the phone presently",
"I'll respond at my earliest convenience",
"Away from the phone momentarily",
"I'll return your call shortly",
"Currently not able to answer",
"Your message is important please leave it after the tone",
"I'm unable to take your call right now",
"Please leave your message for me",
"I'll get back to you soon",
"Your call has been missed",
"Please leave a detailed message for me to respond to",
"Leave a message I'll make sure to respond",
"Feel free to leave a message",
"Your call is important to me",
"I'll get back to you shortly",
"Your message will be attended to promptly",
"Not available at the moment",
"I'll be sure to get back to you",
"I'll call you back soon",
"I'll ensure a prompt response",
"Sorry for the inconvenience",
"I'll return your call",
"I'll make sure to get back to you",
"I'll call you back shortly",
"I'll return your call as soon as possible",
"Apologies for the inconvenience leave your message",
"Your call is appreciated",
"I'm unavailable to answer",
"I'm currently away",
"I'll return your call as soon as I can",
"I'm away from the phone",
"I'm currently unavailable to take your call",
"Sorry for missing your call",
"I'll ensure it receives my immediate attention",
"I'm away from the phone momentarily",
"I'll reach out to you shortly",
"Apologies for the inconvenience",
"Currently occupied",
"Unable to answer your call at the moment",
"I'll make sure to follow up with you",
"Sorry for not being available",
"I'll reach out to you as soon as I can",
"I'm currently engaged",
"I'm currently busy",
"I'm currently unavailable",
"I'll respond to you at my earliest convenience",
"Your message is appreciated",
"I'll get back to you promptly",
"I'll get back to you without delay",
"Currently away from the phone",
"I'll return your call at my earliest opportunity",
"Sorry for the missed call",
"I'll make sure to address your concerns",
"Please provide your details for a callback",
"I'll make every effort to respond promptly",
"I'll ensure it's attended to promptly",
"Away from the phone temporarily",
"I'll get back to you as soon as I return",
"Currently not in a position to answer your call",
"Your call cannot be answered at the moment",
"I'll ensure to respond as soon as I'm able",
"Your call is important please leave a message",
"Unable to answer right now please leave your message",
"Currently not accessible intending to return your call",
"I'll respond promptly to your message",
"leave a memo",
"please leave a memo"
],
"es-ES": [
"le pasamos la llamada",


@@ -120,6 +120,7 @@ const HTTP_TIMEOUT = 10000;
const HTTP_PROXY_IP = process.env.JAMBONES_HTTP_PROXY_IP;
const HTTP_PROXY_PORT = process.env.JAMBONES_HTTP_PROXY_PORT;
const HTTP_PROXY_PROTOCOL = process.env.JAMBONES_HTTP_PROXY_PROTOCOL || 'http';
const HTTP_USER_AGENT_HEADER = process.env.JAMBONES_HTTP_USER_AGENT_HEADER || 'jambonz';
const OPTIONS_PING_INTERVAL = parseInt(process.env.OPTIONS_PING_INTERVAL, 10) || 30000;
@@ -129,6 +130,10 @@ const JAMBONZ_RECORD_WS_PASSWORD = process.env.JAMBONZ_RECORD_WS_PASSWORD || pro
const JAMBONZ_DISABLE_DIAL_PAI_HEADER = process.env.JAMBONZ_DISABLE_DIAL_PAI_HEADER || false;
const JAMBONES_DISABLE_DIRECT_P2P_CALL = process.env.JAMBONES_DISABLE_DIRECT_P2P_CALL || false;
const JAMBONES_EAGERLY_PRE_CACHE_AUDIO = parseInt(process.env.JAMBONES_EAGERLY_PRE_CACHE_AUDIO, 10) || 0;
const JAMBONES_USE_FREESWITCH_TIMER_FD = process.env.JAMBONES_USE_FREESWITCH_TIMER_FD;
module.exports = {
JAMBONES_MYSQL_HOST,
JAMBONES_MYSQL_USER,
@@ -151,6 +156,7 @@ module.exports = {
JAMBONES_API_BASE_URL,
JAMBONES_TIME_SERIES_HOST,
JAMBONES_INJECT_CONTENT,
JAMBONES_EAGERLY_PRE_CACHE_AUDIO,
JAMBONES_ESL_LISTEN_ADDRESS,
JAMBONES_SBCS,
JAMBONES_OTEL_ENABLED,
@@ -193,6 +199,7 @@ module.exports = {
HTTP_PROXY_IP,
HTTP_PROXY_PORT,
HTTP_PROXY_PROTOCOL,
HTTP_USER_AGENT_HEADER,
OPTIONS_PING_INTERVAL,
RESPONSE_TIMEOUT_MS,
JAMBONES_WS_HANDSHAKE_TIMEOUT_MS,
@@ -208,5 +215,6 @@ module.exports = {
JAMBONZ_RECORD_WS_USERNAME,
JAMBONZ_RECORD_WS_PASSWORD,
JAMBONZ_DISABLE_DIAL_PAI_HEADER,
JAMBONES_DISABLE_DIRECT_P2P_CALL
JAMBONES_DISABLE_DIRECT_P2P_CALL,
JAMBONES_USE_FREESWITCH_TIMER_FD
};


@@ -30,6 +30,20 @@ const appsMap = {
}
]
}]
},
conference: {
// Dummy hook to follow later feature server logic.
call_hook: {
url: 'https://jambonz.org',
method: 'GET'
},
account_sid: '',
app_json: [{
verb: 'conference',
name: '',
beep: false,
startConferenceOnEnter: true
}]
}
};
@@ -38,6 +52,7 @@ const createJambonzApp = (type, {account_sid, name, caller_id}) => {
app.account_sid = account_sid;
switch (type) {
case 'queue':
case 'conference':
app.app_json[0].name = name;
break;
case 'user':


@@ -75,13 +75,19 @@ module.exports = function(srf, logger) {
req.locals.application_sid = application_sid;
}
// check for call to queue
if (uri.user?.startsWith('queue-') && req.locals.originatingUser && clientDb?.allow_direct_queue_calling) {
else if (uri.user?.startsWith('queue-') && req.locals.originatingUser && clientDb?.allow_direct_queue_calling) {
const queue_name = uri.user.match(/queue-(.*)/)[1];
logger.debug(`got Queue from Request URI header: ${queue_name}`);
req.locals.queue_name = queue_name;
}
// check for call to conference
else if (uri.user?.startsWith('conference-') && req.locals.originatingUser && clientDb?.allow_direct_app_calling) {
const conference_id = uri.user.match(/conference-(.*)/)[1];
logger.debug(`got Conference from Request URI header: ${conference_id}`);
req.locals.conference_id = conference_id;
}
// check for call to registered user
if (!JAMBONES_DISABLE_DIRECT_P2P_CALL && req.locals.originatingUser && clientDb?.allow_direct_user_calling) {
else if (!JAMBONES_DISABLE_DIRECT_P2P_CALL && req.locals.originatingUser && clientDb?.allow_direct_user_calling) {
const arr = /^(.*)@(.*)/.exec(req.locals.originatingUser);
if (arr) {
const sipRealm = arr[2];
@@ -97,7 +103,7 @@ module.exports = function(srf, logger) {
if (req.has('X-MS-Teams-Tenant-FQDN')) req.locals.msTeamsTenant = req.get('X-MS-Teams-Tenant-FQDN');
if (req.has('X-Cisco-Recording-Participant')) {
const ciscoParticipants = req.get('X-Cisco-Recording-Participant');
const regex = /sip:[\d]+@[\d]+\.[\d]+\.[\d]+\.[\d]+/g;
const regex = /sip:[a-zA-Z0-9]+@[a-zA-Z0-9.-_]+/g;
const sipURIs = ciscoParticipants.match(regex);
logger.info(`X-Cisco-Recording-Participant : ${sipURIs} `);
if (sipURIs && sipURIs.length > 0) {
@@ -237,6 +243,9 @@ module.exports = function(srf, logger) {
logger.debug(`calling to registered user ${req.locals.called_user}, generating dial app`);
app = createJambonzApp('user',
{account_sid, name: req.locals.called_user, caller_id: req.locals.callingNumber});
} else if (req.locals.conference_id) {
logger.debug(`calling to conference ${req.locals.conference_id}, generating conference app`);
app = createJambonzApp('conference', {account_sid, name: req.locals.conference_id});
} else if (req.locals.application_sid) {
app = await lookupAppBySid(req.locals.application_sid);
} else if (req.locals.originatingUser) {
@@ -343,6 +352,15 @@ module.exports = function(srf, logger) {
direction: CallDirection.Inbound,
traceId: rootSpan.traceId
});
// if transferred call contains callInfo, let update original data to newly created callInfo in this instance.
if (app.transferredCall && app.callInfo) {
req.locals.callInfo.callerName = app.callInfo.callerName;
req.locals.callInfo.from = app.callInfo.from;
req.locals.callInfo.to = app.callInfo.to;
req.locals.callInfo.originatingSipIp = app.callInfo.originatingSipIp;
req.locals.callInfo.originatingSipTrunkName = app.callInfo.originatingSipTrunkName;
delete app.callInfo;
}
next();
} catch (err) {
span.end();
@@ -382,7 +400,7 @@ module.exports = function(srf, logger) {
},
recognizer: {
vendor: app.speech_recognizer_vendor,
...(app.speech_synthesis_label && {label: app.speech_synthesis_label}),
...(app.speech_recognizer_label && {label: app.speech_recognizer_label}),
language: app.speech_recognizer_language,
...(app.fallback_speech_recognizer_vendor && {fallback_vendor: app.fallback_speech_recognizer_vendor}),
...(app.fallback_speech_recognizer_label && {fallback_label: app.fallback_speech_recognizer_label}),


@@ -53,16 +53,24 @@ class AdultingCallSession extends CallSession {
}
_callerHungup() {
this._hangup('caller');
}
_jambonzHangup() {
this._hangup();
}
_hangup(terminatedBy = 'jambonz') {
if (this.dlg.connectTime) {
const duration = moment().diff(this.dlg.connectTime, 'seconds');
this.rootSpan.setAttributes({'call.termination': 'hangup by caller'});
this.callInfo.callTerminationBy = 'caller';
this.rootSpan.setAttributes({'call.termination': `hangup by ${terminatedBy}`});
this.callInfo.callTerminationBy = terminatedBy;
this.emit('callStatusChange', {
callStatus: CallStatus.Completed,
duration
});
}
this.logger.info('InboundCallSession: caller hung up');
this.logger.info(`InboundCallSession: ${terminatedBy} hung up`);
this._callReleased();
this.req.removeAllListeners('cancel');
}

File diff suppressed because it is too large


@@ -34,6 +34,9 @@ class ConfirmCallSession extends CallSession {
_callerHungup() {
}
_jambonzHangup() {
}
}


@@ -35,11 +35,21 @@ class InboundCallSession extends CallSession {
_onCancel() {
this.rootSpan.setAttributes({'call.termination': 'caller abandoned'});
this.callInfo.callTerminationBy = 'caller';
const wasEarlyMedia = this.callInfo.callStatus === 'early-media';
this._notifyCallStatusChange({
callStatus: CallStatus.NoAnswer,
sipStatus: 487,
sipReason: 'Request Terminated'
});
if (wasEarlyMedia) {
const duration = 0; // Set duration to 0 for early media termination, required param
this._notifyCallStatusChange({
callStatus: CallStatus.Completed,
sipStatus: 487,
sipReason: 'Call Terminated During Early Media',
duration: duration
});
}
this._callReleased();
}
@@ -67,15 +77,27 @@ class InboundCallSession extends CallSession {
* This is invoked when the caller hangs up, in order to calculate the call duration.
*/
_callerHungup() {
this._hangup('caller');
}
_jambonzHangup() {
this.dlg?.destroy();
}
_hangup(terminatedBy = 'jambonz') {
if (this.dlg === null) {
this.logger.info('InboundCallSession:_hangup - race condition, dlg cleared by app hangup');
return;
}
this.logger.info(`InboundCallSession: ${terminatedBy} hung up`);
assert(this.dlg.connectTime);
const duration = moment().diff(this.dlg.connectTime, 'seconds');
this.rootSpan.setAttributes({'call.termination': 'hangup by caller'});
this.callInfo.callTerminationBy = 'caller';
this.rootSpan.setAttributes({'call.termination': `hangup by ${terminatedBy}`});
this.callInfo.callTerminationBy = terminatedBy;
this.emit('callStatusChange', {
callStatus: CallStatus.Completed,
duration
});
this.logger.info('InboundCallSession: caller hung up');
this._callReleased();
this.req.removeAllListeners('cancel');
}


@@ -1,6 +1,9 @@
const CallSession = require('./call-session');
const {CallStatus} = require('../utils/constants');
const moment = require('moment');
const {parseUri} = require('drachtio-srf');
const { normalizeJambones } = require('@jambonz/verb-specifications');
const makeTask = require('../tasks/make_task');
/**
* @classdesc Subclass of CallSession. This represents a CallSession that is
@@ -42,20 +45,81 @@ class RestCallSession extends CallSession {
setDialog(dlg) {
this.dlg = dlg;
dlg.on('destroy', this._callerHungup.bind(this));
dlg.on('refer', this._onRefer.bind(this));
this.wrapDialog(dlg);
}
/**
* global referHook
*/
set referHook(hook) {
this._referHook = hook;
}
/**
* This is invoked when the called party sends REFER to Jambonz.
*/
async _onRefer(req, res) {
if (this._referHook) {
try {
const to = parseUri(req.getParsedHeader('Refer-To').uri);
const by = parseUri(req.getParsedHeader('Referred-By').uri);
const b3 = this.b3;
const httpHeaders = b3 && {b3};
const json = await this.requestor.request('verb:hook', this._referHook, {
...(this.callInfo.toJSON()),
refer_details: {
sip_refer_to: req.get('Refer-To'),
sip_referred_by: req.get('Referred-By'),
sip_user_agent: req.get('User-Agent'),
refer_to_user: to.scheme === 'tel' ? to.number : to.user,
referred_by_user: by.scheme === 'tel' ? by.number : by.user,
referring_call_sid: this.callSid,
referred_call_sid: null,
}
}, httpHeaders);
if (json && Array.isArray(json)) {
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
if (tasks && tasks.length > 0) {
this.logger.info('RestCallSession:handleRefer received REFER, get new tasks');
this.replaceApplication(tasks);
if (this.wakeupResolver) {
this.wakeupResolver({reason: 'RestCallSession: referHook new taks'});
this.wakeupResolver = null;
}
}
}
res.send(202);
this.logger.info('RestCallSession:handleRefer - sent 202 Accepted');
} catch (err) {
this.logger.error({err}, 'RestCallSession:handleRefer - error while asking referHook');
res.send(err.statusCode || 501);
}
} else {
res.send(501);
}
}
/**
* This is invoked when the called party hangs up, in order to calculate the call duration.
*/
_callerHungup() {
this._hangup('caller');
}
_jambonzHangup() {
this._hangup();
}
_hangup(terminatedBy = 'jambonz') {
if (this.restDialTask) {
this.restDialTask.turnOffAmd();
}
this.callInfo.callTerminationBy = 'caller';
this.callInfo.callTerminationBy = terminatedBy;
const duration = moment().diff(this.dlg.connectTime, 'seconds');
this.emit('callStatusChange', {callStatus: CallStatus.Completed, duration});
this.logger.debug('RestCallSession: called party hung up');
this.logger.debug(`RestCallSession: called party hung up by ${terminatedBy}`);
this._callReleased();
}

lib/tasks/answer.js (new file)

@@ -0,0 +1,22 @@
const Task = require('./task');
const {TaskName, TaskPreconditions} = require('../utils/constants');
/**
* Answer the call.
* Note: This is rarely used, as the call is typically answered automatically when required by the app,
* but it can be useful to force an answer before a pause in some cases
*/
class TaskAnswer extends Task {
constructor(logger, opts) {
super(logger, opts);
this.preconditions = TaskPreconditions.Endpoint;
}
get name() { return TaskName.Answer; }
async exec(cs) {
super.exec(cs);
}
}
module.exports = TaskAnswer;
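A hypothetical application using this verb, forcing an answer before a pause as the class comment suggests (the verb payload is assumed to follow the usual jambonz app shape):

```js
// Hypothetical app: force the call to be answered before pausing, rather than
// pausing in early media.
const app = [
  {verb: 'answer'},
  {verb: 'pause', length: 2},   // seconds
  {verb: 'say', text: 'Thanks for calling.'}
];
```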


@@ -6,6 +6,7 @@ const { normalizeJambones } = require('@jambonz/verb-specifications');
const makeTask = require('./make_task');
const bent = require('bent');
const assert = require('assert');
const HttpRequestor = require('../utils/http-requestor');
const WAIT = 'wait';
const JOIN = 'join';
const START = 'start';
@@ -60,6 +61,8 @@ class Conference extends Task {
this.emitter = new Emitter();
this.results = {};
this.coaching = [];
this.speakOnlyTo = this.data.speakOnlyTo;
// transferred from another server in order to bridge to a local caller?
if (this.data._ && this.data._.connectTime) {
@@ -348,16 +351,29 @@ class Conference extends Task {
Object.assign(opts, {flags: {
...(this.endConferenceOnExit && {endconf: true}),
...(this.startConferenceOnEnter && {moderator: true}),
...(this.joinMuted && {joinMuted: true}),
//https://developer.signalwire.com/freeswitch/FreeSWITCH-Explained/Modules/mod_conference_3965534/
// mute | Enter conference muted
...((this.joinMuted || this.speakOnlyTo) && {mute: true}),
}});
/**
* Note on the above: if we are joining in "coaching" mode (ie only going to heard by a subset of participants)
* then we join muted temporarily, and then unmute ourselves once we have identified the subset of participants
* to whom we will be speaking.
*/
}
try {
const {memberId, confUuid} = await this.ep.join(this.confName, opts);
this.logger.debug({memberId, confUuid}, `Conference:_joinConference: successfully joined ${this.confName}`);
this.memberId = memberId;
this.memberId = parseInt(memberId, 10);
this.confUuid = confUuid;
// set a tag for this member, if provided
if (this.data.memberTag) {
this.setMemberTag(this.data.memberTag);
}
cs.setConferenceDetails(memberId, this.confName, confUuid);
const response = await this.ep.api('conference', [this.confName, 'get', 'count']);
if (response.body && /\d+/.test(response.body)) this.participantCount = parseInt(response.body);
@@ -384,6 +400,9 @@ class Conference extends Task {
.catch((err) => {});
}
if (this.speakOnlyTo) {
this.setCoachMode(this.speakOnlyTo);
}
} catch (err) {
this.logger.error(err, `Failed to join conference ${this.confName}`);
throw err;
@@ -428,7 +447,15 @@ class Conference extends Task {
}
}
async doConferenceHold(cs, opts) {
doConferenceMute(cs, opts) {
assert (cs.isInConference);
const mute = opts.conf_mute_status === 'mute';
this.ep.api(`conference ${this.confName} ${mute ? 'mute' : 'unmute'} ${this.memberId}`)
.catch((err) => this.logger.info({err}, 'Error muting or unmuting participant'));
}
doConferenceHold(cs, opts) {
assert (cs.isInConference);
const {conf_hold_status, wait_hook} = opts;
@@ -465,6 +492,43 @@ class Conference extends Task {
}
}
async doConferenceParticipantAction(cs, opts) {
const {action, tag} = opts;
switch (action) {
case 'tag':
await this.setMemberTag(tag);
break;
case 'untag':
await this.clearMemberTag();
break;
case 'coach':
await this.setCoachMode(tag);
break;
case 'uncoach':
await this.clearCoachMode();
break;
case 'hold':
this.doConferenceHold(cs, {conf_hold_status: 'hold'});
break;
case 'unhold':
this.doConferenceHold(cs, {conf_hold_status: 'unhold'});
break;
case 'mute':
this.doConferenceMute(cs, {conf_mute_status: 'mute'});
break;
case 'unmute':
this.doConferenceMute(cs, {conf_mute_status: 'unmute'});
break;
case 'kick':
this.kickMember(cs);
break;
default:
this.logger.info(`Conference:doConferenceParticipantAction - unhandled action ${action}`);
break;
}
}
async _doWaitHookWhileOnHold(cs, dlg, wait_hook) {
do {
try {
@@ -511,7 +575,7 @@ class Conference extends Task {
_normalizeHook(cs, hook) {
if (typeof hook === 'object') return hook;
const url = hook.startsWith('/') ?
`${cs.application.requestor.baseUrl}${hook}` :
`${cs.application.requestor instanceof HttpRequestor ? cs.application.requestor.baseUrl : ''}${hook}` :
hook;
return { url } ;
@@ -530,7 +594,7 @@ class Conference extends Task {
const response = await this.ep.api('conference', [this.confName, 'get', 'count']);
if (response.body && confNoMatch(response.body)) this.participantCount = 0;
else if (response.body && /^\d+$/.test(response.body)) this.participantCount = parseInt(response.body) - 1;
this.logger.debug({response}, `Conference:_doFinalMemberCheck conference count ${this.participantCount}`);
this.logger.debug(`Conference:_doFinalMemberCheck conference count ${this.participantCount}`);
} catch (err) {
this.logger.info({err}, 'Conference:_doFinalMemberCheck error retrieving count (we were probably kicked');
}
@@ -642,11 +706,19 @@ class Conference extends Task {
}
// conference event handlers
_onAddMember(logger, cs, evt) {
const memberId = parseInt(evt.getHeader('Member-ID')) ;
if (this.speakOnlyTo) {
logger.debug(`Conference:_onAddMember - member ${memberId} added to ${this.confName}, updating coaching mode`);
this.setCoachMode(this.speakOnlyTo).catch(() => {});
}
else logger.debug(`Conference:_onAddMember - member ${memberId} added to conference ${this.confName}`);
}
_onDelMember(logger, cs, evt) {
const memberId = parseInt(evt.getHeader('Member-ID')) ;
this.participantCount = parseInt(evt.getHeader('Conference-Size'));
if (memberId === this.memberId) {
this.logger.info(`Conference:_onDelMember - I was dropped from conference ${this.confName}, task is complete`);
logger.info(`Conference:_onDelMember - I was dropped from conference ${this.confName}, task is complete`);
this.replaceEndpointAndEnd(cs);
}
}
@@ -675,6 +747,99 @@ class Conference extends Task {
}
}
_onTag(logger, cs, evt) {
const memberId = parseInt(evt.getHeader('Member-ID')) ;
const tag = evt.getHeader('Tag') || '';
if (memberId !== this.memberId && this.speakOnlyTo) {
logger.info(`Conference:_onTag - member ${memberId} set tag to '${tag }'; updating coach mode accordingly`);
this.setCoachMode(this.speakOnlyTo).catch(() => {});
}
}
/**
* Set the conference to "coaching" mode, where the audio of the participant is only heard
* by a subset of the participants in the conference.
* We do this by first getting all of the members who do *not* have this tag, and then
* we configure this members audio to not be sent to them.
* @param {string} speakOnlyTo - tag of the members who should receive our audio
*
* N.B.: this feature requires jambonz patches to freeswitch mod_conference
*/
async setCoachMode(speakOnlyTo) {
this.speakOnlyTo = speakOnlyTo;
if (!this.memberId) {
this.logger.info('Conference:_setCoachMode: no member id yet');
return;
}
try {
const members = (await this.ep.getNonMatchingConfParticipants(this.confName, speakOnlyTo))
.filter((m) => m !== this.memberId);
if (members.length === 0) {
this.logger.info({members}, 'Conference:_setCoachMode: all participants have the tag, so all will hear me');
if (this.coaching.length) {
await this.ep.api('conference', [this.confName, 'relate', this.memberId, this.coaching.join(','), 'clear']);
this.coaching = [];
}
}
else {
const memberList = members.join(',');
this.logger.info(`Conference:_setCoachMode: my audio will NOT be sent to ${memberList}`);
await this.ep.api('conference', [this.confName, 'relate', this.memberId, memberList, 'nospeak']);
this.coaching = members;
}
} catch (err) {
this.logger.error({err, speakOnlyTo}, '_setCoachMode: Error');
}
}
async clearCoachMode() {
if (!this.memberId) return;
try {
if (this.coaching.length === 0) {
this.logger.info('Conference:_clearCoachMode: no coaching mode to clear');
}
else {
const memberList = this.coaching.join(',');
this.logger.info(`Conference:_clearCoachMode: now sending my audio to all, including ${memberList}`);
await this.ep.api('conference', [this.confName, 'relate', this.memberId, memberList, 'clear']);
}
this.speakOnlyTo = null;
this.coaching = [];
} catch (err) {
this.logger.error({err}, '_clearCoachMode: Error');
}
}
async setMemberTag(tag) {
try {
await this.ep.api('conference', [this.confName, 'tag', this.memberId, tag]);
this.logger.info(`Conference:setMemberTag: set tag for ${this.memberId} to ${tag}`);
this.memberTag = tag;
} catch (err) {
this.logger.error({err}, `Error setting tag for ${this.memberId} to ${tag}`);
}
}
async clearMemberTag() {
try {
await this.ep.api('conference', [this.confName, 'tag', this.memberId]);
this.logger.info(`Conference:setMemberTag: clearing tag for ${this.memberId}`);
this.memberTag = null;
} catch (err) {
this.logger.error({err}, `Error clearing tag for ${this.memberId}`);
}
}
async kickMember(cs) {
assert(cs.isInConference);
try {
await this.ep.api('conference', [this.confName, 'kick', this.memberId]);
this.logger.info(`Conference:kickMember: kick ${this.memberId} out of conference ${this.confName}`);
} catch (err) {
this.logger.error({err}, `Error kicking member out of conference for ${this.memberId}`);
}
}
}
module.exports = Conference;


@@ -1,15 +1,22 @@
const Task = require('./task');
const {TaskName, TaskPreconditions} = require('../utils/constants');
const parseDecibels = require('../utils/parse-decibels');
class TaskConfig extends Task {
constructor(logger, opts) {
super(logger, opts);
[
'synthesizer',
'recognizer',
'bargeIn',
'record',
'listen'
'listen',
'transcribe',
'fillerNoise',
'actionHookDelayAction',
'boostAudioSignal',
'vad'
].forEach((k) => this[k] = this.data[k] || {});
if ('notifyEvents' in this.data) {
@@ -30,6 +37,13 @@ class TaskConfig extends Task {
if (this.bargeIn[k]) this.gatherOpts[k] = this.bargeIn[k];
});
}
if (this.transcribe?.enable) {
this.transcribeOpts = {
verb: 'transcribe',
...this.transcribe
};
delete this.transcribeOpts.enable;
}
if (this.data.reset) {
if (typeof this.data.reset === 'string') this.data.reset = [this.data.reset];
@@ -37,7 +51,12 @@ class TaskConfig extends Task {
else this.data.reset = [];
if (this.bargeIn.sticky) this.autoEnable = true;
this.preconditions = (this.bargeIn.enable || this.record?.action || this.listen?.url || this.data.amd) ?
this.preconditions = (this.bargeIn.enable ||
this.record?.action ||
this.listen?.url ||
this.data.amd ||
'boostAudioSignal' in this.data ||
this.transcribe?.enable) ?
TaskPreconditions.Endpoint :
TaskPreconditions.None;
@@ -50,6 +69,10 @@ class TaskConfig extends Task {
get hasRecognizer() { return Object.keys(this.recognizer).length; }
get hasRecording() { return Object.keys(this.record).length; }
get hasListen() { return Object.keys(this.listen).length; }
get hasTranscribe() { return Object.keys(this.transcribe).length; }
get hasDub() { return Object.keys(this.dub).length; }
get hasVad() { return Object.keys(this.vad).length; }
get hasFillerNoise() { return Object.keys(this.fillerNoise).length; }
get summary() {
const phrase = [];
@@ -72,9 +95,14 @@ class TaskConfig extends Task {
if (this.hasListen) {
phrase.push(this.listen.enable ? `listen ${this.listen.url}` : 'stop listen');
}
if (this.hasTranscribe) {
phrase.push(this.transcribe.enable ? `transcribe ${this.transcribe.transcriptionHook}` : 'stop transcribe');
}
if (this.hasFillerNoise) phrase.push(`fillerNoise ${this.fillerNoise.enable ? 'on' : 'off'}`);
if (this.data.amd) phrase.push('enable amd');
if (this.notifyEvents) phrase.push(`event notification ${this.notifyEvents ? 'on' : 'off'}`);
if (this.onHoldMusic) phrase.push(`onHoldMusic: ${this.onHoldMusic}`);
if ('boostAudioSignal' in this.data) phrase.push(`setGain ${this.data.boostAudioSignal}`);
return `${this.name}{${phrase.join(',')}}`;
}
@@ -113,9 +141,8 @@ class TaskConfig extends Task {
cs.speechSynthesisVendor = this.synthesizer.vendor !== 'default'
? this.synthesizer.vendor
: cs.speechSynthesisVendor;
cs.speechSynthesisLabel = this.synthesizer.label !== 'default'
? this.synthesizer.label
: cs.speechSynthesisLabel;
cs.speechSynthesisLabel = this.synthesizer.label === 'default'
? cs.speechSynthesisLabel : this.synthesizer.label;
cs.speechSynthesisLanguage = this.synthesizer.language !== 'default'
? this.synthesizer.language
: cs.speechSynthesisLanguage;
@@ -127,15 +154,16 @@ class TaskConfig extends Task {
cs.fallbackSpeechSynthesisVendor = this.synthesizer.fallbackVendor !== 'default'
? this.synthesizer.fallbackVendor
: cs.fallbackSpeechSynthesisVendor;
cs.fallbackSpeechSynthesisLabel = this.synthesizer.fallbackLabel !== 'default'
? this.synthesizer.fallbackLabel
: cs.fallbackSpeechSynthesisLabel;
cs.fallbackSpeechSynthesisLabel = this.synthesizer.fallbackLabel === 'default'
? cs.fallbackSpeechSynthesisLabel : this.synthesizer.fallbackLabel;
cs.fallbackSpeechSynthesisLanguage = this.synthesizer.fallbackLanguage !== 'default'
? this.synthesizer.fallbackLanguage
: cs.fallbackSpeechSynthesisLanguage;
cs.fallbackSpeechSynthesisVoice = this.synthesizer.fallbackVoice !== 'default'
? this.synthesizer.fallbackVoice
: cs.fallbackSpeechSynthesisVoice;
// new vendor is set, reset fallback vendor
cs.hasFallbackTts = false;
this.logger.info({synthesizer: this.synthesizer}, 'Config: updated synthesizer');
}
if (this.hasRecognizer) {
@@ -143,9 +171,8 @@ class TaskConfig extends Task {
cs.speechRecognizerVendor = this.recognizer.vendor !== 'default'
? this.recognizer.vendor
: cs.speechRecognizerVendor;
cs.speechRecognizerLabel = this.recognizer.label !== 'default'
? this.recognizer.label
: cs.speechRecognizerLabel;
cs.speechRecognizerLabel = this.recognizer.label === 'default'
? cs.speechRecognizerLabel : this.recognizer.label;
cs.speechRecognizerLanguage = this.recognizer.language !== 'default'
? this.recognizer.language
: cs.speechRecognizerLanguage;
@@ -154,9 +181,9 @@ class TaskConfig extends Task {
cs.fallbackSpeechRecognizerVendor = this.recognizer.fallbackVendor !== 'default'
? this.recognizer.fallbackVendor
: cs.fallbackSpeechRecognizerVendor;
cs.fallbackSpeechRecognizerLabel = this.recognizer.fallbackLabel === 'default' ?
cs.fallbackSpeechRecognizerLabel :
this.recognizer.fallbackLabel;
cs.fallbackSpeechRecognizerLanguage = this.recognizer.fallbackLanguage !== 'default'
? this.recognizer.fallbackLanguage
: cs.fallbackSpeechRecognizerLanguage;
@@ -180,6 +207,8 @@ class TaskConfig extends Task {
if ('punctuation' in this.recognizer) {
cs.globalSttPunctuation = this.recognizer.punctuation;
}
// new vendor is set, reset fallback vendor
cs.hasFallbackAsr = false;
this.logger.info({
recognizer: this.recognizer,
isContinuousAsr: cs.isContinuousAsr
@@ -212,15 +241,63 @@ class TaskConfig extends Task {
const {enable, ...opts} = this.listen;
if (enable) {
this.logger.debug({opts}, 'Config: enabling listen');
cs.startBackgroundTask('listen', {verb: 'listen', ...opts});
} else {
this.logger.info('Config: disabling listen');
cs.stopBackgroundTask('listen');
}
}
if (this.hasTranscribe) {
if (this.transcribe.enable) {
if (!this.transcribeOpts.recognizer) {
this.transcribeOpts.recognizer = this.hasRecognizer ?
this.recognizer :
{
vendor: cs.speechRecognizerVendor,
language: cs.speechRecognizerLanguage
};
}
this.logger.debug(this.transcribeOpts, 'Config: enabling transcribe');
cs.startBackgroundTask('transcribe', this.transcribeOpts);
} else {
this.logger.info('Config: disabling transcribe');
cs.stopBackgroundTask('transcribe');
}
}
if (Object.keys(this.actionHookDelayAction).length !== 0) {
cs.actionHookDelayProperties = this.actionHookDelayAction;
}
if (this.data.sipRequestWithinDialogHook) {
cs.sipRequestWithinDialogHook = this.data.sipRequestWithinDialogHook;
}
if ('boostAudioSignal' in this.data) {
const db = parseDecibels(this.data.boostAudioSignal);
this.logger.info(`Config: boosting audio signal by ${db} dB`);
const args = [ep.uuid, 'setGain', db];
ep.api('uuid_dub', args).catch((err) => {
this.logger.error(err, 'Error boosting audio signal');
});
}
if (this.hasFillerNoise) {
const {enable, ...opts} = this.fillerNoise;
this.logger.info({fillerNoise: this.fillerNoise}, 'Config: fillerNoise');
if (!enable) cs.disableFillerNoise();
else {
cs.enableFillerNoise(opts);
}
}
if (this.hasVad) {
cs.vad = {
enable: this.vad.enable || false,
voiceMs: this.vad.voiceMs || 250,
silenceMs: this.vad.silenceMs || 150,
strategy: this.vad.strategy || 'one-shot',
mode: this.vad.mode || 2
};
}
}
async kill(cs) {

View File
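Taken together, the config changes above add session-level transcribe, fillerNoise, vad, and boostAudioSignal handling. A sketch of a config verb payload exercising the new properties (property names taken from the code above; values are illustrative, not defaults):

  {
    "verb": "config",
    "boostAudioSignal": "+6 dB",
    "transcribe": {
      "enable": true,
      "transcriptionHook": "/transcripts"
    },
    "fillerNoise": {
      "enable": true,
      "url": "https://example.com/typing.mp3",
      "startDelaySecs": 2
    },
    "vad": {
      "enable": true,
      "voiceMs": 250,
      "silenceMs": 150,
      "strategy": "one-shot",
      "mode": 2
    }
  }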

@@ -73,7 +73,8 @@ class TaskDequeue extends Task {
try {
let url;
if (this.callSid) {
const r = await retrieveByPatternSortedSet(this.queueName, `*${this.callSid}`);
url = r[0];
} else {
url = await retrieveFromSortedSet(this.queueName);
}
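The fix reflects that retrieveByPatternSortedSet resolves to an array of matching members rather than a single value, so the first match must be picked off. A minimal sketch of the assumed contract (queue name and callSid are hypothetical):

  // pattern lookup: returns all members whose value matches '*<callSid>'
  const r = await retrieveByPatternSortedSet('queue:support', '*CA1234');
  // e.g. one element per matching queue entry, [] when nothing matches
  const url = r[0]; // undefined if the queue holds no matching call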

View File

@@ -14,6 +14,7 @@ const sessionTracker = require('../session/session-tracker');
const DtmfCollector = require('../utils/dtmf-collector');
const ConfirmCallSession = require('../session/confirm-call-session');
const dbUtils = require('../utils/db-utils');
const parseDecibels = require('../utils/parse-decibels');
const debug = require('debug')('jambonz:feature-server');
const {parseUri} = require('drachtio-srf');
const {ANCHOR_MEDIA_ALWAYS, JAMBONZ_DISABLE_DIAL_PAI_HEADER} = require('../config');
@@ -100,6 +101,8 @@ class TaskDial extends Task {
this.referHook = this.data.referHook;
this.dtmfHook = this.data.dtmfHook;
this.proxy = this.data.proxy;
this.tag = this.data.tag;
this.boostAudioSignal = this.data.boostAudioSignal;
if (this.dtmfHook) {
const {parentDtmfCollector, childDtmfCollector} = parseDtmfOptions(logger, this.data.dtmfCapture || {});
@@ -117,6 +120,9 @@ class TaskDial extends Task {
if (this.data.transcribe) {
this.transcribeTask = makeTask(logger, {'transcribe' : this.data.transcribe}, this);
}
if (this.data.dub && Array.isArray(this.data.dub) && this.data.dub.length > 0) {
this.dubTasks = this.data.dub.map((d) => makeTask(logger, {'dub': d}, this));
}
this.results = {};
this.bridged = false;
@@ -138,15 +144,17 @@ class TaskDial extends Task {
get name() { return TaskName.Dial; }
get isOnHold() {
return this.isIncomingLegHold || this.isOutgoingLegHold;
}
get isOnHoldEnabled() {
return !!this.data.onHoldHook;
}
get canReleaseMedia() {
const keepAnchor = this.data.anchorMedia ||
this.cs.isBackGroundListen ||
this.cs.onHoldMusic ||
ANCHOR_MEDIA_ALWAYS ||
this.listenTask ||
this.dubTasks ||
this.transcribeTask ||
this.startAmd;
@@ -323,7 +331,7 @@ class TaskDial extends Task {
const by = parseUri(req.getParsedHeader('Referred-By').uri);
this.logger.info({to}, 'refer to parsed');
const json = await cs.requestor.request('verb:hook', this.referHook, {
...(callInfo.toJSON()),
refer_details: {
sip_refer_to: req.get('Refer-To'),
sip_referred_by: req.get('Referred-By'),
@@ -549,9 +557,9 @@ class TaskDial extends Task {
const str = this.callerId || req.callingNumber || '';
const callingNumber = str.startsWith('+') ? str.substring(1) : str;
const voip_carrier_sid = await lookupCarrierByPhoneNumber(cs.accountSid, callingNumber);
if (voip_carrier_sid) {
this.logger.info(
`Dial:_attemptCalls: selected voip_carrier_sid ${voip_carrier_sid} for callingNumber: ${callingNumber}`);
opts.headers['X-Requested-Carrier-Sid'] = voip_carrier_sid;
}
}
@@ -570,7 +578,8 @@ class TaskDial extends Task {
accountInfo: cs.accountInfo,
rootSpan: cs.rootSpan,
startSpan: this.startSpan.bind(this),
dialTask: this,
onHoldMusic: this.cs.onHoldMusic
});
this.dials.set(sd.callSid, sd);
@@ -627,6 +636,8 @@ class TaskDial extends Task {
await this._connectSingleDial(cs, sd);
} catch (err) {
this.logger.info({err}, 'Dial:_attemptCalls - Error calling _connectSingleDial ');
sd.removeAllListeners();
this.kill(cs);
}
})
.on('decline', () => {
@@ -679,22 +690,43 @@ class TaskDial extends Task {
async _onReinvite(req, res) {
try {
let isHandled = false;
if (this.isOnHoldEnabled) {
if (isOnhold(req.body)) {
this.logger.debug('Dial: _onReinvite receive hold Request');
if (!this.epOther && !this.ep) {
this.logger.debug(`Dial: _onReinvite receive hold Request,
media already released, reconnect media server`);
// update caller leg for new SDP from callee.
await this.cs.handleReinviteAfterMediaReleased(req, res);
// Freeswitch media is released, reconnect
await this.reAnchorMedia(this.cs, this.sd);
this.isOutgoingLegHold = true;
} else {
this.logger.debug('Dial: _onReinvite receive hold Request, update SDP');
const newSdp = await this.ep.modify(req.body);
res.send(200, {body: newSdp});
}
isHandled = true;
// Media already connected, ask for onHoldHook
this._onHoldHook(req);
} else if (!isOnhold(req.body)) {
this.logger.debug('Dial: _onReinvite receive unhold Request');
if (this.epOther && this.ep && this.isOutgoingLegHold && this.canReleaseMedia) {
this.logger.debug('Dial: _onReinvite receive unhold Request, release media');
// Offhold, time to release media
const newSdp = await this.ep.modify(req.body);
await res.send(200, {body: newSdp});
await this._releaseMedia(this.cs, this.sd);
this.isOutgoingLegHold = false;
} else {
this.logger.debug('Dial: _onReinvite receive unhold Request, update media server');
const newSdp = await this.ep.modify(req.body);
res.send(200, {body: newSdp});
}
if (this._onHoldSession) {
this._onHoldSession.kill();
}
isHandled = true;
this.isOutgoingLegHold = false;
}
}
if (!isHandled) {
@@ -755,6 +787,17 @@ class TaskDial extends Task {
dialCallSid: sd.callSid,
});
if (this.dubTasks) {
for (const dub of this.dubTasks) {
try {
await dub.exec(cs, {ep: sd.ep});
}
catch (err) {
this.logger.error({err}, 'Dial:_selectSingleDial - error executing dubTask');
}
}
}
if (this.parentDtmfCollector) this._installDtmfDetection(cs, cs.dlg);
if (this.childDtmfCollector) this._installDtmfDetection(cs, this.dlg);
if (cs.sipRequestWithinDialogHook) this._initSipIndialogRequestListener(cs, this.dlg);
@@ -769,6 +812,18 @@ class TaskDial extends Task {
}
}
/* boost audio signal if requested */
if (this.boostAudioSignal) {
try {
const db = parseDecibels(this.boostAudioSignal);
this.logger.info(`Dial: boosting audio signal by ${db} dB`);
const args = [this.ep.uuid, 'setGain', db];
await this.ep.api('uuid_dub', args);
} catch (err) {
this.logger.info({err}, 'Dial:_selectSingleDial - Error boosting audio signal');
}
}
/* if we can release the media back to the SBC, do so now */
if (this.canReleaseMedia) setTimeout(this._releaseMedia.bind(this, cs, sd), 200);
}
@@ -811,23 +866,34 @@ class TaskDial extends Task {
this.epOther = cs.ep;
}
// Handle RE-INVITE hold from caller leg.
async handleReinviteAfterMediaReleased(req, res) {
let isHandled = false;
if (this.isOnHoldEnabled) {
if (isOnhold(req.body)) {
if (!this.epOther && !this.ep) {
// update callee leg for new SDP from caller.
const sdp = await this.dlg.modify(req.body);
res.send(200, {body: sdp});
// Onhold but media is already released, reconnect
await this.reAnchorMedia(this.cs, this.sd);
isHandled = true;
this.isIncomingLegHold = true;
}
this._onHoldHook(req);
} else if (!isOnhold(req.body)) {
if (this.epOther && this.ep && this.isIncomingLegHold && this.canReleaseMedia) {
// Offhold, time to release media
const newSdp = await this.epOther.modify(req.body);
await res.send(200, {body: newSdp});
await this._releaseMedia(this.cs, this.sd);
isHandled = true;
}
this.isIncomingLegHold = false;
if (this._onHoldSession) {
this._onHoldSession.kill();
}
}
}
if (!isHandled) {
@@ -846,7 +912,7 @@ class TaskDial extends Task {
});
}
async _onHoldHook(req, allowed = [TaskName.Play, TaskName.Say, TaskName.Pause]) {
if (this.data.onHoldHook) {
// send silence for keep Voice quality
await this.epOther.play('silence_stream://500');
@@ -856,7 +922,13 @@ class TaskDial extends Task {
const b3 = this.getTracingPropagation();
const httpHeaders = b3 && {b3};
const json = await this.cs.application.requestor.
request('verb:hook', this.data.onHoldHook, {
...this.cs.callInfo.toJSON(),
hold_detail: {
from: req.get('From'),
to: req.get('To')
}
}, httpHeaders);
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
allowedTasks = tasks.filter((t) => allowed.includes(t.name));
if (tasks.length !== allowedTasks.length) {
@@ -865,7 +937,7 @@ class TaskDial extends Task {
}
this.logger.debug(`DialTask:_onHoldHook: executing ${tasks.length} tasks`);
if (tasks.length) {
this._onHoldSession = new ConfirmCallSession({
logger: this.logger,
application: this.cs.application,
dlg: this.isIncomingLegHold ? this.dlg : this.cs.dlg,
@@ -875,12 +947,12 @@ class TaskDial extends Task {
tasks,
rootSpan: this.cs.rootSpan
});
await this._onHoldSession.exec();
this._onHoldSession = null;
}
} catch (error) {
this.logger.info(error, 'DialTask:_onHoldHook: failed retrieving waitHook');
this._onHoldSession = null;
break;
}
} while (allowedTasks && allowedTasks.length > 0 && !this.killed && this.isOnHold);
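The dial changes above add tag and boostAudioSignal options, nested dub tasks that run against the B-leg endpoint once the call connects, and hold/unhold handling driven by onHoldHook rather than onHoldMusic. A sketch of a dial payload using the new options (URLs, track names, and dB strings are illustrative; the dB strings assume parse-decibels accepts this form):

  {
    "verb": "dial",
    "target": [{"type": "phone", "number": "15083084809"}],
    "tag": {"campaign": "support"},
    "boostAudioSignal": "-3 dB",
    "referHook": "/refer",
    "onHoldHook": "/hold",
    "dub": [
      {"action": "addTrack", "track": "ambient", "play": "https://example.com/background.mp3", "loop": true}
    ]
  }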

lib/tasks/dub.js Normal file
View File

@@ -0,0 +1,144 @@
const {TaskName} = require('../utils/constants');
const TtsTask = require('./tts-task');
const assert = require('assert');
const parseDecibels = require('../utils/parse-decibels');
/**
* Dub task: add or remove additional audio tracks into the call
*/
class TaskDub extends TtsTask {
constructor(logger, opts, parentTask) {
super(logger, opts, parentTask);
this.logger.debug({opts: this.data}, 'TaskDub constructor');
['action', 'track', 'play', 'say', 'loop'].forEach((prop) => {
this[prop] = this.data[prop];
});
this.gain = parseDecibels(this.data.gain);
assert.ok(this.action, 'TaskDub: action is required');
assert.ok(this.track, 'TaskDub: track is required');
}
get name() { return TaskName.Dub; }
async exec(cs, {ep}) {
super.exec(cs);
try {
switch (this.action) {
case 'addTrack':
await this._addTrack(cs, ep);
break;
case 'removeTrack':
await this._removeTrack(cs, ep);
break;
case 'silenceTrack':
await this._silenceTrack(cs, ep);
break;
case 'playOnTrack':
await this._playOnTrack(cs, ep);
break;
case 'sayOnTrack':
await this._sayOnTrack(cs, ep);
break;
default:
throw new Error(`TaskDub: unsupported action ${this.action}`);
}
} catch (err) {
this.logger.error(err, 'Error executing dub task');
}
}
async _addTrack(cs, ep) {
this.logger.info(`adding track: ${this.track}`);
await ep.dub({
action: 'addTrack',
track: this.track
});
if (this.play) await this._playOnTrack(cs, ep);
else if (this.say) await this._sayOnTrack(cs, ep);
}
async _removeTrack(_cs, ep) {
this.logger.info(`removing track: ${this.track}`);
await ep.dub({
action: 'removeTrack',
track: this.track
});
}
async _silenceTrack(_cs, ep) {
this.logger.info(`silencing track: ${this.track}`);
await ep.dub({
action: 'silenceTrack',
track: this.track
});
}
async _playOnTrack(_cs, ep) {
this.logger.info(`playing on track: ${this.track}`);
await ep.dub({
action: 'playOnTrack',
track: this.track,
play: this.play,
loop: this.loop ? 'loop' : 'once',
gain: this.gain
});
}
async _sayOnTrack(cs, ep) {
const text = this.say.text || this.say;
this.synthesizer = this.say.synthesizer || {};
if (Object.keys(this.synthesizer).length) {
this.logger.info({synthesizer: this.synthesizer},
`saying on track ${this.track}: ${text} with synthesizer options`);
}
else {
this.logger.info(`saying on track ${this.track}: ${text}`);
}
this.synthesizer = this.synthesizer || {};
this.text = [text];
const vendor = this.synthesizer.vendor && this.synthesizer.vendor !== 'default' ?
this.synthesizer.vendor :
cs.speechSynthesisVendor;
const language = this.synthesizer.language && this.synthesizer.language !== 'default' ?
this.synthesizer.language :
cs.speechSynthesisLanguage;
const voice = this.synthesizer.voice && this.synthesizer.voice !== 'default' ?
this.synthesizer.voice :
cs.speechSynthesisVoice;
const label = this.synthesizer.label && this.synthesizer.label !== 'default' ?
this.synthesizer.label :
cs.speechSynthesisLabel;
const disableTtsStreaming = false;
const filepath = await this._synthesizeWithSpecificVendor(cs, ep, {
vendor, language, voice, label, disableTtsStreaming
});
assert.ok(filepath.length === 1, 'TaskDub: no filepath returned from synthesizer');
const path = filepath[0];
if (!path.startsWith('say:{')) {
/* we have a local file of mp3 or r8 of synthesized speech audio to play */
this.logger.info(`playing synthesized speech from file on track ${this.track}: ${path}`);
this.play = path;
await this._playOnTrack(cs, ep);
}
else {
this.logger.info(`doing actual text to speech file on track ${this.track}: ${path}`);
await ep.dub({
action: 'sayOnTrack',
track: this.track,
say: path,
gain: this.gain
});
}
}
}
module.exports = TaskDub;
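A sketch of driving the new task from an application, one action per verb (track names, URLs, and gain strings are illustrative):

  {"verb": "dub", "action": "addTrack", "track": "ambient"}
  {"verb": "dub", "action": "playOnTrack", "track": "ambient", "play": "https://example.com/rain.mp3", "loop": true, "gain": "-10 dB"}
  {"verb": "dub", "action": "sayOnTrack", "track": "ambient", "say": {"text": "one moment please"}}
  {"verb": "dub", "action": "silenceTrack", "track": "ambient"}
  {"verb": "dub", "action": "removeTrack", "track": "ambient"}

Note the design choice in _sayOnTrack: when synthesis yields a local audio file it is routed through _playOnTrack, while streaming say:{...} handles are passed straight to the dub API on the endpoint.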

View File

@@ -338,6 +338,7 @@ class TaskEnqueue extends Task {
this.logger.error({err}, `TaskEnqueue:_playHook error retrieving list info for queue ${this.queueName}`);
}
const json = await cs.application.requestor.request('verb:hook', hook, params, httpHeaders);
this.logger.debug({json}, 'TaskEnqueue:_playHook: received response from waitHook');
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
const allowedTasks = tasks.filter((t) => allowed.includes(t.name));

View File

@@ -10,7 +10,9 @@ const {
IbmTranscriptionEvents,
NvidiaTranscriptionEvents,
JambonzTranscriptionEvents,
AssemblyAiTranscriptionEvents,
VadDetection,
VerbioTranscriptionEvents
} = require('../utils/constants.json');
const {
JAMBONES_GATHER_EARLY_HINTS_MATCH,
@@ -27,9 +29,13 @@ class TaskGather extends SttTask {
[
'finishOnKey', 'input', 'numDigits', 'minDigits', 'maxDigits',
'interDigitTimeout', 'partialResultHook', 'bargein', 'dtmfBargein',
'speechTimeout', 'timeout', 'say', 'play', 'actionHookDelayAction', 'fillerNoise', 'vad'
].forEach((k) => this[k] = this.data[k]);
// gather default input is digits
if (!this.input) {
this.input = ['digits'];
}
/* when collecting dtmf, bargein on dtmf is true unless explicitly set to false */
if (this.dtmfBargein !== false && this.input.includes('digits')) this.dtmfBargein = true;
@@ -37,7 +43,8 @@ class TaskGather extends SttTask {
this.timeout = this.timeout === 0 ? 0 : (this.timeout || 15) * 1000;
this.interim = !!this.partialResultHook || this.bargein || (this.timeout > 0);
this.listenDuringPrompt = this.data.listenDuringPrompt === false ? false : true;
this.minBargeinWordCount = this.data.minBargeinWordCount !== undefined ? this.data.minBargeinWordCount : 1;
this._vadEnabled = this.minBargeinWordCount === 0;
if (this.data.recognizer) {
/* continuous ASR (i.e. compile transcripts until a special timeout or dtmf key) */
this.asrTimeout = typeof this.data.recognizer.asrTimeout === 'number' ?
@@ -71,6 +78,7 @@ class TaskGather extends SttTask {
/* buffer speech for continuous asr */
this._bufferedTranscripts = [];
this.partialTranscriptsCount = 0;
this.bugname_prefix = 'gather_';
}
get name() { return TaskName.Gather; }
@@ -86,6 +94,18 @@ class TaskGather extends SttTask {
(this.playTask && this.playTask.earlyMedia);
}
get hasFillerNoise() {
return Object.keys(this.fillerNoise).length > 0 && this.fillerNoise.enabled !== false;
}
get fillerNoiseUrl() {
return this.fillerNoise.url;
}
get fillerNoiseStartDelaySecs() {
return this.fillerNoise.startDelaySecs;
}
get summary() {
let s = `${this.name}{`;
if (this.input.length === 2) s += 'inputs=[speech,digits],';
@@ -97,6 +117,7 @@ class TaskGather extends SttTask {
}
if (this.sayTask) s += ',with nested say task';
if (this.playTask) s += ',with nested play task';
if (this.actionHookDelayAction) s += ',with actionHookDelayAction';
s += '}';
return s;
}
@@ -106,6 +127,16 @@ class TaskGather extends SttTask {
await super.exec(cs, {ep});
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, cs.srf);
this.fillerNoise = {
...(cs.fillerNoise || {}),
...(this.fillerNoise || {})
};
this.vad = {
...(cs.vad || {}),
...(this.vad || {})
};
if (cs.hasGlobalSttHints && !this.maskGlobalSttHints) {
const {hints, hintsBoost} = cs.globalSttHints;
const setOfHints = new Set((this.data.recognizer.hints || [])
@@ -116,14 +147,6 @@ class TaskGather extends SttTask {
this.logger.debug({hints: this.data.recognizer.hints, hintsBoost: this.data.recognizer.hintsBoost},
'Gather:exec - applying global sttHints');
}
if (!this.isContinuousAsr && cs.isContinuousAsr) {
this.isContinuousAsr = true;
this.asrTimeout = cs.asrTimeout * 1000;
@@ -141,6 +164,24 @@ class TaskGather extends SttTask {
this.interim = true;
this.logger.debug('Gather:exec - early hints match enabled');
}
// if we have actionHook delay, and the session does as well, stash the session config
if (this.actionHookDelayAction) {
if (cs.actionHookDelayProcessor) {
this.logger.debug('Gather:exec - stashing session-level ahd properties');
cs.stashActionHookDelayProperties();
}
cs.actionHookDelayProperties = this.actionHookDelayAction;
}
this._startVad();
const startDtmfListener = () => {
if (this.input.includes('digits') || this.dtmfBargein || this.asrDtmfTerminationDigit) {
ep.on('dtmf', this._onDtmf.bind(this, cs, ep));
}
};
const startListening = async(cs, ep) => {
this._startTimer();
if (this.isContinuousAsr && 0 === this.timeout) this._startAsrTimer();
@@ -154,12 +195,7 @@ class TaskGather extends SttTask {
this._startTranscribing(ep);
return updateSpeechCredentialLastUsed(this.sttCredentials.speech_credential_sid);
} catch (e) {
await this._startFallback(cs, ep, {error: e});
}
}
};
@@ -167,13 +203,12 @@ class TaskGather extends SttTask {
try {
if (this.sayTask) {
const {span, ctx} = this.startChildSpan(`nested:${this.sayTask.summary}`);
const process = () => {
this.logger.debug('Gather: nested say task completed');
if (!this.listenDuringPrompt) {
startDtmfListener();
}
this._stopVad();
if (!this.killed) {
startListening(cs, ep);
if (this.input.includes('speech') && this.vendor === 'nuance' && this.listenDuringPrompt) {
@@ -183,17 +218,27 @@ class TaskGather extends SttTask {
});
}
}
};
this.sayTask.span = span;
this.sayTask.ctx = ctx;
this.sayTask.exec(cs, {ep}) // kicked off, _not_ waiting for it to complete
.catch((err) => {
process();
});
this.sayTask.on('playDone', (err) => {
span.end();
if (err) this.logger.error({err}, 'Gather:exec Error playing tts');
process();
});
}
else if (this.playTask) {
const {span, ctx} = this.startChildSpan(`nested:${this.playTask.summary}`);
const process = () => {
this.logger.debug('Gather: nested play task completed');
if (!this.listenDuringPrompt) {
startDtmfListener();
}
this._stopVad();
if (!this.killed) {
startListening(cs, ep);
if (this.input.includes('speech') && this.vendor === 'nuance' && this.listenDuringPrompt) {
@@ -203,6 +248,17 @@ class TaskGather extends SttTask {
});
}
}
};
this.playTask.span = span;
this.playTask.ctx = ctx;
this.playTask.exec(cs, {ep}) // kicked off, _not_ waiting for it to complete
.catch((err) => {
process();
});
this.playTask.on('playDone', (err) => {
span.end();
if (err) this.logger.error({err}, 'Gather:exec Error playing url');
process();
});
}
else {
@@ -215,30 +271,38 @@ class TaskGather extends SttTask {
if (this.input.includes('speech') && this.listenDuringPrompt) {
await this._setSpeechHandlers(cs, ep);
if (!this.resolved && !this.killed) {
this._startTranscribing(ep);
updateSpeechCredentialLastUsed(this.sttCredentials.speech_credential_sid)
.catch(() => {/*already logged error */});
}
else {
this.logger.info('Gather:exec - task was killed or resolved quickly, not starting transcription');
}
}
if (this.listenDuringPrompt) {
startDtmfListener();
}
await this.awaitTaskDone();
this._killAudio(cs);
} catch (err) {
this.logger.error(err, 'TaskGather:exec error');
}
this.removeCustomEventListeners();
}
kill(cs) {
super.kill(cs);
this._killAudio(cs);
this._clearFillerNoiseTimer();
this.ep.removeAllListeners('dtmf');
clearTimeout(this.interDigitTimer);
this._clearAsrTimer();
this.playTask?.span.end();
this.sayTask?.span.end();
this._stopVad();
this._resolve('killed');
}
@@ -268,8 +332,11 @@ class TaskGather extends SttTask {
else if (this.input.includes('digits')) {
if (this.digitBuffer.length === 0 && this.needsStt) {
// DTMF is higher priority than STT.
this.removeCustomEventListeners();
ep.stopTranscription({
vendor: this.vendor,
bugname: this.bugname,
})
.catch((err) => this.logger.error({err},
` Received DTMF, Error stopping transcription for vendor ${this.vendor}`));
}
@@ -305,38 +372,36 @@ class TaskGather extends SttTask {
if (this.data.recognizer?.deepgramOptions?.shortUtterance) this.shortUtterance = true;
}
const opts = this.setChannelVarsForStt(this, this.sttCredentials, this.language, this.data.recognizer);
switch (this.vendor) {
case 'google':
this.bugname = `${this.bugname_prefix}google_transcribe`;
this.addCustomEventListener(
ep, GoogleTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(
ep, GoogleTranscriptionEvents.EndOfUtterance, this._onEndOfUtterance.bind(this, cs, ep));
break;
case 'aws':
case 'polly':
this.bugname = `${this.bugname_prefix}aws_transcribe`;
this.addCustomEventListener(ep, AwsTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
break;
case 'microsoft':
this.bugname = `${this.bugname_prefix}azure_transcribe`;
this.addCustomEventListener(
ep, AzureTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
//this.addCustomEventListener(ep, AzureTranscriptionEvents.NoSpeechDetected,
//this._onNoSpeechDetected.bind(this, cs, ep));
break;
case 'nuance':
this.bugname = `${this.bugname_prefix}nuance_transcribe`;
this.addCustomEventListener(ep, NuanceTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(ep, NuanceTranscriptionEvents.StartOfSpeech,
this._onStartOfSpeech.bind(this, cs, ep));
this.addCustomEventListener(ep, NuanceTranscriptionEvents.TranscriptionComplete,
this._onTranscriptionComplete.bind(this, cs, ep));
/* stall timers until prompt finishes playing */
if ((this.sayTask || this.playTask) && this.listenDuringPrompt) {
@@ -345,24 +410,30 @@ class TaskGather extends SttTask {
break;
case 'deepgram':
this.bugname = `${this.bugname_prefix}deepgram_transcribe`;
this.addCustomEventListener(
ep, DeepgramTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(ep, DeepgramTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, DeepgramTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
/* if app sets deepgramOptions.utteranceEndMs they essentially want continuous asr */
if (opts.DEEPGRAM_SPEECH_UTTERANCE_END_MS) this.isContinuousAsr = true;
break;
case 'soniox':
this.bugname = `${this.bugname_prefix}soniox_transcribe`;
this.addCustomEventListener(
ep, SonioxTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
break;
case 'verbio':
this.bugname = `${this.bugname_prefix}verbio_transcribe`;
this.addCustomEventListener(
ep, VerbioTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
break;
case 'cobalt':
this.bugname = `${this.bugname_prefix}cobalt_transcribe`;
this.addCustomEventListener(
ep, CobaltTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
/* cobalt doesnt have language, it has model, which is required */
if (!this.data.recognizer.model) {
@@ -391,23 +462,21 @@ class TaskGather extends SttTask {
break;
case 'ibm':
this.bugname = `${this.bugname_prefix}ibm_transcribe`;
this.addCustomEventListener(ep, IbmTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(ep, IbmTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, IbmTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
break;
case 'nvidia':
this.bugname = `${this.bugname_prefix}nvidia_transcribe`;
this.addCustomEventListener(ep, NvidiaTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(ep, NvidiaTranscriptionEvents.StartOfSpeech,
this._onStartOfSpeech.bind(this, cs, ep));
this.addCustomEventListener(ep, NvidiaTranscriptionEvents.TranscriptionComplete,
this._onTranscriptionComplete.bind(this, cs, ep));
/* I think nvidia has this (??) - stall timers until prompt finishes playing */
if ((this.sayTask || this.playTask) && this.listenDuringPrompt) {
@@ -416,20 +485,22 @@ class TaskGather extends SttTask {
break;
case 'assemblyai':
this.bugname = `${this.bugname_prefix}assemblyai_transcribe`;
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(
ep, AssemblyAiTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.Error, this._onVendorError.bind(this, cs, ep));
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
break;
default:
if (this.vendor.startsWith('custom:')) {
this.bugname = `${this.bugname_prefix}${this.vendor}_transcribe`;
this.addCustomEventListener(
ep, JambonzTranscriptionEvents.Transcription, this._onTranscription.bind(this, cs, ep));
this.addCustomEventListener(ep, JambonzTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, JambonzTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
break;
}
@@ -441,7 +512,7 @@ class TaskGather extends SttTask {
}
/* common handler for all stt engine errors */
this.addCustomEventListener(ep, JambonzTranscriptionEvents.Error, this._onJambonzError.bind(this, cs, ep));
await ep.set(opts)
.catch((err) => this.logger.info(err, 'Error setting channel variables'));
}
@@ -482,9 +553,8 @@ class TaskGather extends SttTask {
this._clearTimer();
this._timeoutTimer = setTimeout(() => {
if (this.isContinuousAsr) this._startAsrTimer();
if (this.interDigitTimer) return; // let the inter-digit timer complete
else {
this._resolve(this.digitBuffer.length >= this.minDigits ? 'dtmf-num-digits' : 'timeout');
}
}, this.timeout);
@@ -494,7 +564,9 @@ class TaskGather extends SttTask {
if (this._timeoutTimer) {
clearTimeout(this._timeoutTimer);
this._timeoutTimer = null;
return true;
}
return false;
}
_startAsrTimer() {
@@ -503,17 +575,25 @@ class TaskGather extends SttTask {
this._clearAsrTimer();
this._asrTimer = setTimeout(() => {
this.logger.debug('_startAsrTimer - asr timer went off');
const evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._resolve(this._bufferedTranscripts.length > 0 ? 'speech' : 'timeout', evt);
}, this.asrTimeout);
this.logger.debug(`_startAsrTimer: set for ${this.asrTimeout}ms`);
}
_clearAsrTimer() {
if (this._asrTimer) clearTimeout(this._asrTimer);
if (this._asrTimer) {
this.logger.debug('_clearAsrTimer: asrTimer cleared');
clearTimeout(this._asrTimer);
}
this._asrTimer = null;
}
_hangupCall() {
this.logger.debug('_hangupCall');
this.cs.hangup();
}
_startFastRecognitionTimer(evt) {
assert(this.fastRecognitionTimeout > 0);
this._clearFastRecognitionTimer();
@@ -534,7 +614,7 @@ class TaskGather extends SttTask {
this._clearFinalAsrTimer();
this._finalAsrTimer = setTimeout(() => {
this.logger.debug('_startFinalAsrTimer - final asr timer went off');
const evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._resolve(this._bufferedTranscripts.length > 0 ? 'speech' : 'timeout', evt);
}, 1000);
this.logger.debug('_startFinalAsrTimer: set for 1 second');
@@ -545,11 +625,65 @@ class TaskGather extends SttTask {
this._finalAsrTimer = null;
}
_startVad() {
if (!this._vadStarted && this._vadEnabled) {
this.logger.debug('_startVad');
this.addCustomEventListener(this.ep, VadDetection.Detection, this._onVadDetected.bind(this, this.cs, this.ep));
this.ep?.startVadDetection(this.vad);
this._vadStarted = true;
}
}
_stopVad() {
if (this._vadStarted) {
this.logger.debug('_stopVad');
this.ep?.stopVadDetection(this.vad);
this.ep?.removeCustomEventListener(VadDetection.Detection, this._onVadDetected);
this._vadStarted = false;
}
}
_startFillerNoise() {
this.logger.debug('Gather:_startFillerNoise - playing filler noise');
this.ep?.play(this.fillerNoise.url);
this._fillerNoiseOn = true;
this.ep.once('playback-start', (evt) => {
if (evt.file === this.fillerNoise.url && !this._fillerNoiseOn) {
this.logger.info({evt}, 'Gather:_startFillerNoise - race condition - kill filler noise here');
this.ep.api('uuid_break', this.ep.uuid)
.catch((err) => this.logger.info(err, 'Error killing filler noise'));
return;
} else this.logger.debug({evt}, 'Gather:_startFillerNoise - playback started');
});
}
_startFillerNoiseTimer() {
this._clearFillerNoiseTimer();
this._fillerNoiseTimer = setTimeout(() => {
this.logger.debug('Gather:_startFillerNoiseTimer - playing filler noise');
this._startFillerNoise();
}, this.fillerNoise.startDelaySecs * 1000);
}
_clearFillerNoiseTimer() {
if (this._fillerNoiseTimer) clearTimeout(this._fillerNoiseTimer);
this._fillerNoiseTimer = null;
}
_killFillerNoise() {
if (this._fillerNoiseTimer) {
this.logger.debug('Gather:_killFillerNoise');
this.ep?.api('uuid_break', this.ep.uuid);
}
}
_killAudio(cs) {
if (this.hasFillerNoise || (!this.sayTask && !this.playTask && this.bargein)) {
if (this.ep?.connected && (!this.playComplete || this.hasFillerNoise)) {
this.logger.debug('Gather:_killAudio: killing playback of any audio');
this.playComplete = true;
this._fillerNoiseOn = false; // in a race, if we just started audio it may sneak through here
this.ep.api('uuid_break', this.ep.uuid)
.catch((err) => this.logger.info(err, 'Error killing audio'));
}
@@ -571,11 +705,11 @@ class TaskGather extends SttTask {
// make sure this is not a transcript from answering machine detection
const bugname = fsEvent.getHeader('media-bugname');
const finished = fsEvent.getHeader('transcription-session-finished');
this.logger.debug({evt, bugname, finished, vendor: this.vendor}, 'Gather:_onTranscription raw transcript');
if (bugname && this.bugname !== bugname) return;
if (finished === 'true') return;
if (this.vendor === 'ibm' && evt?.state === 'listening') return;
if (this.vendor === 'deepgram' && evt.type === 'UtteranceEnd') {
/* we will only get this when we have set utterance_end_ms */
if (this._bufferedTranscripts.length === 0) {
@@ -583,18 +717,35 @@ class TaskGather extends SttTask {
}
else {
this.logger.debug('Gather:_onTranscription - got UtteranceEnd event from deepgram, return buffered transcript');
evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._bufferedTranscripts = [];
this._resolve('speech', evt);
}
return;
}
if (this.vendor === 'deepgram' && evt.type === 'Metadata') {
this.logger.debug('Gather:_onTranscription - discarding Metadata event from deepgram');
return;
}
//this.logger.debug({evt, bugname, finished, vendor: this.vendor}, 'Gather:_onTranscription normalized transcript');
evt = this.normalizeTranscription(evt, this.vendor, 1, this.language, this.shortUtterance);
if (evt.alternatives.length === 0) {
this.logger.info({evt}, 'TaskGather:_onTranscription - got empty transcript, continue listening');
return;
}
const confidence = evt.alternatives[0].confidence;
const minConfidence = this.data.recognizer?.minConfidence;
this.logger.debug({evt},
`TaskGather:_onTranscription - confidence (${confidence}), minConfidence (${minConfidence})`);
if (confidence && minConfidence && confidence < minConfidence) {
this.logger.info({evt},
'TaskGather:_onTranscription - Transcript confidence ' +
`(${confidence}) < minConfidence (${minConfidence})`);
return this._resolve('stt-low-confidence', evt);
}
/* fast path: our first partial transcript exactly matches an early hint */
if (this.earlyHintsMatch && evt.is_final === false && this.partialTranscriptsCount++ === 0) {
@@ -616,7 +767,9 @@ class TaskGather extends SttTask {
if (evt.is_final) {
if (evt.alternatives[0].transcript === '' && !this.callSession.callGone && !this.killed) {
emptyTranscript = true;
if (finished === 'true' &&
['microsoft', 'deepgram'].includes(this.vendor) &&
this._bufferedTranscripts.length === 0) {
this.logger.debug({evt}, 'TaskGather:_onTranscription - got empty transcript from old gather, disregarding');
return;
}
@@ -650,7 +803,7 @@ class TaskGather extends SttTask {
this._clearTimer();
if (this._finalAsrTimer) {
this._clearFinalAsrTimer();
const evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
return this._resolve(this._bufferedTranscripts.length > 0 ? 'speech' : 'timeout', evt);
}
this._startAsrTimer();
@@ -659,47 +812,59 @@ class TaskGather extends SttTask {
if (!['soniox', 'aws', 'microsoft', 'deepgram'].includes(this.vendor)) this._startTranscribing(ep);
}
else {
if (this.vendor === 'soniox') {
/* compile transcripts into one */
this._sonioxTranscripts.push(evt.vendor.finalWords);
evt = this.compileSonioxTranscripts(this._sonioxTranscripts, 1, this.language);
this._sonioxTranscripts = [];
}
else if (this.vendor === 'deepgram') {
/* compile transcripts into one */
if (!emptyTranscript) this._bufferedTranscripts.push(evt);
/* deepgram can send an empty and final transcript; only if we have any buffered should we resolve */
if (this._bufferedTranscripts.length === 0) return;
evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language, this.vendor);
this._bufferedTranscripts = [];
}
/* here is where we return a final transcript */
this._resolve('speech', evt);
}
}
else {
/* deepgram can send a non-final transcript but with words that are final, so we need to buffer */
let emptyTranscript = false;
if (this.vendor === 'deepgram') {
const originalEvent = evt.vendor.evt;
if (originalEvent.is_final && evt.alternatives[0].transcript !== '') {
this.logger.debug({evt}, 'Gather:_onTranscription - buffering a completed (partial) deepgram transcript');
this._bufferedTranscripts.push(evt);
}
if (evt.alternatives[0].transcript === '') emptyTranscript = true;
}
if (!emptyTranscript) {
if (this._clearTimer()) this._startTimer();
if (this.bargein && (words + bufferedWords) >= this.minBargeinWordCount) {
if (!this.playComplete) {
this.logger.debug({transcript: evt.alternatives[0].transcript}, 'killing audio due to speech');
this.emit('vad');
}
this._killAudio(cs);
}
}
if (this.fastRecognitionTimeout) {
this._startFastRecognitionTimer(evt);
@@ -717,14 +882,9 @@ class TaskGather extends SttTask {
this._sonioxTranscripts.push(evt.vendor.finalWords);
}
}
/* restart asr timer if we get a partial transcript */
if (this.isContinuousAsr) this._startAsrTimer();
}
}
_onEndOfUtterance(cs, ep) {
@@ -756,24 +916,45 @@ class TaskGather extends SttTask {
_onTranscriptionComplete(cs, ep) {
this.logger.debug('TaskGather:_onTranscriptionComplete');
}
async _startFallback(cs, ep, evt) {
if (this.canFallback) {
ep.stopTranscription({
vendor: this.vendor,
bugname: this.bugname
})
.catch((err) => this.logger.error({err}, `Error stopping transcription for primary vendor ${this.vendor}`));
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, cs.srf);
try {
this.logger.debug('gather:_startFallback');
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'in progress'});
await this._initFallback();
this._speechHandlersSet = false;
await this._setSpeechHandlers(cs, ep);
this._startTranscribing(ep);
updateSpeechCredentialLastUsed(this.sttCredentials.speech_credential_sid);
return true;
} catch (error) {
this.logger.info({error}, `There is error while falling back to ${this.fallbackVendor}`);
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'not available'});
}
} else {
this.logger.debug('gather:_startFallback no condition for falling back');
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'not available'});
}
return false;
}
async _onJambonzError(cs, ep, evt) {
if (this.vendor === 'google' && evt.error_code === 0) {
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError - ignoring google error code 0');
return;
}
this.logger.info({evt}, 'TaskGather:_onJambonzError');
const {writeAlerts, AlertType} = cs.srf.locals;
if (this.vendor === 'nuance') {
const {code, error} = evt;
if (code === 404 && error === 'No speech') return this._resolve('timeout');
@@ -786,17 +967,23 @@ class TaskGather extends SttTask {
message: `Custom speech vendor ${this.vendor} error: ${evt.error}`,
vendor: this.vendor,
}).catch((err) => this.logger.info({err}, 'Error generating alert for jambonz custom connection failure'));
this.notifyError({msg: 'ASR error', details:`Custom speech vendor ${this.vendor} error: ${evt.error}`});
if (!(await this._startFallback(cs, ep, evt))) {
this.notifyTaskDone();
}
}
async _onVendorConnectFailure(cs, _ep, evt) {
super._onVendorConnectFailure(cs, _ep, evt);
if (!(await this._startFallback(cs, _ep, evt))) {
this.notifyTaskDone();
}
}
async _onVendorError(cs, _ep, evt) {
super._onVendorError(cs, _ep, evt);
if (!(await this._startFallback(cs, _ep, evt))) {
this._resolve('stt-error', evt);
}
}
_onVadDetected(cs, ep) {
@@ -805,6 +992,10 @@ class TaskGather extends SttTask {
this._killAudio(cs);
this.emit('vad');
}
if (this.vad?.strategy === 'one-shot') {
this.ep?.removeCustomEventListener(VadDetection.Detection, this._onVadDetected);
this._vadStarted = false;
}
}
_onNoSpeechDetected(cs, ep, evt, fsEvent) {
@@ -823,26 +1014,38 @@ class TaskGather extends SttTask {
async _resolve(reason, evt) {
this.logger.debug(`TaskGather:resolve with reason ${reason}`);
if (this.needsStt && this.ep && this.ep.connected) {
this.ep.stopTranscription({
vendor: this.vendor,
bugname: this.bugname
})
.catch((err) => {
this.logger.error({err}, 'Error stopping transcription');
});
}
if (this.resolved) {
this.logger.debug('TaskGather:_resolve - already resolved');
return;
}
this.resolved = true;
// If bargein is false and the ws application returns an ack to verb:hook,
// the gather should not play any audio
this._killAudio(this.cs);
// Clear dtmf events; to avoid leaking the listener in any case, just remove it
this.ep.removeAllListeners('dtmf');
clearTimeout(this.interDigitTimer);
this._clearTimer();
this._clearFastRecognitionTimer();
this._clearAsrTimer();
this._clearFinalAsrTimer();
this.span.setAttributes({
channel: 1,
'stt.resolve': reason,
'stt.result': JSON.stringify(evt)
});
if (this.callSession && this.callSession.callGone) {
this.logger.debug('TaskGather:_resolve - call is gone, not invoking web callback');
@@ -850,36 +1053,82 @@ class TaskGather extends SttTask {
return;
}
// action hook delay
if (this.cs.actionHookDelayProcessor) {
this.logger.debug('TaskGather:_resolve - actionHookDelayProcessor exists - starting it');
this.cs.actionHookDelayProcessor.start();
}
// TODO: remove and implement as actionHookDelay
if (this.hasFillerNoise && (reason.startsWith('dtmf') || reason.startsWith('speech'))) {
if (this.fillerNoiseStartDelaySecs > 0) {
this._startFillerNoiseTimer();
}
else {
this.logger.debug(`TaskGather:_resolve - playing filler noise: ${this.fillerNoiseUrl}`);
this._startFillerNoise();
}
}
let returnedVerbs = false;
try {
if (reason.startsWith('dtmf')) {
if (this.parentTask) this.parentTask.emit('dtmf', evt);
else {
this.emit('dtmf', evt);
returnedVerbs = await this.performAction({digits: this.digitBuffer, reason: 'dtmfDetected'});
}
}
else if (reason.startsWith('speech')) {
if (this.parentTask) this.parentTask.emit('transcription', evt);
else {
this.emit('transcription', evt);
this.logger.debug('TaskGather:_resolve - invoking performAction');
returnedVerbs = await this.performAction({speech: evt, reason: 'speechDetected'});
this.logger.debug({returnedVerbs}, 'TaskGather:_resolve - back from performAction');
}
}
else if (reason.startsWith('timeout')) {
if (this.parentTask) this.parentTask.emit('timeout', evt);
else {
this.emit('timeout', evt);
returnedVerbs = await this.performAction({reason: 'timeout'});
}
}
else if (reason.startsWith('stt-error')) {
if (this.parentTask) this.parentTask.emit('stt-error', evt);
else {
this.emit('stt-error', evt);
returnedVerbs = await this.performAction({reason: 'error', details: evt.error});
}
} else if (reason.startsWith('stt-low-confidence')) {
if (this.parentTask) this.parentTask.emit('stt-low-confidence', evt);
else {
this.emit('stt-low-confidence', evt);
returnedVerbs = await this.performAction({reason: 'stt-low-confidence'});
}
}
} catch (err) { /*already logged error*/ }
// Gather got response from hook, cancel actionHookDelay processing
this.logger.debug('TaskGather:_resolve - checking ahd');
if (this.cs.actionHookDelayProcessor) {
if (returnedVerbs) {
this.logger.debug('TaskGather:_resolve - got response from action hook, cancelling actionHookDelay');
await this.cs.actionHookDelayProcessor.stop();
if (this.actionHookDelayAction && !this.cs.popActionHookDelayProperties()) {
// no session level ahd was running when this task started, so clear it
this.cs.clearActionHookDelayProcessor();
this.logger.debug('TaskGather:_resolve - clear ahd');
}
}
else {
this.logger.debug('TaskGather:_resolve - no response from action hook, continue actionHookDelay');
}
}
this._clearFillerNoiseTimer();
this.notifyTaskDone();
}
}
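Pulling the gather changes together: recognizer.minConfidence rejects final transcripts below a confidence floor and resolves with the new stt-low-confidence reason, while setting minBargeinWordCount to 0 shifts barge-in detection onto the VAD path started in _startVad. A sketch of a gather verb exercising both (vendor and thresholds are illustrative):

  {
    "verb": "gather",
    "input": ["speech"],
    "minBargeinWordCount": 0,
    "vad": {"strategy": "one-shot", "voiceMs": 250},
    "recognizer": {
      "vendor": "google",
      "language": "en-US",
      "minConfidence": 0.6
    },
    "actionHook": "/results"
  }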

View File

@@ -8,6 +8,10 @@ const DTMF_SPAN_NAME = 'dtmf';
class TaskListen extends Task {
constructor(logger, opts, parentTask) {
super(logger, opts);
/**
* @deprecated
* use bidirectionalAudio.enabled
*/
this.disableBidirectionalAudio = opts.disableBidirectionalAudio;
this.preconditions = TaskPreconditions.Endpoint;
@@ -25,6 +29,15 @@ class TaskListen extends Task {
this.results = {};
this.playAudioQueue = [];
this.isPlayingAudioFromQueue = false;
this.bidirectionalAudio = {
enabled: this.disableBidirectionalAudio === true ? false : true,
...(this.data['bidirectionalAudio']),
};
// From drachtio version 3.0.40, forkAudioStart sends an empty bugname and metadata together with
// the bidirectionalAudio params, which causes older versions of freeswitch to confuse the bugname
// with the bidirectionalAudio params
this._bugname = 'audio_fork';
if (this.transcribe) this.transcribeTask = makeTask(logger, {'transcribe': opts.transcribe}, this);
}
@@ -123,8 +136,6 @@ class TaskListen extends Task {
ci,
this.metadata);
if (this.hook.auth) {
await this.ep.set({
'MOD_AUDIO_BASIC_AUTH_USERNAME': this.hook.auth.username,
'MOD_AUDIO_BASIC_AUTH_PASSWORD': this.hook.auth.password
@@ -135,7 +146,8 @@ class TaskListen extends Task {
mixType: this.mixType,
sampling: this.sampleRate,
...(this._bugname && {bugname: this._bugname}),
metadata,
bidirectionalAudio: this.bidirectionalAudio || {}
});
this.recordStartTime = moment();
if (this.maxLength) {
@@ -155,7 +167,7 @@ class TaskListen extends Task {
}
/* support bi-directional audio */
if (this.bidirectionalAudio.enabled) {
ep.addCustomEventListener(ListenEvents.PlayAudio, this._onPlayAudio.bind(this, ep));
}
ep.addCustomEventListener(ListenEvents.KillAudio, this._onKillAudio.bind(this, ep));
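With the new schema, bidirectional audio is controlled by an object rather than the deprecated disableBidirectionalAudio flag. A sketch of a listen verb opting out (URL is illustrative):

  {
    "verb": "listen",
    "url": "wss://example.com/audio",
    "mixType": "stereo",
    "bidirectionalAudio": {"enabled": false}
  }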

View File

@@ -14,6 +14,9 @@ function makeTask(logger, obj, parent) {
}
validateVerb(name, data, logger);
switch (name) {
case TaskName.Answer:
const TaskAnswer = require('./answer');
return new TaskAnswer(logger, data, parent);
case TaskName.SipDecline:
const TaskSipDecline = require('./sip_decline');
return new TaskSipDecline(logger, data, parent);
@@ -41,6 +44,9 @@ function makeTask(logger, obj, parent) {
case TaskName.Dtmf:
const TaskDtmf = require('./dtmf');
return new TaskDtmf(logger, data, parent);
case TaskName.Dub:
const TaskDub = require('./dub');
return new TaskDub(logger, data, parent);
case TaskName.Enqueue:
const TaskEnqueue = require('./enqueue');
return new TaskEnqueue(logger, data, parent);


@@ -17,6 +17,7 @@ class TaskRestDial extends Task {
this.call_hook = this.data.call_hook;
this.timeout = this.data.timeout || 60;
this.sipRequestWithinDialogHook = this.data.sipRequestWithinDialogHook;
this.referHook = this.data.referHook;
this.on('connect', this._onConnect.bind(this));
this.on('callStatus', this._onCallStatus.bind(this));
@@ -64,6 +65,7 @@ class TaskRestDial extends Task {
this.canCancel = false;
const cs = this.callSession;
cs.setDialog(dlg);
cs.referHook = this.referHook;
this.logger.debug('TaskRestDial:_onConnect - call connected');
if (this.sipRequestWithinDialogHook) this._initSipRequestWithinDialogHandler(cs, dlg);
try {


@@ -1,4 +1,4 @@
const Task = require('./task');
const TtsTask = require('./tts-task');
const {TaskName, TaskPreconditions} = require('../utils/constants');
const pollySSMLSplit = require('polly-ssml-split');
@@ -23,9 +23,15 @@ const breakLengthyTextIfNeeded = (logger, text) => {
}
};
class TaskSay extends Task {
const parseTextFromSayString = (text) => {
const closingBraceIndex = text.indexOf('}');
if (closingBraceIndex === -1) return text;
return text.slice(closingBraceIndex + 1);
};
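A quick usage note for the helper above (inputs illustrative): it strips a leading say:{...} prefix by slicing past the first closing brace, and passes plain text through unchanged.

// parseTextFromSayString('say:{vendor=deepgram}Hello there')  => 'Hello there'
// parseTextFromSayString('plain text, no braces')             => 'plain text, no braces'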
class TaskSay extends TtsTask {
constructor(logger, opts, parentTask) {
super(logger, opts);
super(logger, opts, parentTask);
this.preconditions = TaskPreconditions.Endpoint;
this.text = (Array.isArray(this.data.text) ? this.data.text : [this.data.text])
@@ -33,10 +39,6 @@ class TaskSay extends Task {
.flat();
this.loop = this.data.loop || 1;
this.earlyMedia = this.data.earlyMedia === true || (parentTask && parentTask.earlyMedia);
this.synthesizer = this.data.synthesizer || {};
this.disableTtsCache = this.data.disableTtsCache;
this.options = this.synthesizer.options || {};
this.isHandledByPrimaryProvider = true;
}
@@ -59,12 +61,12 @@ class TaskSay extends Task {
}
}
async _synthesizeWithSpecificVendor(cs, ep, {vendor, language, voice, label}) {
const {srf} = cs;
async _synthesizeWithSpecificVendor(cs, ep, {vendor, language, voice, label, preCache = false}) {
const {srf, accountSid:account_sid} = cs;
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, srf);
const {writeAlerts, AlertType, stats} = srf.locals;
const {synthAudio} = srf.locals.dbHelpers;
const engine = this.synthesizer.engine || 'standard';
const engine = this.synthesizer.engine || cs.synthesizer?.engine || 'neural';
const salt = cs.callSid;
let credentials = cs.getSpeechCredentials(vendor, 'tts', label);
@@ -76,6 +78,8 @@ class TaskSay extends Task {
voice = arr[1];
model = arr[2];
}
} else if (vendor === 'deepgram') {
model = voice;
}
/* allow for microsoft custom region voice and api_key to be specified as an override */
@@ -95,18 +99,23 @@ class TaskSay extends Task {
voice = this.options.voice_id || voice;
}
this.logger.info({vendor, language, voice, model}, 'TaskSay:exec');
ep.set({
tts_engine: vendor.startsWith('custom:') ? 'custom' : vendor,
tts_voice: voice,
cache_speech_handles: !cs.currentTtsVendor || cs.currentTtsVendor === vendor ? 1 : 0,
}).catch((err) => this.logger.info({err}, 'Error setting tts_engine on endpoint'));
// set the current vendor on the call session
// If vendor is changed from the previous one, then reset the cache_speech_handles flag
cs.currentTtsVendor = vendor;
if (!preCache && !this._disableTracing) this.logger.info({vendor, language, voice, model}, 'TaskSay:exec');
try {
if (!credentials) {
writeAlerts({
account_sid: cs.accountSid,
account_sid,
alert_type: AlertType.TTS_NOT_PROVISIONED,
vendor
}).catch((err) => this.logger.info({err}, 'Error generating alert for no tts'));
this.notifyError({
msg: 'TTS error',
details:`No speech credentials provisioned for selected vendor ${vendor}`
});
throw new Error('no provisioned speech credentials for TTS');
}
// synthesize all of the text elements
@@ -118,14 +127,17 @@ class TaskSay extends Task {
if (text.startsWith('silence_stream://')) return text;
/* otel: trace time for tts */
const {span} = this.startChildSpan('tts-generation', {
'tts.vendor': vendor,
'tts.language': language,
'tts.voice': voice
});
if (!preCache && !this._disableTracing) {
const {span} = this.startChildSpan('tts-generation', {
'tts.vendor': vendor,
'tts.language': language,
'tts.voice': voice
});
this.otelSpan = span;
}
try {
const {filePath, servedFromCache, rtt} = await synthAudio(stats, {
account_sid: cs.accountSid,
account_sid,
text,
vendor,
language,
@@ -135,37 +147,46 @@ class TaskSay extends Task {
salt,
credentials,
options: this.options,
disableTtsCache : this.disableTtsCache
disableTtsCache : this.disableTtsCache,
preCache
});
this.logger.debug(`file ${filePath}, served from cache ${servedFromCache}`);
if (filePath) cs.trackTmpFile(filePath);
if (!servedFromCache && !lastUpdated) {
lastUpdated = true;
updateSpeechCredentialLastUsed(credentials.speech_credential_sid)
.catch(() => {/*already logged error */});
if (!filePath.startsWith('say:')) {
this.logger.debug(`Say: file ${filePath}, served from cache ${servedFromCache}`);
if (filePath) cs.trackTmpFile(filePath);
if (this.otelSpan) {
this.otelSpan.setAttributes({'tts.cached': servedFromCache});
this.otelSpan.end();
this.otelSpan = null;
}
if (!servedFromCache && !lastUpdated) {
lastUpdated = true;
updateSpeechCredentialLastUsed(credentials.speech_credential_sid).catch(() => {/* logged error */});
}
if (!servedFromCache && rtt && !preCache && !this._disableTracing) {
this.notifyStatus({
event: 'synthesized-audio',
vendor,
language,
characters: text.length,
elapsedTime: rtt
});
}
}
span.setAttributes({'tts.cached': servedFromCache});
span.end();
if (!servedFromCache && rtt) {
this.notifyStatus({
event: 'synthesized-audio',
vendor,
language,
characters: text.length,
elapsedTime: rtt
});
else {
this.logger.debug('Say: a streaming tts api will be used');
const modifiedPath = filePath.replace('say:{', `say:{session-uuid=${ep.uuid},`);
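// e.g. 'say:{vendor=deepgram}Hello' => 'say:{session-uuid=<ep.uuid>,vendor=deepgram}Hello' (values illustrative)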
return modifiedPath;
}
return filePath;
} catch (err) {
this.logger.info({err}, 'Error synthesizing tts');
span.end();
if (this.otelSpan) this.otelSpan.end();
writeAlerts({
account_sid: cs.accountSid,
alert_type: AlertType.TTS_FAILURE,
vendor,
detail: err.message
}).catch((err) => this.logger.info({err}, 'Error generating alert for tts failure'));
this.notifyError({msg: 'TTS error', details: err.message || err});
throw err;
}
};
@@ -180,21 +201,27 @@ class TaskSay extends Task {
}
async exec(cs, {ep}) {
const {srf, accountSid:account_sid} = cs;
const {writeAlerts, AlertType} = srf.locals;
const {addFileToCache} = srf.locals.dbHelpers;
const engine = this.synthesizer.engine || cs.synthesizer?.engine || 'neural';
await super.exec(cs);
this.ep = ep;
const vendor = this.synthesizer.vendor && this.synthesizer.vendor !== 'default' ?
let vendor = this.synthesizer.vendor && this.synthesizer.vendor !== 'default' ?
this.synthesizer.vendor :
cs.speechSynthesisVendor;
const language = this.synthesizer.language && this.synthesizer.language !== 'default' ?
let language = this.synthesizer.language && this.synthesizer.language !== 'default' ?
this.synthesizer.language :
cs.speechSynthesisLanguage ;
const voice = this.synthesizer.voice && this.synthesizer.voice !== 'default' ?
let voice = this.synthesizer.voice && this.synthesizer.voice !== 'default' ?
this.synthesizer.voice :
cs.speechSynthesisVoice;
const label = this.synthesizer.label && this.synthesizer.label !== 'default' ?
this.synthesizer.label :
cs.speechSynthesisLabel;
// label can be null/empty in the synthesizer config; use the application-level label only when it is 'default'
let label = this.synthesizer.label === 'default' ?
cs.speechSynthesisLabel :
this.synthesizer.label;
const fallbackVendor = this.synthesizer.fallbackVendor && this.synthesizer.fallbackVendor !== 'default' ?
this.synthesizer.fallbackVendor :
@@ -205,16 +232,24 @@ class TaskSay extends Task {
const fallbackVoice = this.synthesizer.fallbackVoice && this.synthesizer.fallbackVoice !== 'default' ?
this.synthesizer.fallbackVoice :
cs.fallbackSpeechSynthesisVoice;
const fallbackLabel = this.synthesizer.fallbackLabel && this.synthesizer.fallbackLabel !== 'default' ?
this.synthesizer.fallbackLabel :
cs.fallbackSpeechSynthesisLabel;
// label can be null/empty in the synthesizer config; use the application-level label only when it is 'default'
const fallbackLabel = this.synthesizer.fallbackLabel === 'default' ?
cs.fallbackSpeechSynthesisLabel :
this.synthesizer.fallbackLabel;
let filepath;
try {
filepath = await this._synthesizeWithSpecificVendor(cs, ep, {vendor, language, voice, label});
} catch (error) {
if (fallbackVendor && this.isHandledByPrimaryProvider) {
if (cs.hasFallbackTts) {
vendor = fallbackVendor;
language = fallbackLanguage;
voice = fallbackVoice;
label = fallbackLabel;
}
const startFallback = async(error) => {
if (fallbackVendor && this.isHandledByPrimaryProvider && !cs.hasFallbackTts) {
this.notifyError(
{ msg: 'TTS error', details:`TTS vendor ${vendor} error: ${error}`, failover: 'in progress'});
this.isHandledByPrimaryProvider = false;
cs.hasFallbackTts = true;
this.logger.info(`Synthesize error, fallback to ${fallbackVendor}`);
filepath = await this._synthesizeWithSpecificVendor(cs, ep,
{
@@ -224,22 +259,107 @@ class TaskSay extends Task {
label: fallbackLabel
});
} else {
this.notifyError(
{ msg: 'TTS error', details:`TTS vendor ${vendor} error: ${error}`, failover: 'not available'});
throw error;
}
};
let filepath;
try {
filepath = await this._synthesizeWithSpecificVendor(cs, ep, {vendor, language, voice, label});
} catch (error) {
await startFallback(error);
}
this.notifyStatus({event: 'start-playback'});
while (!this.killed && (this.loop === 'forever' || this.loop--) && this.ep?.connected) {
while (!this.killed && (this.loop === 'forever' || this.loop--) && ep?.connected) {
let segment = 0;
while (!this.killed && segment < filepath.length) {
if (cs.isInConference) {
const {memberId, confName, confUuid} = cs;
await this.playToConfMember(this.ep, memberId, confName, confUuid, filepath[segment]);
await this.playToConfMember(ep, memberId, confName, confUuid, filepath[segment]);
}
else {
this.logger.debug(`Say:exec sending command to play file ${filepath[segment]}`);
await ep.play(filepath[segment]);
this.logger.debug(`Say:exec completed play file ${filepath[segment]}`);
let tts_cache_filename;
if (filepath[segment].startsWith('say:{')) {
const arr = /^say:\{.*\}\s*(.*)$/.exec(filepath[segment]);
if (arr) this.logger.debug(`Say:exec sending streaming tts request: ${arr[1].substring(0, 64)}..`);
}
else this.logger.debug(`Say:exec sending ${filepath[segment].substring(0, 64)}`);
ep.once('playback-start', (evt) => {
this.logger.debug({evt}, 'Say got playback-start');
if (this.otelSpan) {
this._addStreamingTtsAttributes(this.otelSpan, evt);
this.otelSpan.end();
this.otelSpan = null;
if (evt.variable_tts_cache_filename) {
tts_cache_filename = evt.variable_tts_cache_filename;
cs.trackTmpFile(evt.variable_tts_cache_filename);
}
else {
this.logger.info('No tts_cache_filename in playback-start event');
}
}
});
ep.once('playback-stop', (evt) => {
if (!tts_cache_filename || evt.variable_tts_cache_filename !== tts_cache_filename) {
this.logger.info({evt}, 'Say: discarding playback-stop from other say verb');
}
else {
this.logger.debug({evt}, 'Say got playback-stop');
if (evt.variable_tts_error) {
writeAlerts({
account_sid,
alert_type: AlertType.TTS_FAILURE,
vendor,
detail: evt.variable_tts_error
}).catch((err) => this.logger.info({err}, 'Error generating alert for no tts'));
}
if (evt.variable_tts_cache_filename && !this.killed) {
const text = parseTextFromSayString(this.text[segment]);
addFileToCache(evt.variable_tts_cache_filename, {
account_sid,
vendor,
language,
voice,
engine,
text
}).catch((err) => this.logger.info({err}, 'Error adding file to cache'));
}
}
if (this._playResolve) {
evt.variable_tts_error ? this._playReject(new Error(evt.variable_tts_error)) : this._playResolve();
}
});
// wait for playback-stop event received to confirm if the playback is successful
this._playPromise = new Promise((resolve, reject) => {
this._playResolve = resolve;
this._playReject = reject;
});
const r = await ep.play(filepath[segment]);
this.logger.debug({r}, 'Say:exec play result');
try {
// wait for playback-stop event received to confirm if the playback is successful
await this._playPromise;
} catch (err) {
try {
await startFallback(err);
continue;
} catch (err) {
this.logger.info({err}, 'Error waiting for playback-stop event');
}
} finally {
this._playPromise = null;
this._playResolve = null;
this._playReject = null;
}
if (filepath[segment].startsWith('say:{')) {
const arr = /^say:\{.*\}\s*(.*)$/.exec(filepath[segment]);
if (arr) this.logger.debug(`Say:exec complete playing streaming tts request: ${arr[1].substring(0, 64)}..`);
} else {
// NB: this log can print speech credentials embedded in the say command in tts stream mode
this.logger.debug(`Say:exec completed play file ${filepath[segment]}`);
}
}
segment++;
}
@@ -249,7 +369,7 @@ class TaskSay extends Task {
async kill(cs) {
super.kill(cs);
if (this.ep.connected) {
if (this.ep?.connected) {
this.logger.debug('TaskSay:kill - killing audio');
if (cs.isInConference) {
const {memberId, confName} = cs;
@@ -259,8 +379,78 @@ class TaskSay extends Task {
this.notifyStatus({event: 'kill-playback'});
this.ep.api('uuid_break', this.ep.uuid);
}
this.ep.removeAllListeners('playback-start');
this.ep.removeAllListeners('playback-stop');
// if we are waiting for playback-stop event, resolve the promise
if (this._playResolve) {
this._playResolve();
this._playResolve = null;
}
}
}
_addStreamingTtsAttributes(span, evt) {
const attrs = {'tts.cached': false};
for (const [key, value] of Object.entries(evt)) {
if (key.startsWith('variable_tts_')) {
let newKey = key.substring('variable_tts_'.length)
.replace('whisper_', 'whisper.')
.replace('deepgram_', 'deepgram.')
.replace('playht_', 'playht.')
.replace('rimelabs_', 'rimelabs.')
.replace('verbio_', 'verbio.')
.replace('elevenlabs_', 'elevenlabs.');
if (spanMapping[newKey]) newKey = spanMapping[newKey];
attrs[newKey] = value;
}
}
delete attrs['cache_filename']; //no value in adding this to the span
span.setAttributes(attrs);
}
}
const spanMapping = {
// IMPORTANT: the jambonz webapp displays span names correctly only when they are 25 characters or fewer.
// e.g. whisper.ratelim_reqs has length 20 <= 25, which fits
// Elevenlabs
'elevenlabs.reported_latency_ms': 'elevenlabs.latency_ms',
'elevenlabs.request_id': 'elevenlabs.req_id',
'elevenlabs.history_item_id': 'elevenlabs.item_id',
'elevenlabs.optimize_streaming_latency': 'elevenlabs.optimization',
'elevenlabs.name_lookup_time_ms': 'name_lookup_ms',
'elevenlabs.connect_time_ms': 'connect_ms',
'elevenlabs.final_response_time_ms': 'final_response_ms',
// Whisper
'whisper.reported_latency_ms': 'whisper.latency_ms',
'whisper.request_id': 'whisper.req_id',
'whisper.reported_organization': 'whisper.organization',
'whisper.reported_ratelimit_requests': 'whisper.ratelimit',
'whisper.reported_ratelimit_remaining_requests': 'whisper.ratelimit_remain',
'whisper.reported_ratelimit_reset_requests': 'whisper.ratelimit_reset',
'whisper.name_lookup_time_ms': 'name_lookup_ms',
'whisper.connect_time_ms': 'connect_ms',
'whisper.final_response_time_ms': 'final_response_ms',
// Deepgram
'deepgram.request_id': 'deepgram.req_id',
'deepgram.reported_model_name': 'deepgram.model_name',
'deepgram.reported_model_uuid': 'deepgram.model_uuid',
'deepgram.reported_char_count': 'deepgram.char_count',
'deepgram.name_lookup_time_ms': 'name_lookup_ms',
'deepgram.connect_time_ms': 'connect_ms',
'deepgram.final_response_time_ms': 'final_response_ms',
// Playht
'playht.request_id': 'playht.req_id',
'playht.name_lookup_time_ms': 'name_lookup_ms',
'playht.connect_time_ms': 'connect_ms',
'playht.final_response_time_ms': 'final_response_ms',
// Rimelabs
'rimelabs.name_lookup_time_ms': 'name_lookup_ms',
'rimelabs.connect_time_ms': 'connect_ms',
'rimelabs.final_response_time_ms': 'final_response_ms',
// verbio
'verbio.name_lookup_time_ms': 'name_lookup_ms',
'verbio.connect_time_ms': 'connect_ms',
'verbio.final_response_time_ms': 'final_response_ms',
};
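Given the 25-character limit called out above, a throwaway guard over the table (a sketch, not in the source) would catch regressions:

// sketch: verify every mapped attribute name fits the webapp's 25-char limit
for (const [from, to] of Object.entries(spanMapping)) {
  if (to.length > 25) throw new Error(`span attribute too long: ${from} -> ${to} (${to.length})`);
}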
module.exports = TaskSay;


@@ -14,17 +14,15 @@ class SttTask extends Task {
const {
setChannelVarsForStt,
normalizeTranscription,
removeSpeechListeners,
setSpeechCredentialsAtRuntime,
compileSonioxTranscripts,
consolidateTranscripts
} = require('../utils/transcription-utils')(logger);
this.setChannelVarsForStt = setChannelVarsForStt;
this.normalizeTranscription = normalizeTranscription;
this.removeSpeechListeners = removeSpeechListeners;
this.compileSonioxTranscripts = compileSonioxTranscripts;
this.consolidateTranscripts = consolidateTranscripts;
this.eventHandlers = [];
this.isHandledByPrimaryProvider = true;
if (this.data.recognizer) {
const recognizer = this.data.recognizer;
@@ -35,7 +33,8 @@ class SttTask extends Task {
//fallback
this.fallbackVendor = recognizer.fallbackVendor || 'default';
this.fallbackLanguage = recognizer.fallbackLanguage || 'default';
this.fallbackLabel = recognizer.fallbackLabel || 'default';
// label can be empty and should not be given a default value.
this.fallbackLabel = recognizer.fallbackLabel;
/* let credentials be supplied in the recognizer object at runtime */
this.sttCredentials = setSpeechCredentialsAtRuntime(recognizer);
@@ -49,6 +48,8 @@ class SttTask extends Task {
/* buffer for soniox transcripts */
this._sonioxTranscripts = [];
/* bugname prefix */
this.bugname_prefix = '';
}
@@ -56,6 +57,23 @@ class SttTask extends Task {
super.exec(cs);
this.ep = ep;
this.ep2 = ep2;
// use session preferences if we don't have specific verb-level settings.
if (cs.recognizer) {
for (const k in cs.recognizer) {
const newValue = this.data.recognizer && this.data.recognizer[k] !== undefined ?
this.data.recognizer[k] :
cs.recognizer[k];
if (Array.isArray(newValue)) {
this.data.recognizer[k] = [...(this.data.recognizer[k] || []), ...cs.recognizer[k]];
} else if (typeof newValue === 'object' && newValue !== null) {
this.data.recognizer[k] = { ...(this.data.recognizer[k] || {}), ...cs.recognizer[k] };
} else {
this.data.recognizer[k] = newValue;
}
}
}
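// merge semantics of the loop above (illustrative): arrays concatenate with verb-level entries
// first, objects shallow-merge with session values winning, and scalars prefer the verb-level
// value when defined; e.g. verb hints ['sales'] + session hints ['support'] => ['sales', 'support']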
if ('default' === this.vendor || !this.vendor) {
this.vendor = cs.speechRecognizerVendor;
if (this.data.recognizer) this.data.recognizer.vendor = this.vendor;
@@ -64,7 +82,8 @@ class SttTask extends Task {
this.language = cs.speechRecognizerLanguage;
if (this.data.recognizer) this.data.recognizer.language = this.language;
}
if ('default' === this.label || !this.label) {
// label can be empty, should not assign application level label
if ('default' === this.label) {
this.label = cs.speechRecognizerLabel;
if (this.data.recognizer) this.data.recognizer.label = this.label;
}
@@ -77,10 +96,18 @@ class SttTask extends Task {
this.fallbackLanguage = cs.fallbackSpeechRecognizerLanguage;
if (this.data.recognizer) this.data.recognizer.fallbackLanguage = this.fallbackLanguage;
}
if ('default' === this.fallbackLabel || !this.fallbackLabel) {
// label can be empty, should not assign application level label
if ('default' === this.fallbackLabel) {
this.fallbackLabel = cs.fallbackSpeechRecognizerLabel;
if (this.data.recognizer) this.data.recognizer.fallbackLabel = this.fallbackLabel;
}
// If the call has already fallen back to the 2nd ASR vendor,
// use that.
if (cs.hasFallbackAsr) {
this.vendor = this.fallbackVendor;
this.language = this.fallbackLanguage;
this.label = this.fallbackLabel;
}
if (!this.data.recognizer.vendor) {
this.data.recognizer.vendor = this.vendor;
}
@@ -89,30 +116,28 @@ class SttTask extends Task {
this.data.recognizer.model = cs.speechRecognizerLanguage;
}
if (cs.recognizer) {
for (const k in cs.recognizer) {
if (Array.isArray(this.data.recognizer[k]) ||
Array.isArray(cs.recognizer[k]) ||
typeof this.data.recognizer[k] === 'object' ||
typeof cs.recognizer[k] === 'object'
) {
this.data.recognizer[k] = {
...this.data.recognizer[k],
...cs.recognizer[k]
};
} else {
this.data.recognizer[k] = cs.recognizer[k] || this.data.recognizer[k];
}
}
}
if (!this.sttCredentials) {
if (
// not a gather task (e.g. transcribe)
(!this.input ||
// gather task with speech
this.input.includes('speech')) &&
!this.sttCredentials) {
try {
this.sttCredentials = await this._initSpeechCredentials(this.cs, this.vendor, this.label);
} catch (error) {
if (this.fallbackVendor && this.isHandledByPrimaryProvider) {
await this._fallback();
if (this.canFallback) {
this.notifyError(
{
msg: 'ASR error', details:`Invalid vendor ${this.vendor}, Error: ${error}`,
failover: 'in progress'
});
await this._initFallback();
} else {
this.notifyError(
{
msg: 'ASR error', details:`Invalid vendor ${this.vendor}, Error: ${error}`,
failover: 'not available'
});
throw error;
}
}
@@ -123,10 +148,28 @@ class SttTask extends Task {
this.notifyError({ msg: 'ASR error', details:'Cobalt requires a model to be specified'});
throw new Error('Cobalt requires a model to be specified');
}
if (cs.hasAltLanguages) {
this.data.recognizer.altLanguages = this.data.recognizer.altLanguages.concat(cs.altLanguages);
this.logger.debug({altLanguages: this.altLanguages},
'STT:exec - applying altLanguages');
}
if (cs.hasGlobalSttPunctuation && !this.data.recognizer.punctuation) {
this.data.recognizer.punctuation = cs.globalSttPunctuation;
}
}
addCustomEventListener(ep, event, handler) {
this.eventHandlers.push({ep, event, handler});
ep.addCustomEventListener(event, handler);
}
removeCustomEventListeners() {
this.eventHandlers.forEach((h) => h.ep.removeCustomEventListener(h.event, h.handler));
}
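// tracking each {ep, event, handler} tuple lets removeCustomEventListeners() detach exactly the
// listeners this task attached -- needed when falling back to a second vendor (see _initFallback)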
async _initSpeechCredentials(cs, vendor, label) {
const {getNuanceAccessToken, getIbmAccessToken} = cs.srf.locals.dbHelpers;
const {getNuanceAccessToken, getIbmAccessToken, getAwsAuthToken, getVerbioAccessToken} = cs.srf.locals.dbHelpers;
let credentials = cs.getSpeechCredentials(vendor, 'stt', label);
if (!credentials) {
@@ -137,11 +180,6 @@ class SttTask extends Task {
alert_type: AlertType.STT_NOT_PROVISIONED,
vendor
}).catch((err) => this.logger.info({err}, 'Error generating alert for no stt'));
// Notify the application that the STT vendor is wrong.
this.notifyError({
msg: 'ASR error',
details: `No speech-to-text service credentials for ${vendor} have been configured`
});
this.notifyTaskDone();
throw new Error(`No speech-to-text service credentials for ${vendor} have been configured`);
}
@@ -159,13 +197,33 @@ class SttTask extends Task {
const {access_token, servedFromCache} = await getIbmAccessToken(stt_api_key);
this.logger.debug({stt_api_key}, `got ibm access token ${servedFromCache ? 'from cache' : ''}`);
credentials = {...credentials, access_token, stt_region};
} else if (['aws', 'polly'].includes(vendor) && credentials.roleArn) {
/* get aws access token */
const {roleArn, region} = credentials;
const {accessKeyId, secretAccessKey, sessionToken, servedFromCache} =
await getAwsAuthToken({
region,
roleArn
});
this.logger.debug({roleArn}, `got aws access token ${servedFromCache ? 'from cache' : ''}`);
credentials = {...credentials, accessKeyId, secretAccessKey, sessionToken};
} else if (vendor === 'verbio' && credentials.client_id && credentials.client_secret) {
const {access_token, servedFromCache} = await getVerbioAccessToken(credentials);
this.logger.debug({client_id: credentials.client_id},
`got verbio access token ${servedFromCache ? 'from cache' : ''}`);
credentials.access_token = access_token;
}
return credentials;
}
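For context, getAwsAuthToken presumably assumes the configured IAM role via STS; a minimal sketch of that idea (the real helper lives in srf.locals.dbHelpers and may cache tokens, so everything below is an assumption):

// sketch only -- assumes @aws-sdk/client-sts; not the actual getAwsAuthToken implementation
const {STSClient, AssumeRoleCommand} = require('@aws-sdk/client-sts');
const assumeRole = async({region, roleArn}) => {
  const sts = new STSClient({region});
  const {Credentials} = await sts.send(new AssumeRoleCommand({
    RoleArn: roleArn,
    RoleSessionName: 'jambonz-stt'   // session name is illustrative
  }));
  return {
    accessKeyId: Credentials.AccessKeyId,
    secretAccessKey: Credentials.SecretAccessKey,
    sessionToken: Credentials.SessionToken
  };
};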
async _fallback() {
get canFallback() {
return this.fallbackVendor && this.isHandledByPrimaryProvider && !this.cs.hasFallbackAsr;
}
async _initFallback() {
assert(this.fallbackVendor, 'fallback failed without fallbackVendor configuration');
this.isHandledByPrimaryProvider = false;
this.cs.hasFallbackAsr = true;
this.logger.info(`Failed to use primary STT provider, fallback to ${this.fallbackVendor}`);
this.vendor = this.fallbackVendor;
this.language = this.fallbackLanguage;
@@ -174,6 +232,8 @@ class SttTask extends Task {
this.data.recognizer.language = this.language;
this.data.recognizer.label = this.label;
this.sttCredentials = await this._initSpeechCredentials(this.cs, this.vendor, this.label);
// cleanup previous listener from previous vendor
this.removeCustomEventListeners();
}
async compileHintsForCobalt(ep, hostport, model, token, hints) {
@@ -217,6 +277,20 @@ class SttTask extends Task {
_doContinuousAsrWithDeepgram(asrTimeout) {
/* deepgram has an utterance_end_ms property that simplifies things */
assert(this.vendor === 'deepgram');
if (asrTimeout < 1000) {
this.notifyError({
msg: 'ASR error',
details:`asrTimeout ${asrTimeout} is too short for deepgram; setting it to 1000ms`
});
asrTimeout = 1000;
}
else if (asrTimeout > 5000) {
this.notifyError({
msg: 'ASR error',
details:`asrTimeout ${asrTimeout} is too long for deepgram; setting it to 5000ms`
});
asrTimeout = 5000;
}
this.logger.debug(`_doContinuousAsrWithDeepgram - setting utterance_end_ms to ${asrTimeout}`);
const dgOptions = this.data.recognizer.deepgramOptions = this.data.recognizer.deepgramOptions || {};
dgOptions.utteranceEndMs = dgOptions.utteranceEndMs || asrTimeout;
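In effect the verb's asrTimeout is clamped to the window deepgram can honor for utterance_end_ms; the same logic as a one-liner (sketch):

// equivalent clamp (sketch): keep utterance_end_ms within [1000, 5000] ms
asrTimeout = Math.min(Math.max(asrTimeout, 1000), 5000);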
@@ -236,7 +310,6 @@ class SttTask extends Task {
detail: evt.error,
vendor: this.vendor,
}).catch((err) => this.logger.info({err}, `Error generating alert for ${this.vendor} connection failure`));
this.notifyError({msg: 'ASR error', details:`Failed connecting to speech vendor ${this.vendor}: ${evt.error}`});
}
_onVendorConnectFailure(cs, _ep, evt) {
@@ -249,7 +322,6 @@ class SttTask extends Task {
message: `Failed connecting to ${this.vendor} speech recognizer: ${reason}`,
vendor: this.vendor,
}).catch((err) => this.logger.info({err}, `Error generating alert for ${this.vendor} connection failure`));
this.notifyError({msg: 'ASR error', details:`Failed connecting to speech vendor ${this.vendor}: ${reason}`});
}
}


@@ -45,6 +45,10 @@ class Task extends Emitter {
return this.name;
}
set disableTracing(val) {
this._disableTracing = val;
}
toJSON() {
return this.data;
}
@@ -160,15 +164,33 @@ class Task extends Emitter {
const httpHeaders = b3 && {b3};
span.setAttributes({'http.body': JSON.stringify(params)});
try {
if (this.id) params.verb_id = this.id;
const json = await this.cs.requestor.request(type, this.actionHook, params, httpHeaders);
span.setAttributes({'http.statusCode': 200});
span.end();
const isWsConnection = this.cs.requestor instanceof WsRequestor;
if (!isWsConnection || (expectResponse && json && Array.isArray(json) && json.length)) {
span.end();
} else {
/** we use this span to measure application response latency,
* and with websocket connections we generally get the application's response
* in a subsequent message from the far end, so we terminate the span when the
* first new set of verbs arrives after sending a transcript
* */
this.emit('VerbHookSpanWaitForEnd', {span});
// If an actionHook delay action is configured and the ws application has not yet responded
// with any verbs for the actionHook, we have to transfer the task to the call-session to await
// the next ws command verbs and also run the actionHook delay actions
//if (this.hookDelayActionOpts) {
// this.emit('ActionHookDelayActionOptions', this.hookDelayActionOpts);
//}
}
if (expectResponse && json && Array.isArray(json)) {
const makeTask = require('./make_task');
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
if (tasks && tasks.length > 0) {
this.logger.info({tasks: tasks}, `${this.name} replacing application with ${tasks.length} tasks`);
this.callSession.replaceApplication(tasks);
return true;
}
}
} catch (err) {
@@ -176,6 +198,7 @@ class Task extends Emitter {
span.end();
throw err;
}
return false;
}
}
@@ -255,6 +278,7 @@ class Task extends Emitter {
delete obj.requestor;
delete obj.notifier;
obj.tasks = cs.getRemainingTaskData();
obj.callInfo = cs.callInfo.toJSON();
if (opts && obj.tasks.length > 0) {
const key = Object.keys(obj.tasks[0])[0];
Object.assign(obj.tasks[0][key], {_: opts});


@@ -31,21 +31,59 @@ class TaskTranscribe extends SttTask {
this.separateRecognitionPerChannel = this.data.recognizer.separateRecognitionPerChannel;
}
/* for nested transcribe in dial, unless the app explicitly says so we want to transcribe both legs */
if (this.parentTask?.name === TaskName.Dial) {
if (this.data.channel === 1 || this.data.channel === 2) {
/* transcribe only the channel specified */
this.separateRecognitionPerChannel = false;
this.channel = this.data.channel;
logger.debug(`TaskTranscribe: transcribing only channel ${this.channel} in the Dial verb`);
}
else if (this.separateRecognitionPerChannel !== false) {
this.separateRecognitionPerChannel = true;
}
else {
this.channel = 1;
}
}
else {
this.channel = 1;
}
this.childSpan = [null, null];
// Continuous asr timeout
this.asrTimeout = typeof this.data.recognizer.asrTimeout === 'number' ? this.data.recognizer.asrTimeout * 1000 : 0;
if (this.asrTimeout > 0) {
this.isContinuousAsr = true;
}
/* buffer speech for continuous asr */
this._bufferedTranscripts = [];
this._bufferedTranscripts = [ [], [] ]; // for channel 1 and 2
this.bugname_prefix = 'transcribe_';
this.paused = false;
}
get name() { return TaskName.Transcribe; }
get transcribing1() {
return this.channel === 1 || this.separateRecognitionPerChannel;
}
get transcribing2() {
return this.channel === 2 || this.separateRecognitionPerChannel && this.ep2;
}
async exec(cs, {ep, ep2}) {
await super.exec(cs, {ep, ep2});
if (this.data.recognizer.vendor === 'nuance') {
this.data.recognizer.nuanceOptions = {
// by default, nuance STT will recognize only the 1st utterance;
// setting utteranceDetectionMode to 'multiple' allows nuance to detect all utterances
utteranceDetectionMode: 'multiple',
...this.data.recognizer.nuanceOptions
};
}
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, cs.srf);
if (cs.hasGlobalSttHints) {
@@ -55,41 +93,42 @@ class TaskTranscribe extends SttTask {
this.logger.debug({hints: this.data.recognizer.hints, hintsBoost: this.data.recognizer.hintsBoost},
'Transcribe:exec - applying global sttHints');
}
if (cs.hasAltLanguages) {
this.data.recognizer.altLanguages = this.data.recognizer.altLanguages.concat(cs.altLanguages);
this.logger.debug({altLanguages: this.altLanguages},
'Transcribe:exec - applying altLanguages');
}
if (cs.hasGlobalSttPunctuation && !this.data.recognizer.punctuation) {
this.data.recognizer.punctuation = cs.globalSttPunctuation;
}
try {
await this._startTranscribing(cs, ep, 1);
if (this.separateRecognitionPerChannel && ep2) {
if (this.transcribing1) {
await this._startTranscribing(cs, ep, 1);
}
if (this.transcribing2) {
await this._startTranscribing(cs, ep2, 2);
}
updateSpeechCredentialLastUsed(this.sttCredentials.speech_credential_sid)
.catch(() => {/*already logged error */});
await this.awaitTaskDone();
} catch (err) {
this.logger.info(err, 'TaskTranscribe:exec - error');
this.parentTask && this.parentTask.emit('error', err);
if (!(await this._startFallback(cs, ep, {error: err}))) {
this.logger.info(err, 'TaskTranscribe:exec - error');
this.parentTask && this.parentTask.emit('error', err);
this.removeCustomEventListeners();
return;
}
}
this.removeSpeechListeners(ep);
await this.awaitTaskDone();
this.removeCustomEventListeners();
}
async _stopTranscription() {
let stopTranscription = false;
if (this.ep?.connected) {
if (this.transcribing1 && this.ep?.connected) {
stopTranscription = true;
this.ep.stopTranscription({vendor: this.vendor})
this.ep.stopTranscription({
vendor: this.vendor,
bugname: this.bugname
})
.catch((err) => this.logger.info(err, 'Error TaskTranscribe:kill'));
}
if (this.separateRecognitionPerChannel && this.ep2 && this.ep2.connected) {
if (this.transcribing2 && this.ep2?.connected) {
stopTranscription = true;
this.ep2.stopTranscription({vendor: this.vendor})
this.ep2.stopTranscription({vendor: this.vendor, bugname: this.bugname})
.catch((err) => this.logger.info(err, 'Error TaskTranscribe:kill'));
}
@@ -111,13 +150,13 @@ class TaskTranscribe extends SttTask {
this.logger.info(`TaskTranscribe:updateTranscribe status ${status}`);
switch (status) {
case TranscribeStatus.Pause:
this.paused = true;
await this._stopTranscription();
break;
case TranscribeStatus.Resume:
await this._startTranscribing(this.cs, this.ep, 1);
if (this.separateRecognitionPerChannel && this.ep2) {
await this._startTranscribing(this.cs, this.ep2, 2);
}
this.paused = false;
if (this.transcribing1) await this._startTranscribing(this.cs, this.ep, 1);
if (this.transcribing2) await this._startTranscribing(this.cs, this.ep2, 2);
break;
}
}
@@ -132,61 +171,61 @@ class TaskTranscribe extends SttTask {
if (this.isContinuousAsr) this._doContinuousAsrWithDeepgram(this.asrTimeout);
}
const opts = this.setChannelVarsForStt(this, this.sttCredentials, this.data.recognizer);
const opts = this.setChannelVarsForStt(this, this.sttCredentials, this.language, this.data.recognizer);
switch (this.vendor) {
case 'google':
this.bugname = 'google_transcribe';
ep.addCustomEventListener(GoogleTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}google_transcribe`;
this.addCustomEventListener(ep, GoogleTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(GoogleTranscriptionEvents.NoAudioDetected,
this.addCustomEventListener(ep, GoogleTranscriptionEvents.NoAudioDetected,
this._onNoAudio.bind(this, cs, ep, channel));
ep.addCustomEventListener(GoogleTranscriptionEvents.MaxDurationExceeded,
this.addCustomEventListener(ep, GoogleTranscriptionEvents.MaxDurationExceeded,
this._onMaxDurationExceeded.bind(this, cs, ep, channel));
break;
case 'aws':
case 'polly':
this.bugname = 'aws_transcribe';
ep.addCustomEventListener(AwsTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}aws_transcribe`;
this.addCustomEventListener(ep, AwsTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(AwsTranscriptionEvents.NoAudioDetected,
this.addCustomEventListener(ep, AwsTranscriptionEvents.NoAudioDetected,
this._onNoAudio.bind(this, cs, ep, channel));
ep.addCustomEventListener(AwsTranscriptionEvents.MaxDurationExceeded,
this.addCustomEventListener(ep, AwsTranscriptionEvents.MaxDurationExceeded,
this._onMaxDurationExceeded.bind(this, cs, ep, channel));
break;
case 'microsoft':
this.bugname = 'azure_transcribe';
ep.addCustomEventListener(AzureTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}azure_transcribe`;
this.addCustomEventListener(ep, AzureTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(AzureTranscriptionEvents.NoSpeechDetected,
this._onNoAudio.bind(this, cs, ep, channel));
//this.addCustomEventListener(ep, AzureTranscriptionEvents.NoSpeechDetected,
// this._onNoAudio.bind(this, cs, ep, channel));
break;
case 'nuance':
this.bugname = 'nuance_transcribe';
ep.addCustomEventListener(NuanceTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}nuance_transcribe`;
this.addCustomEventListener(ep, NuanceTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
break;
case 'deepgram':
this.bugname = 'deepgram_transcribe';
ep.addCustomEventListener(DeepgramTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}deepgram_transcribe`;
this.addCustomEventListener(ep, DeepgramTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(DeepgramTranscriptionEvents.Connect,
this.addCustomEventListener(ep, DeepgramTranscriptionEvents.Connect,
this._onVendorConnect.bind(this, cs, ep));
ep.addCustomEventListener(DeepgramTranscriptionEvents.ConnectFailure,
this.addCustomEventListener(ep, DeepgramTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep, channel));
/* if app sets deepgramOptions.utteranceEndMs they essentially want continuous asr */
if (opts.DEEPGRAM_SPEECH_UTTERANCE_END_MS) this.isContinuousAsr = true;
//if (opts.DEEPGRAM_SPEECH_UTTERANCE_END_MS) this.isContinuousAsr = true;
break;
case 'soniox':
this.bugname = 'soniox_transcribe';
ep.addCustomEventListener(SonioxTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}soniox_transcribe`;
this.addCustomEventListener(ep, SonioxTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
break;
case 'cobalt':
this.bugname = 'cobalt_transcribe';
ep.addCustomEventListener(CobaltTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}cobalt_transcribe`;
this.addCustomEventListener(ep, CobaltTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
/* cobalt doesn't have language, it has model, which is required */
@@ -215,38 +254,39 @@ class TaskTranscribe extends SttTask {
break;
case 'ibm':
this.bugname = 'ibm_transcribe';
ep.addCustomEventListener(IbmTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}ibm_transcribe`;
this.addCustomEventListener(ep, IbmTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(IbmTranscriptionEvents.Connect,
this.addCustomEventListener(ep, IbmTranscriptionEvents.Connect,
this._onVendorConnect.bind(this, cs, ep));
ep.addCustomEventListener(IbmTranscriptionEvents.ConnectFailure,
this.addCustomEventListener(ep, IbmTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep, channel));
break;
case 'nvidia':
this.bugname = 'nvidia_transcribe';
ep.addCustomEventListener(NvidiaTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep));
this.bugname = `${this.bugname_prefix}nvidia_transcribe`;
this.addCustomEventListener(ep, NvidiaTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
break;
case 'assemblyai':
this.bugname = 'assemblyai_transcribe';
ep.addCustomEventListener(AssemblyAiTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}assemblyai_transcribe`;
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(AssemblyAiTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
ep.addCustomEventListener(AssemblyAiTranscriptionEvents.Error, this._onVendorError.bind(this, cs, ep));
ep.addCustomEventListener(AssemblyAiTranscriptionEvents.ConnectFailure,
this.addCustomEventListener(ep,
AssemblyAiTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.Error, this._onVendorError.bind(this, cs, ep));
this.addCustomEventListener(ep, AssemblyAiTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep, channel));
break;
default:
if (this.vendor.startsWith('custom:')) {
this.bugname = `${this.vendor}_transcribe`;
ep.addCustomEventListener(JambonzTranscriptionEvents.Transcription,
this.bugname = `${this.bugname_prefix}${this.vendor}_transcribe`;
this.addCustomEventListener(ep, JambonzTranscriptionEvents.Transcription,
this._onTranscription.bind(this, cs, ep, channel));
ep.addCustomEventListener(JambonzTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
ep.addCustomEventListener(JambonzTranscriptionEvents.ConnectFailure,
this.addCustomEventListener(ep, JambonzTranscriptionEvents.Connect, this._onVendorConnect.bind(this, cs, ep));
this.addCustomEventListener(ep, JambonzTranscriptionEvents.ConnectFailure,
this._onVendorConnectFailure.bind(this, cs, ep));
break;
}
@@ -258,7 +298,7 @@ class TaskTranscribe extends SttTask {
}
/* common handler for all stt engine errors */
ep.addCustomEventListener(JambonzTranscriptionEvents.Error, this._onJambonzError.bind(this, cs, ep));
this.addCustomEventListener(ep, JambonzTranscriptionEvents.Error, this._onJambonzError.bind(this, cs, ep));
await ep.set(opts)
.catch((err) => this.logger.info(err, 'Error setting channel variables'));
}
@@ -273,11 +313,13 @@ class TaskTranscribe extends SttTask {
}
async _transcribe(ep) {
this.logger.debug(
`TaskTranscribe:_transcribe - starting transcription vendor ${this.vendor} bugname ${this.bugname}`);
await ep.startTranscription({
vendor: this.vendor,
interim: this.interim ? true : false,
locale: this.language,
channels: /*this.separateRecognitionPerChannel ? 2 : */ 1,
channels: 1,
bugname: this.bugname,
hostport: this.hostport
});
@@ -286,81 +328,161 @@ class TaskTranscribe extends SttTask {
async _onTranscription(cs, ep, channel, evt, fsEvent) {
// make sure this is not a transcript from answering machine detection
const bugname = fsEvent.getHeader('media-bugname');
const finished = fsEvent.getHeader('transcription-session-finished');
const bufferedTranscripts = this._bufferedTranscripts[channel - 1];
if (bugname && this.bugname !== bugname) return;
if (this.paused) {
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - paused, ignoring transcript');
}
if (this.vendor === 'ibm' && evt?.state === 'listening') return;
if (this.vendor === 'deepgram' && evt.type === 'UtteranceEnd') {
/* we will only get this when we have set utterance_end_ms */
if (this._bufferedTranscripts.length === 0) {
/* DH: send a speech event when we get UtteranceEnd if they want interim events */
if (this.interim) {
this.logger.debug('TaskTranscribe:_onTranscription - got UtteranceEnd event from deepgram, sending speech event');
this._resolve(channel, evt);
}
if (bufferedTranscripts.length === 0) {
this.logger.debug('TaskTranscribe:_onTranscription - got UtteranceEnd event from deepgram but no buffered transcripts');
}
else {
this.logger.debug('TaskTranscribe:_onTranscription - got UtteranceEnd event from deepgram, returning buffered transcript');
evt = this.consolidateTranscripts(this._bufferedTranscripts, 1, this.language);
this._bufferedTranscripts = [];
this._resolve('speech', evt);
evt = this.consolidateTranscripts(bufferedTranscripts, channel, this.language, this.vendor);
evt.is_final = true;
this._bufferedTranscripts[channel - 1] = [];
this._resolve(channel, evt);
}
return;
}
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - before normalization');
evt = this.normalizeTranscription(evt, this.vendor, channel, this.language);
evt = this.normalizeTranscription(evt, this.vendor, channel, this.language, undefined,
this.data.recognizer.punctuation);
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription');
if (evt.alternatives.length === 0) {
this.logger.info({evt}, 'TaskTranscribe:_onTranscription - got empty transcript, continue listening');
return;
}
if (evt.alternatives[0]?.transcript === '' && !cs.callGone && !this.killed) {
if (['microsoft', 'deepgram'].includes(this.vendor)) {
this.logger.info({evt}, 'TaskTranscribe:_onTranscription - got empty transcript, continue listening');
let emptyTranscript = false;
if (evt.is_final) {
if (evt.alternatives.length === 0 || evt.alternatives[0].transcript === '' && !cs.callGone && !this.killed) {
emptyTranscript = true;
if (finished === 'true' &&
['microsoft', 'deepgram'].includes(this.vendor) &&
bufferedTranscripts.length === 0) {
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - got empty transcript from a finished session, disregarding');
return;
}
else if (this.vendor !== 'deepgram') {
this.logger.info({evt}, 'TaskTranscribe:_onTranscription - got empty transcript, continue listening');
return;
}
else if (this.isContinuousAsr) {
this.logger.info({evt},
'TaskTranscribe:_onTranscription - got empty deepgram transcript during continuous asr, continue listening');
return;
}
else if (this.vendor === 'deepgram' && bufferedTranscripts.length > 0) {
this.logger.info({evt},
'TaskTranscribe:_onTranscription - got empty transcript from deepgram, returning the buffered transcripts');
}
}
if (this.isContinuousAsr) {
/* append the transcript and start listening again for asrTimeout */
const t = evt.alternatives[0].transcript;
if (t) {
/* remove trailing punctuation */
if (/[,;:\.!\?]$/.test(t)) {
this.logger.debug('TaskTranscribe:_onTranscription - removing trailing punctuation');
evt.alternatives[0].transcript = t.slice(0, -1);
}
}
this.logger.info({evt}, 'TaskTranscribe:_onTranscription - got transcript during continuous asr');
bufferedTranscripts.push(evt);
this._startAsrTimer(channel);
/* some STT engines will keep listening after a final response, so no need to restart */
if (!['soniox', 'aws', 'microsoft', 'deepgram', 'google']
.includes(this.vendor)) this._startTranscribing(cs, ep, channel);
}
else {
this.logger.info({evt}, 'TaskTranscribe:_onTranscription - got empty transcript, listen again');
this._transcribe(ep);
}
return;
}
if (this.vendor === 'soniox') {
/* compile transcripts into one */
this._sonioxTranscripts.push(evt.vendor.finalWords);
evt = this.compileSonioxTranscripts(this._sonioxTranscripts, 1, this.language);
this._sonioxTranscripts = [];
}
else if (this.vendor === 'deepgram') {
/* compile transcripts into one */
if (!emptyTranscript) bufferedTranscripts.push(evt);
if (this.vendor === 'soniox') {
/* compile transcripts into one */
this._sonioxTranscripts.push(evt.vendor.finalWords);
if (evt.is_final) {
evt = this.compileSonioxTranscripts(this._sonioxTranscripts, 1, this.language);
this._sonioxTranscripts = [];
/* deepgram can send an empty yet final transcript; resolve only if we have buffered transcripts */
if (bufferedTranscripts.length === 0) return;
evt = this.consolidateTranscripts(bufferedTranscripts, channel, this.language);
this._bufferedTranscripts[channel - 1] = [];
}
/* here is where we return a final transcript */
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - sending final transcript');
this._resolve(channel, evt);
/* some STT engines will keep listening after a final response, so no need to restart */
if (!['soniox', 'aws', 'microsoft', 'deepgram', 'google'].includes(this.vendor) &&
!this.vendor.startsWith('custom:')) {
this.logger.debug('TaskTranscribe:_onTranscription - restarting transcribe');
this._startTranscribing(cs, ep, channel);
}
}
}
else {
/* interim transcript */
if (this.isContinuousAsr && evt.is_final) {
this._bufferedTranscripts.push(evt);
this._startAsrTimer(channel);
} else {
await this._resolve(channel, evt);
/* deepgram can send a non-final transcript but with words that are final, so we need to buffer */
if (this.vendor === 'deepgram') {
const originalEvent = evt.vendor.evt;
if (originalEvent.is_final && evt.alternatives[0].transcript !== '') {
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - buffering a completed (partial) deepgram transcript');
bufferedTranscripts.push(evt);
}
}
if (this.interim) {
this.logger.debug({evt}, 'TaskTranscribe:_onTranscription - sending interim transcript');
this._resolve(channel, evt);
}
}
}
async _resolve(channel, evt) {
/* we've got a transcript, so end the otel child span for this channel */
if (this.childSpan[channel - 1] && this.childSpan[channel - 1].span) {
this.childSpan[channel - 1].span.setAttributes({
channel,
'stt.resolve': 'transcript',
'stt.result': JSON.stringify(evt)
});
this.childSpan[channel - 1].span.end();
if (evt.is_final) {
/* we've got a final transcript, so end the otel child span for this channel */
if (this.childSpan[channel - 1] && this.childSpan[channel - 1].span) {
this.childSpan[channel - 1].span.setAttributes({
channel,
'stt.resolve': 'transcript',
'stt.result': JSON.stringify(evt)
});
this.childSpan[channel - 1].span.end();
}
}
if (this.transcriptionHook) {
const b3 = this.getTracingPropagation();
const httpHeaders = b3 && {b3};
const payload = {
...this.cs.callInfo,
...httpHeaders,
...(evt.alternatives && {speech: evt}),
...(evt.type && {speechEvent: evt})
};
try {
const json = await this.cs.requestor.request('verb:hook', this.transcriptionHook, {
...this.cs.callInfo,
...httpHeaders,
speech: evt
});
this.logger.info({json}, 'sent transcriptionHook');
this.logger.debug({payload}, 'sending transcriptionHook');
const json = await this.cs.requestor.request('verb:hook', this.transcriptionHook, payload);
this.logger.info({json}, 'completed transcriptionHook');
if (json && Array.isArray(json) && !this.parentTask) {
const makeTask = require('./make_task');
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
@@ -381,7 +503,7 @@ class TaskTranscribe extends SttTask {
this._clearTimer();
this.notifyTaskDone();
}
else {
else if (evt.is_final) {
/* start another child span for this channel */
const {span, ctx} = this.startChildSpan(`${STT_LISTEN_SPAN_NAME}:${channel}`);
this.childSpan[channel - 1] = {span, ctx};
@@ -389,7 +511,8 @@ class TaskTranscribe extends SttTask {
}
_onNoAudio(cs, ep, channel) {
this.logger.debug(`TaskTranscribe:_onNoAudio restarting transcription on channel ${channel}`);
this.logger.debug(`TaskTranscribe:_onNoAudio on channel ${channel}`);
if (this.paused) return;
if (this.childSpan[channel - 1] && this.childSpan[channel - 1].span) {
this.childSpan[channel - 1].span.setAttributes({
channel,
@@ -405,7 +528,8 @@ class TaskTranscribe extends SttTask {
}
_onMaxDurationExceeded(cs, ep, channel) {
this.logger.debug(`TaskTranscribe:_onMaxDurationExceeded restarting transcription on channel ${channel}`);
this.logger.debug(`TaskTranscribe:_onMaxDurationExceeded on channel ${channel}`);
if (this.paused) return;
if (this.childSpan[channel - 1] && this.childSpan[channel - 1].span) {
this.childSpan[channel - 1].span.setAttributes({
channel,
@@ -428,44 +552,66 @@ class TaskTranscribe extends SttTask {
}
}
async _onJambonzError(cs, _ep, evt) {
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError');
if (this.isHandledByPrimaryProvider && this.fallbackVendor) {
_ep.stopTranscription({vendor: this.vendor})
async _startFallback(cs, _ep, evt) {
if (this.canFallback) {
_ep.stopTranscription({
vendor: this.vendor,
bugname: this.bugname
})
.catch((err) => this.logger.error({err}, `Error stopping transcription for primary vendor ${this.vendor}`));
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, cs.srf);
try {
await this._fallback();
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'in progress'});
await this._initFallback();
let channel = 1;
if (this.ep !== _ep) {
channel = 2;
}
this[`_speechHandlersSet_${channel}`] = false;
this._startTranscribing(cs, _ep, channel);
updateSpeechCredentialLastUsed(this.sttCredentials.speech_credential_sid);
return;
return true;
} catch (error) {
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'not available'});
this.logger.info({error}, `There was an error while falling back to ${this.fallbackVendor}`);
}
} else {
const {writeAlerts, AlertType} = cs.srf.locals;
this.logger.debug('transcribe:_startFallback no condition for falling back');
this.notifyError({ msg: 'ASR error',
details:`STT Vendor ${this.vendor} error: ${evt.error || evt.reason}`, failover: 'not available'});
}
return false;
}
if (this.vendor === 'nuance') {
const {code, error} = evt;
if (code === 404 && error === 'No speech') return this._resolve('timeout');
if (code === 413 && error === 'Too much speech') return this._resolve('timeout');
}
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError');
writeAlerts({
account_sid: cs.accountSid,
alert_type: AlertType.STT_FAILURE,
message: `Custom speech vendor ${this.vendor} error: ${evt.error}`,
vendor: this.vendor,
}).catch((err) => this.logger.info({err}, 'Error generating alert for jambonz custom connection failure'));
this.notifyError({msg: 'ASR error', details:`Custom speech vendor ${this.vendor} error: ${evt.error}`});
async _onJambonzError(cs, _ep, evt) {
if (this.vendor === 'google' && evt.error_code === 0) {
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError - ignoring google error code 0');
return;
}
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError');
if (this.paused) return;
const {writeAlerts, AlertType} = cs.srf.locals;
if (this.vendor === 'nuance') {
const {code, error} = evt;
if (code === 404 && error === 'No speech') return this._resolve('timeout');
if (code === 413 && error === 'Too much speech') return this._resolve('timeout');
}
this.logger.info({evt}, 'TaskTranscribe:_onJambonzError');
writeAlerts({
account_sid: cs.accountSid,
alert_type: AlertType.STT_FAILURE,
message: `Custom speech vendor ${this.vendor} error: ${evt.error}`,
vendor: this.vendor,
}).catch((err) => this.logger.info({err}, 'Error generating alert for jambonz custom connection failure'));
if (!(await this._startFallback(cs, _ep, evt))) {
this.notifyTaskDone();
}
}
_onVendorConnectFailure(cs, _ep, channel, evt) {
async _onVendorConnectFailure(cs, _ep, channel, evt) {
super._onVendorConnectFailure(cs, _ep, evt);
if (this.childSpan[channel - 1] && this.childSpan[channel - 1].span) {
this.childSpan[channel - 1].span.setAttributes({
@@ -474,7 +620,9 @@ class TaskTranscribe extends SttTask {
});
this.childSpan[channel - 1].span.end();
}
this.notifyTaskDone();
if (!(await this._startFallback(cs, _ep, evt))) {
this.notifyTaskDone();
}
}
_startAsrTimer(channel) {
@@ -483,8 +631,9 @@ class TaskTranscribe extends SttTask {
this._clearAsrTimer(channel);
this._asrTimer = setTimeout(() => {
this.logger.debug(`TaskTranscribe:_startAsrTimer - asr timer went off for channel: ${channel}`);
const evt = this.consolidateTranscripts(this._bufferedTranscripts, channel, this.language);
this._bufferedTranscripts = [];
const evt = this.consolidateTranscripts(
this._bufferedTranscripts[channel - 1], channel, this.language, this.vendor);
this._bufferedTranscripts[channel - 1] = [];
this._resolve(channel, evt);
}, this.asrTimeout);
this.logger.debug(`TaskTranscribe:_startAsrTimer: set for ${this.asrTimeout}ms for channel ${channel}`);

lib/tasks/tts-task.js (new file)

@@ -0,0 +1,180 @@
const Task = require('./task');
const { TaskPreconditions } = require('../utils/constants');
class TtsTask extends Task {
constructor(logger, data, parentTask) {
super(logger, data);
this.parentTask = parentTask;
this.preconditions = TaskPreconditions.Endpoint;
this.earlyMedia = this.data.earlyMedia === true || (parentTask && parentTask.earlyMedia);
this.synthesizer = this.data.synthesizer || {};
this.disableTtsCache = this.data.disableTtsCache;
this.options = this.synthesizer.options || {};
}
async exec(cs) {
super.exec(cs);
}
async _synthesizeWithSpecificVendor(cs, ep, {
vendor,
language,
voice,
label,
disableTtsStreaming,
preCache
}) {
const {srf, accountSid:account_sid} = cs;
const {updateSpeechCredentialLastUsed} = require('../utils/db-utils')(this.logger, srf);
const {writeAlerts, AlertType, stats} = srf.locals;
const {synthAudio} = srf.locals.dbHelpers;
const engine = this.synthesizer.engine || cs.synthesizer?.engine || 'neural';
const salt = cs.callSid;
let credentials = cs.getSpeechCredentials(vendor, 'tts', label);
/* parse Nuance voices into name and model */
let model;
if (vendor === 'nuance' && voice) {
const arr = /([A-Za-z-]*)\s+-\s+(enhanced|standard)/.exec(voice);
if (arr) {
voice = arr[1];
model = arr[2];
}
} else if (vendor === 'deepgram') {
model = voice;
}
/* allow for microsoft custom region voice and api_key to be specified as an override */
if (vendor === 'microsoft' && this.options.deploymentId) {
credentials = credentials || {};
credentials.use_custom_tts = true;
credentials.custom_tts_endpoint = this.options.deploymentId;
credentials.api_key = this.options.apiKey || credentials.apiKey;
credentials.region = this.options.region || credentials.region;
voice = this.options.voice || voice;
} else if (vendor === 'elevenlabs') {
credentials = credentials || {};
credentials.model_id = this.options.model_id || credentials.model_id;
credentials.voice_settings = this.options.voice_settings || {};
credentials.optimize_streaming_latency = this.options.optimize_streaming_latency
|| credentials.optimize_streaming_latency;
voice = this.options.voice_id || voice;
}
ep.set({
tts_engine: vendor,
tts_voice: voice,
cache_speech_handles: 1,
}).catch((err) => this.logger.info({err}, `${this.name}: Error setting tts_engine on endpoint`));
if (!preCache) this.logger.info({vendor, language, voice, model}, `${this.name}:exec`);
try {
if (!credentials) {
writeAlerts({
account_sid,
alert_type: AlertType.TTS_NOT_PROVISIONED,
vendor
}).catch((err) => this.logger.info({err}, 'Error generating alert for no tts'));
this.notifyError({
msg: 'TTS error',
details:`No speech credentials provisioned for selected vendor ${vendor}`
});
throw new Error('no provisioned speech credentials for TTS');
}
// synthesize all of the text elements
let lastUpdated = false;
/* produce an audio segment from the provided text */
const generateAudio = async(text) => {
if (this.killed) return;
if (text.startsWith('silence_stream://')) return text;
/* otel: trace time for tts */
if (!preCache && !this.parentTask) {
const {span} = this.startChildSpan('tts-generation', {
'tts.vendor': vendor,
'tts.language': language,
'tts.voice': voice
});
this.otelSpan = span;
}
try {
const {filePath, servedFromCache, rtt} = await synthAudio(stats, {
account_sid,
text,
vendor,
language,
voice,
engine,
model,
salt,
credentials,
options: this.options,
disableTtsCache : this.disableTtsCache,
disableTtsStreaming,
preCache
});
if (!filePath.startsWith('say:')) {
this.logger.debug(`file ${filePath}, served from cache ${servedFromCache}`);
if (filePath) cs.trackTmpFile(filePath);
if (this.otelSpan) {
this.otelSpan.setAttributes({'tts.cached': servedFromCache});
this.otelSpan.end();
this.otelSpan = null;
}
if (!servedFromCache && !lastUpdated) {
lastUpdated = true;
updateSpeechCredentialLastUsed(credentials.speech_credential_sid).catch(() => {/* logged error */});
}
if (!servedFromCache && rtt && !preCache) {
this.notifyStatus({
event: 'synthesized-audio',
vendor,
language,
characters: text.length,
elapsedTime: rtt
});
}
}
else {
this.logger.debug('a streaming tts api will be used');
const modifiedPath = filePath.replace('say:{', `say:{session-uuid=${ep.uuid},`);
return modifiedPath;
}
return filePath;
} catch (err) {
this.logger.info({err}, 'Error synthesizing tts');
if (this.otelSpan) this.otelSpan.end();
writeAlerts({
account_sid: cs.accountSid,
alert_type: AlertType.TTS_FAILURE,
vendor,
detail: err.message
}).catch((err) => this.logger.info({err}, 'Error generating alert for tts failure'));
this.notifyError({msg: 'TTS error', details: err.message || err});
throw err;
}
};
const arr = this.text.map((t) => (this._validateURL(t) ? t : generateAudio(t)));
return (await Promise.all(arr)).filter((fp) => fp && fp.length);
} catch (err) {
this.logger.info(err, 'TaskSay:exec error');
throw err;
}
}
_validateURL(urlString) {
try {
new URL(urlString);
return true;
} catch (e) {
return false;
}
}
}
module.exports = TtsTask;

View File

@@ -0,0 +1,175 @@
const makeTask = require('../tasks/make_task');
const Emitter = require('events');
const { normalizeJambones } = require('@jambonz/verb-specifications');
const {TaskName} = require('../utils/constants');
const assert = require('assert');
/**
* ActionHookDelayProcessor
* @extends Emitter
*
* @param {Object} logger - logger instance
* @param {Object} opts - options
* @param {Object} cs - call session
* @param {Object} ep - endpoint
*
* @emits {Event} 'giveup' - when associated giveup timer expires
*
* Ref:https://www.jambonz.org/docs/supporting-articles/handling-action-hook-delays/
*/
class ActionHookDelayProcessor extends Emitter {
constructor(logger, opts, cs) {
super();
this.logger = logger;
this.cs = cs;
this._active = false;
const enabled = this.init(opts);
if (enabled && (!this.actions || !Array.isArray(this.actions) || this.actions.length === 0)) {
throw new Error('ActionHookDelayProcessor: no actions specified');
}
else if (enabled && this.actions.some((a) => !a.verb || ![TaskName.Say, TaskName.Play].includes(a.verb))) {
throw new Error(`ActionHookDelayProcessor: invalid actions specified: ${JSON.stringify(this.actions)}`);
}
}
get properties() {
return {
actions: this.actions,
retries: this.retries,
noResponseTimeout: this.noResponseTimeout,
noResponseGiveUpTimeout: this.noResponseGiveUpTimeout
};
}
get ep() {
return this.cs.ep;
}
init(opts) {
this.logger.debug({opts}, 'ActionHookDelayProcessor#init');
this.actions = opts.actions;
this.retries = opts.retries || 0;
this.noResponseTimeout = opts.noResponseTimeout || 0;
this.noResponseGiveUpTimeout = opts.noResponseGiveUpTimeout;
// return false if these options actually disable the ahdp
return ('enable' in opts && opts.enable === true) ||
('enabled' in opts && opts.enabled === true) ||
(!('enable' in opts) && !('enabled' in opts));
}
start() {
this.logger.debug('ActionHookDelayProcessor#start');
if (this._active) {
this.logger.debug('ActionHookDelayProcessor#start: already started due to prior gather which is continuing');
return;
}
assert(!this._noResponseTimer);
this._active = true;
this._retryCount = 0;
const timeoutMs = this.noResponseTimeout === 0 ? 1 : this.noResponseTimeout * 1000;
this._noResponseTimer = setTimeout(this._onNoResponseTimer.bind(this), timeoutMs);
if (this.noResponseGiveUpTimeout > 0) {
const timeoutMs = this.noResponseGiveUpTimeout * 1000;
this._noResponseGiveUpTimer = setTimeout(this._onNoResponseGiveUpTimer.bind(this), timeoutMs);
}
}
async stop() {
this.logger.debug('ActionHookDelayProcessor#stop');
this._active = false;
if (this._noResponseTimer) {
clearTimeout(this._noResponseTimer);
this._noResponseTimer = null;
}
if (this._noResponseGiveUpTimer) {
clearTimeout(this._noResponseGiveUpTimer);
this._noResponseGiveUpTimer = null;
}
if (this._taskInProgress) {
this.logger.debug(`ActionHookDelayProcessor#stop: killing task in progress: ${this._taskInProgress.name}`);
/** if we are doing a play, kill it immediately
* if we are doing a say, wait for it to finish
*/
if (TaskName.Say === this._taskInProgress.name) {
this._sayResolver = () => {
this.logger.debug('ActionHookDelayProcessor#stop: say is done, continue on..');
this._taskInProgress.kill(this.cs);
this._taskInProgress = null;
};
this.logger.debug('ActionHookDelayProcessor#stop returning promise');
return new Promise((resolve) => this._sayResolver = resolve);
}
else {
/* play */
this._taskInProgress.kill(this.cs);
this._taskInProgress = null;
}
}
this.logger.debug('ActionHookDelayProcessor#stop returning');
}
_onNoResponseTimer() {
this.logger.debug('ActionHookDelayProcessor#_onNoResponseTimer');
this._noResponseTimer = null;
/* get the next play or say action */
const verb = this.actions[this._retryCount % this.actions.length];
const t = normalizeJambones(this.logger, [verb]);
this.logger.debug({verb}, 'ActionHookDelayProcessor#_onNoResponseTimer: starting action');
try {
this._taskInProgress = makeTask(this.logger, t[0]);
this._taskInProgress.disableTracing = true;
this._taskInProgress.exec(this.cs, {ep: this.ep});
} catch (err) {
this.logger.info(err, 'ActionHookDelayProcessor#_onNoResponseTimer: error starting action');
this._taskInProgress = null;
return;
}
this.ep.once('playback-start', (evt) => {
this.logger.debug({evt}, 'got playback-start');
if (!this._active) {
this.logger.info({evt}, 'ActionHookDelayProcessor#_onNoResponseTimer: killing audio immediately');
this.ep.api('uuid_break', this.ep.uuid)
.catch((err) => this.logger.info(err,
'ActionHookDelayProcessor#_onNoResponseTimer Error killing audio'));
}
});
this.ep.once('playback-stop', (evt) => {
this._taskInProgress = null;
if (this._sayResolver) {
/* we were waiting for the play to finish before continuing to next task */
this.logger.debug({evt}, 'got playback-stop');
this._sayResolver();
this._sayResolver = null;
}
else {
/* possibly start the no response timer again */
if (this._active && this.retries > 0 && this._retryCount < this.retries && this.noResponseTimeout > 0) {
this.logger.debug({evt}, 'ActionHookDelayProcessor#_onNoResponseTimer: playback-stop on play/say action');
const timeoutMs = this.noResponseTimeout * 1000;
this._noResponseTimer = setTimeout(this._onNoResponseTimer.bind(this), timeoutMs);
}
}
});
this._retryCount++;
}
_onNoResponseGiveUpTimer() {
this._active = false;
this.logger.info('ActionHookDelayProcessor#_onNoResponseGiveUpTimer');
this.stop().catch((err) => {});
this.emit('giveup');
}
}
module.exports = ActionHookDelayProcessor;

View File

@@ -266,7 +266,7 @@ module.exports = (logger) => {
/* set stt options */
logger.info(`starting amd for vendor ${vendor} and language ${language}`);
const sttOpts = amd.setChannelVarsForStt({name: TaskName.Gather}, sttCredentials, {
const sttOpts = amd.setChannelVarsForStt({name: TaskName.Gather}, sttCredentials, language, {
vendor,
hints,
enhancedModel: true,

View File

@@ -0,0 +1,198 @@
const { normalizeJambones } = require('@jambonz/verb-specifications');
const makeTask = require('../tasks/make_task');
const { JAMBONZ_RECORD_WS_BASE_URL, JAMBONZ_RECORD_WS_USERNAME, JAMBONZ_RECORD_WS_PASSWORD } = require('../config');
const Emitter = require('events');
class BackgroundTaskManager extends Emitter {
constructor({cs, logger, rootSpan}) {
super();
this.tasks = new Map();
this.cs = cs;
this.logger = logger;
this.rootSpan = rootSpan;
}
isTaskRunning(type) {
return this.tasks.has(type);
}
getTask(type) {
if (this.tasks.has(type)) {
return this.tasks.get(type);
}
}
count() {
return this.tasks.size;
}
async newTask(type, opts, sticky = false) {
this.logger.info({opts}, `initiating Background task ${type}`);
if (this.tasks.has(type)) {
this.logger.info(`Background task ${type} is running, skipped`);
return;
}
let task;
switch (type) {
case 'listen':
task = await this._initListen(opts);
break;
case 'bargeIn':
task = await this._initBargeIn(opts);
break;
case 'record':
task = await this._initRecord();
break;
case 'transcribe':
task = await this._initTranscribe(opts);
break;
default:
break;
}
if (task) {
this.tasks.set(type, task);
}
if (task && sticky) task.sticky = true;
return task;
}
stop(type) {
const task = this.getTask(type);
if (task) {
this.logger.info(`stopping background task: ${type}`);
task.removeAllListeners();
task.span.end();
task.kill();
// Remove task from managed List
this.tasks.delete(type);
}
}
stopAll() {
this.logger.debug('BackgroundTaskManager:stopAll');
for (const key of this.tasks.keys()) {
this.stop(key);
}
}
// Initiate Listen
async _initListen(opts, bugname = 'jambonz-background-listen', ignoreCustomerData = false, type = 'listen') {
let task;
try {
const t = normalizeJambones(this.logger, [opts]);
task = makeTask(this.logger, t[0]);
task.bugname = bugname;
task.ignoreCustomerData = ignoreCustomerData;
const resources = await this.cs._evaluatePreconditions(task);
const {span, ctx} = this.rootSpan.startChildSpan(`background-${type}:${task.summary}`);
task.span = span;
task.ctx = ctx;
task.exec(this.cs, resources)
.then(this._taskCompleted.bind(this, type, task))
.catch(this._taskError.bind(this, type, task));
} catch (err) {
this.logger.info({err, opts}, `BackgroundTaskManager:_initListen - Error creating ${bugname} task`);
}
return task;
}
// Initiate Gather
async _initBargeIn(opts) {
let task;
try {
const t = normalizeJambones(this.logger, [opts]);
task = makeTask(this.logger, t[0]);
task
.once('dtmf', this._bargeInTaskCompleted.bind(this))
.once('vad', this._bargeInTaskCompleted.bind(this))
.once('transcription', this._bargeInTaskCompleted.bind(this))
.once('timeout', this._bargeInTaskCompleted.bind(this));
const resources = await this.cs._evaluatePreconditions(task);
const {span, ctx} = this.rootSpan.startChildSpan(`background-bargeIn:${task.summary}`);
task.span = span;
task.ctx = ctx;
task.bugname_prefix = 'background_bargeIn_';
task.exec(this.cs, resources)
.then(() => {
this._taskCompleted('bargeIn', task);
if (task.sticky && !this.cs.callGone && !this.cs._stopping) {
this.logger.info('BackgroundTaskManager:_initBargeIn: restarting background bargeIn');
this._bargeInHandled = false;
this.newTask('bargeIn', opts, true);
}
return;
})
.catch(this._taskError.bind(this, 'bargeIn', task));
} catch (err) {
this.logger.info(err, 'BackgroundTaskManager:_initGather - Error creating bargeIn task');
}
return task;
}
// Initiate Record
async _initRecord() {
if (this.cs.accountInfo.account.record_all_calls || this.cs.application.record_all_calls) {
if (!JAMBONZ_RECORD_WS_BASE_URL || !this.cs.accountInfo.account.bucket_credential) {
this.logger.error('_initRecord: invalid cfg - missing JAMBONZ_RECORD_WS_BASE_URL or bucket config');
return undefined;
}
const listenOpts = {
url: `${JAMBONZ_RECORD_WS_BASE_URL}/record/${this.cs.accountInfo.account.bucket_credential.vendor}`,
disableBidirectionalAudio: true,
mixType : 'stereo',
passDtmf: true
};
if (JAMBONZ_RECORD_WS_USERNAME && JAMBONZ_RECORD_WS_PASSWORD) {
listenOpts.wsAuth = {
username: JAMBONZ_RECORD_WS_USERNAME,
password: JAMBONZ_RECORD_WS_PASSWORD
};
}
this.logger.debug({listenOpts}, '_initRecord: enabling listen');
return await this._initListen({verb: 'listen', ...listenOpts}, 'jambonz-session-record', true, 'record');
}
}
// Initiate Transcribe
async _initTranscribe(opts) {
let task;
try {
const t = normalizeJambones(this.logger, [opts]);
task = makeTask(this.logger, t[0]);
const resources = await this.cs._evaluatePreconditions(task);
const {span, ctx} = this.rootSpan.startChildSpan(`background-transcribe:${task.summary}`);
task.span = span;
task.ctx = ctx;
task.bugname_prefix = 'background_transcribe_';
task.exec(this.cs, resources)
.then(this._taskCompleted.bind(this, 'transcribe', task))
.catch(this._taskError.bind(this, 'transcribe', task));
} catch (err) {
this.logger.info(err, 'BackgroundTaskManager:_initTranscribe - Error creating transcribe task');
}
return task;
}
_taskCompleted(type, task) {
this.logger.debug({type, task}, `BackgroundTaskManager:_taskCompleted: task completed, sticky: ${task.sticky}`);
task.removeAllListeners();
task.span.end();
this.tasks.delete(type);
}
_taskError(type, task, error) {
this.logger.info({type, task, error}, 'BackgroundTaskManager:_taskError: task Error');
task.removeAllListeners();
task.span.end();
this.tasks.delete(type);
}
_bargeInTaskCompleted(evt) {
if (this._bargeInHandled) return;
this._bargeInHandled = true;
this.logger.debug({evt},
'BackgroundTaskManager:_bargeInTaskCompleted on event from background bargeIn, emitting bargein-done event');
this.emit('bargeIn-done', evt);
}
}
module.exports = BackgroundTaskManager;

View File

@@ -1,12 +1,13 @@
{
"TaskName": {
"Cognigy": "cognigy",
"Answer": "answer",
"Conference": "conference",
"Config": "config",
"Dequeue": "dequeue",
"Dial": "dial",
"Dialogflow": "dialogflow",
"Dtmf": "dtmf",
"Dub": "dub",
"Enqueue": "enqueue",
"Gather": "gather",
"Hangup": "hangup",
@@ -29,7 +30,8 @@
"Tag": "tag",
"Transcribe": "transcribe"
},
"AllowedSipRecVerbs": ["config", "gather", "transcribe", "listen", "tag"],
"AllowedSipRecVerbs": ["answer", "config", "gather", "transcribe", "listen", "tag"],
"AllowedConfirmSessionVerbs": ["config", "gather", "plays", "say", "tag"],
"CallStatus": {
"Trying": "trying",
"Ringing": "ringing",
@@ -95,6 +97,10 @@
"Transcription": "soniox_transcribe::transcription",
"Error": "soniox_transcribe::error"
},
"VerbioTranscriptionEvents": {
"Transcription": "verbio_transcribe::transcription",
"Error": "verbio_transcribe::error"
},
"CobaltTranscriptionEvents": {
"Transcription": "cobalt_speech::transcription",
"CompileContext": "cobalt_speech::compile_context_response",
@@ -132,6 +138,9 @@
"ConnectFailure": "assemblyai_transcribe::connect_failed",
"Connect": "assemblyai_transcribe::connect"
},
"VadDetection": {
"Detection": "vad_detect:detection"
},
"ListenEvents": {
"Connect": "mod_audio_fork::connect",
"ConnectFailure": "mod_audio_fork::connect_failed",
@@ -169,6 +178,7 @@
"session:new",
"session:reconnect",
"session:redirect",
"session:adulting",
"call:status",
"queue:status",
"dial:confirm",

View File

@@ -41,6 +41,7 @@ const speechMapper = (cred) => {
const o = JSON.parse(decrypt(credential));
obj.access_key_id = o.access_key_id;
obj.secret_access_key = o.secret_access_key;
obj.role_arn = o.role_arn;
obj.aws_region = o.aws_region;
}
else if ('microsoft' === obj.vendor) {
@@ -75,6 +76,8 @@ const speechMapper = (cred) => {
else if ('deepgram' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
obj.deepgram_stt_uri = o.deepgram_stt_uri;
obj.deepgram_stt_use_tls = o.deepgram_stt_use_tls;
}
else if ('soniox' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
@@ -92,6 +95,17 @@ const speechMapper = (cred) => {
obj.api_key = o.api_key;
obj.model_id = o.model_id;
obj.options = o.options;
} else if ('playht' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
obj.user_id = o.user_id;
obj.voice_engine = o.voice_engine;
obj.options = o.options;
} else if ('rimelabs' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
obj.model_id = o.model_id;
obj.options = o.options;
} else if ('assemblyai' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
@@ -99,6 +113,11 @@ const speechMapper = (cred) => {
const o = JSON.parse(decrypt(credential));
obj.api_key = o.api_key;
obj.model_id = o.model_id;
} else if ('verbio' === obj.vendor) {
const o = JSON.parse(decrypt(credential));
obj.client_id = o.client_id;
obj.client_secret = o.client_secret;
obj.engine_version = o.engine_version;
} else if (obj.vendor.startsWith('custom:')) {
const o = JSON.parse(decrypt(credential));
obj.auth_token = o.auth_token;

View File

@@ -14,6 +14,7 @@ const {
HTTP_PROXY_PORT,
HTTP_PROXY_PROTOCOL,
NODE_ENV,
HTTP_USER_AGENT_HEADER,
} = require('../config');
const toBase64 = (str) => Buffer.from(str || '', 'utf8').toString('base64');
@@ -116,6 +117,10 @@ class HttpRequestor extends BaseRequestor {
const url = hook.url || hook;
const method = hook.method || 'POST';
let buf = '';
httpHeaders = {
...httpHeaders,
...(HTTP_USER_AGENT_HEADER && {'user-agent' : HTTP_USER_AGENT_HEADER})
};
assert.ok(url, 'HttpRequestor:request url was not provided');
assert.ok, (['GET', 'POST'].includes(method), `HttpRequestor:request method must be 'GET' or 'POST' not ${method}`);

View File

@@ -171,13 +171,16 @@ function installSrfLocals(srf, logger) {
retrieveFromSortedSet,
retrieveByPatternSortedSet,
sortedSetLength,
sortedSetPositionByPattern
sortedSetPositionByPattern,
} = require('@jambonz/realtimedb-helpers')({}, logger, tracer);
const registrar = new Registrar(logger, client);
const {
synthAudio,
addFileToCache,
getNuanceAccessToken,
getIbmAccessToken,
getAwsAuthToken,
getVerbioAccessToken
} = require('@jambonz/speech-utils')({}, logger);
const {
writeAlerts,
@@ -215,6 +218,8 @@ function installSrfLocals(srf, logger) {
listCalls,
deleteCall,
synthAudio,
getAwsAuthToken,
addFileToCache,
createHash,
retrieveHash,
deleteKey,
@@ -235,7 +240,8 @@ function installSrfLocals(srf, logger) {
retrieveFromSortedSet,
retrieveByPatternSortedSet,
sortedSetLength,
sortedSetPositionByPattern
sortedSetPositionByPattern,
getVerbioAccessToken
},
parentLogger: logger,
getSBC,

View File

@@ -0,0 +1,18 @@
const parseDecibels = (db) => {
if (!db) return 0;
if (typeof db === 'number') {
return db;
}
else if (typeof db === 'string') {
const match = db.match(/([+-]?\d+(\.\d+)?)\s*db/i);
if (match) {
return Math.trunc(parseFloat(match[1]));
} else {
return 0;
}
} else {
return 0;
}
};
module.exports = parseDecibels;

View File

@@ -16,9 +16,13 @@ const uuidv4 = require('uuid-random');
const HttpRequestor = require('./http-requestor');
const WsRequestor = require('./ws-requestor');
const {makeOpusFirst} = require('./sdp-utils');
const {
JAMBONES_USE_FREESWITCH_TIMER_FD
} = require('../config');
class SingleDialer extends Emitter {
constructor({logger, sbcAddress, target, opts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask}) {
constructor({logger, sbcAddress, target, opts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask,
onHoldMusic}) {
super();
assert(target.type);
@@ -41,6 +45,7 @@ class SingleDialer extends Emitter {
this.callSid = uuidv4();
this.dialTask = dialTask;
this.onHoldMusic = onHoldMusic;
this.on('callStatusChange', this._notifyCallStatusChange.bind(this));
}
@@ -131,6 +136,7 @@ class SingleDialer extends Emitter {
this.serviceUrl = srf.locals.serviceUrl;
this.ep = await ms.createEndpoint();
this._configMsEndpoint();
this.logger.debug(`SingleDialer:exec - created endpoint ${this.ep.uuid}`);
/**
@@ -189,6 +195,10 @@ class SingleDialer extends Emitter {
callSid: this.callSid,
traceId: this.rootSpan.traceId
});
if (this.dialTask && this.dialTask.tag !== null &&
typeof this.dialTask.tag === 'object' && !Array.isArray(this.dialTask.tag)) {
this.callInfo.customerData = this.dialTask.tag;
}
this.logger = srf.locals.parentLogger.child({
callSid: this.callSid,
parentCallSid: this.parentCallInfo.callSid,
@@ -253,7 +263,7 @@ class SingleDialer extends Emitter {
.on('modify', async(req, res) => {
try {
if (this.ep) {
if (this.dialTask && this.dialTask.isOnHold) {
if (this.dialTask && this.dialTask.isOnHoldEnabled) {
this.logger.info('dial is onhold, emit event');
this.emit('reinvite', req, res);
} else {
@@ -285,17 +295,17 @@ class SingleDialer extends Emitter {
if (err.status === 487) status.callStatus = CallStatus.NoAnswer;
else if ([486, 600].includes(err.status)) status.callStatus = CallStatus.Busy;
this.logger.info(`SingleDialer:exec outdial failure ${err.status}`);
inviteSpan.setAttributes({'invite.status_code': err.status});
inviteSpan.end();
inviteSpan?.setAttributes({'invite.status_code': err.status});
inviteSpan?.end();
}
else {
this.logger.error(err, 'SingleDialer:exec');
status.sipStatus = 500;
inviteSpan.setAttributes({
inviteSpan?.setAttributes({
'invite.status_code': 500,
'invite.err': err.message
});
inviteSpan.end();
inviteSpan?.end();
}
this.emit('callStatusChange', status);
if (this.ep) this.ep.destroy();
@@ -320,6 +330,16 @@ class SingleDialer extends Emitter {
}
}
_configMsEndpoint() {
const opts = {
...(this.onHoldMusic && {holdMusic: `shout://${this.onHoldMusic.replace(/^https?:\/\//, '')}`}),
...(JAMBONES_USE_FREESWITCH_TIMER_FD && {timer_name: 'timerfd'})
};
if (Object.keys(opts).length > 0) {
this.ep.set(opts);
}
}
/**
* Run an application on the call after answer, e.g. call screening.
* Once the application completes in some fashion, emit an 'accepted' event
@@ -333,6 +353,7 @@ class SingleDialer extends Emitter {
const json = await this.requestor.request('dial:confirm', confirmHook, this.callInfo.toJSON());
if (!json || (Array.isArray(json) && json.length === 0)) {
this.logger.info('SingleDialer:_executeApp: no tasks returned from confirm hook');
this.emit('accept');
return;
}
const tasks = normalizeJambones(this.logger, json).map((tdata) => makeTask(this.logger, tdata));
@@ -392,6 +413,7 @@ class SingleDialer extends Emitter {
const app = {...application};
if ('WS' === app.call_hook?.method ||
app.call_hook?.url.startsWith('ws://') || app.call_hook?.url.startsWith('wss://')) {
if (app.call_hook?.url) app.call_hook.url += '/adulting';
const requestor = new WsRequestor(logger, this.accountInfo.account.account_sid,
app.call_hook, this.accountInfo.account.webhook_secret);
app.requestor = requestor;
@@ -406,6 +428,8 @@ class SingleDialer extends Emitter {
this.accountInfo.account.webhook_secret);
else app.notifier = {request: () => {}, close: () => {}};
}
// Replace old application with new application.
this.application = app;
const cs = new AdultingCallSession({
logger: newLogger,
singleDialer: this,
@@ -415,6 +439,13 @@ class SingleDialer extends Emitter {
tasks,
rootSpan
});
app.requestor.request('session:adulting', '/adulting', {
...cs.callInfo.toJSON(),
parentCallInfo: this.parentCallInfo
}).catch((err) => {
newLogger.error({err}, 'doAdulting: error sending adulting request');
});
cs.req = this.req;
cs.exec().catch((err) => newLogger.error({err}, 'doAdulting: error executing session'));
return cs;
@@ -436,6 +467,7 @@ class SingleDialer extends Emitter {
async reAnchorMedia() {
assert(this.dlg && this.dlg.connected && !this.ep);
this.ep = await this.ms.createEndpoint({remoteSdp: this.dlg.remote.sdp});
this._configMsEndpoint();
await this.dlg.modify(this.ep.local.sdp, {
headers: {
'X-Reason': 'anchor-media'
@@ -466,11 +498,12 @@ class SingleDialer extends Emitter {
}
function placeOutdial({
logger, srf, ms, sbcAddress, target, opts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask
logger, srf, ms, sbcAddress, target, opts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask,
onHoldMusic
}) {
const myOpts = deepcopy(opts);
const sd = new SingleDialer({
logger, sbcAddress, target, myOpts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask
logger, sbcAddress, target, myOpts, application, callInfo, accountInfo, rootSpan, startSpan, dialTask, onHoldMusic
});
sd.exec(srf, ms, myOpts);
return sd;

View File

@@ -97,8 +97,12 @@ const parseSiprecPayload = (req, logger) => {
obj[`${prefix}participantstreamassoc`].forEach((ps) => {
const part = participants[ps.$.participant_id];
if (part) {
part.send = ps[`${prefix}send`][0];
part.recv = ps[`${prefix}recv`][0];
if (ps.hasOwnProperty(`${prefix}send`)) {
part.send = ps[`${prefix}send`][0];
}
if (ps.hasOwnProperty(`${prefix}recv`)) {
part.recv = ps[`${prefix}recv`][0];
}
}
});
}
@@ -109,9 +113,9 @@ const parseSiprecPayload = (req, logger) => {
obj[`${prefix}stream`].forEach((s) => {
const streamId = s.$.stream_id;
let sender;
for (const [k, v] of Object.entries(participants)) {
for (const v of Object.values(participants)) {
if (v.send === streamId) {
sender = k;
sender = v;
break;
}
}
@@ -121,9 +125,15 @@ const parseSiprecPayload = (req, logger) => {
sender.label = s[`${prefix}label`][0];
if (-1 !== ['1', 'a_leg', 'inbound'].indexOf(sender.label)) {
opts.caller.aor = sender.aor ;
if (-1 !== ['1', 'a_leg', 'inbound', '10'].indexOf(sender.label)) {
opts.caller.aor = sender.aor;
if (sender.name) opts.caller.name = sender.name;
// Remap the sdp stream base on sender label
if (!opts.sdp1.includes(`a=label:${sender.label}`)) {
const tmp = opts.sdp1;
opts.sdp1 = opts.sdp2;
opts.sdp2 = tmp;
}
}
else {
opts.callee.aor = sender.aor ;

View File

@@ -1,15 +1,5 @@
const {
TaskName,
AzureTranscriptionEvents,
GoogleTranscriptionEvents,
AwsTranscriptionEvents,
NuanceTranscriptionEvents,
DeepgramTranscriptionEvents,
SonioxTranscriptionEvents,
NvidiaTranscriptionEvents,
CobaltTranscriptionEvents,
JambonzTranscriptionEvents,
AssemblyAiTranscriptionEvents
} = require('./constants.json');
const stickyVars = {
@@ -112,7 +102,55 @@ const stickyVars = {
]
};
const consolidateTranscripts = (bufferedTranscripts, channel, language) => {
/**
* @see https://developers.deepgram.com/docs/models-languages-overview
*/
const optimalDeepramModels = {
zh: ['base', 'base'],
'zh-CN':['base', 'base'],
'zh-TW': ['base', 'base'],
da: ['enhanced', 'enhanced'],
en: ['nova-2-phonecall', 'nova-2'],
'en-US': ['nova-2-phonecall', 'nova-2'],
'en-AU': ['nova-2', 'nova-2'],
'en-GB': ['nova-2', 'nova-2'],
'en-IN': ['nova-2', 'nova-2'],
'en-NZ': ['nova-2', 'nova-2'],
nl: ['nova-2', 'nova-2'],
fr: ['nova-2', 'nova-2'],
'fr-CA': ['nova-2', 'nova-2'],
de: ['nova-2', 'nova-2'],
hi: ['nova-2', 'nova-2'],
'hi-Latn': ['nova-2', 'nova-2'],
id: ['base', 'base'],
it: ['nova-2', 'nova-2'],
ja: ['enhanced', 'enhanced'],
ko: ['nova-2', 'nova-2'],
no: ['nova-2', 'nova-2'],
pl: ['nova-2', 'nova-2'],
pt: ['nova-2', 'nova-2'],
'pt-BR': ['nova-2', 'nova-2'],
'pt-PT': ['nova-2', 'nova-2'],
ru: ['nova-2', 'nova-2'],
es: ['nova-2', 'nova-2'],
'es-419': ['nova-2', 'nova-2'],
'es-LATAM': ['enhanced', 'enhanced'],
sv: ['nova-2', 'nova-2'],
ta: ['enhanced', 'enhanced'],
taq: ['enhanced', 'enhanced'],
tr: ['nova-2', 'nova-2'],
uk: ['nova-2', 'nova-2']
};
const selectDefaultDeepgramModel = (task, language) => {
if (language in optimalDeepramModels) {
const [gather, transcribe] = optimalDeepramModels[language];
return task.name === TaskName.Gather ? gather : transcribe;
}
return 'base';
};
const consolidateTranscripts = (bufferedTranscripts, channel, language, vendor) => {
if (bufferedTranscripts.length === 1) return bufferedTranscripts[0];
let totalConfidence = 0;
const finalTranscript = bufferedTranscripts.reduce((acc, evt) => {
@@ -153,7 +191,7 @@ const consolidateTranscripts = (bufferedTranscripts, channel, language) => {
totalConfidence / bufferedTranscripts.length;
finalTranscript.alternatives[0].transcript = finalTranscript.alternatives[0].transcript.trim();
finalTranscript.vendor = {
name: 'deepgram',
name: vendor,
evt: bufferedTranscripts
};
return finalTranscript;
@@ -232,7 +270,7 @@ const normalizeDeepgram = (evt, channel, language, shortUtterance) => {
language_code: language,
channel_tag: channel,
is_final: shortUtterance ? evt.is_final : evt.speech_final,
alternatives: [alternatives[0]],
alternatives: alternatives.length ? [alternatives[0]] : [],
vendor: {
name: 'deepgram',
evt: copy
@@ -338,19 +376,34 @@ const normalizeNuance = (evt, channel, language) => {
};
};
const normalizeMicrosoft = (evt, channel, language) => {
const normalizeVerbio = (evt, channel, language) => {
const copy = JSON.parse(JSON.stringify(evt));
return {
language_code: language,
channel_tag: channel,
is_final: evt.is_final,
alternatives: evt.alternatives,
vendor: {
name: 'verbio',
evt: copy
}
};
};
const normalizeMicrosoft = (evt, channel, language, punctuation = true) => {
const copy = JSON.parse(JSON.stringify(evt));
const nbest = evt.NBest;
const language_code = evt.PrimaryLanguage?.Language || language;
const alternatives = nbest ? nbest.map((n) => {
return {
confidence: n.Confidence,
transcript: n.Display
// remove all puntuation if needed
transcript: punctuation ? n.Display : n.Display.replace(/\p{P}/gu, '')
};
}) :
[
{
transcript: evt.DisplayText || evt.Text
transcript: punctuation ? evt.DisplayText || evt.Text : (evt.DisplayText || evt.Text).replace(/\p{P}/gu, '')
}
];
@@ -400,14 +453,14 @@ const normalizeAssemblyAi = (evt, channel, language) => {
};
module.exports = (logger) => {
const normalizeTranscription = (evt, vendor, channel, language, shortUtterance) => {
const normalizeTranscription = (evt, vendor, channel, language, shortUtterance, punctuation) => {
//logger.debug({ evt, vendor, channel, language }, 'normalizeTranscription');
switch (vendor) {
case 'deepgram':
return normalizeDeepgram(evt, channel, language, shortUtterance);
case 'microsoft':
return normalizeMicrosoft(evt, channel, language);
return normalizeMicrosoft(evt, channel, language, punctuation);
case 'google':
return normalizeGoogle(evt, channel, language);
case 'aws':
@@ -424,6 +477,8 @@ module.exports = (logger) => {
return normalizeCobalt(evt, channel, language);
case 'assemblyai':
return normalizeAssemblyAi(evt, channel, language, shortUtterance);
case 'verbio':
return normalizeVerbio(evt, channel, language);
default:
if (vendor.startsWith('custom:')) {
return normalizeCustom(evt, channel, language, vendor);
@@ -433,22 +488,15 @@ module.exports = (logger) => {
}
};
const setChannelVarsForStt = (task, sttCredentials, rOpts = {}) => {
const setChannelVarsForStt = (task, sttCredentials, language, rOpts = {}) => {
let opts = {};
const {enable, voiceMs = 0, mode = -1} = rOpts.vad || {};
const vad = {enable, voiceMs, mode};
const vendor = rOpts.vendor;
/* voice activity detection works across vendors */
opts = {
...opts,
...(vad.enable && {START_RECOGNIZING_ON_VAD: 1}),
...(vad.enable && vad.voiceMs && {RECOGNIZER_VAD_VOICE_MS: vad.voiceMs}),
...(vad.enable && typeof vad.mode === 'number' && {RECOGNIZER_VAD_MODE: vad.mode}),
};
if ('google' === vendor) {
const model = task.name === TaskName.Gather ? 'command_and_search' : 'latest_long';
const useV2 = rOpts.googleOptions?.serviceVersion === 'v2';
const model = task.name === TaskName.Gather ?
(useV2 ? 'telephony_short' : 'command_and_search') :
(useV2 ? 'long' : 'latest_long');
opts = {
...opts,
...(sttCredentials && {GOOGLE_APPLICATION_CREDENTIALS: JSON.stringify(sttCredentials.credentials)}),
@@ -473,13 +521,34 @@ module.exports = (logger) => {
...(rOpts.hints?.length > 0 && typeof rOpts.hints[0] === 'object' &&
{GOOGLE_SPEECH_HINTS: JSON.stringify(rOpts.hints)}),
...(typeof rOpts.hintsBoost === 'number' && {GOOGLE_SPEECH_HINTS_BOOST: rOpts.hintsBoost}),
...(rOpts.altLanguages?.length > 0 &&
// When altLanguages is emptylist, we have to send value to freeswitch to clear the previous settings
...(rOpts.altLanguages &&
{GOOGLE_SPEECH_ALTERNATIVE_LANGUAGE_CODES: [...new Set(rOpts.altLanguages)].join(',')}),
...(rOpts.interactionType &&
{GOOGLE_SPEECH_METADATA_INTERACTION_TYPE: rOpts.interactionType}),
...{GOOGLE_SPEECH_MODEL: rOpts.model || model},
...(rOpts.naicsCode > 0 && {GOOGLE_SPEECH_METADATA_INDUSTRY_NAICS_CODE: rOpts.naicsCode}),
GOOGLE_SPEECH_METADATA_RECORDING_DEVICE_TYPE: 'phone_line',
...(useV2 && {
GOOGLE_SPEECH_RECOGNIZER_PARENT: `projects/${sttCredentials.credentials.project_id}/locations/global`,
GOOGLE_SPEECH_CLOUD_SERVICES_VERSION: 'v2',
...(rOpts.googleOptions?.speechStartTimeoutMs && {
GOOGLE_SPEECH_START_TIMEOUT_MS: rOpts.googleOptions.speechStartTimeoutMs
}),
...(rOpts.googleOptions?.speechEndTimeoutMs && {
GOOGLE_SPEECH_END_TIMEOUT_MS: rOpts.googleOptions.speechEndTimeoutMs
}),
...(rOpts.googleOptions?.transcriptNormalization && {
GOOGLE_SPEECH_TRANSCRIPTION_NORMALIZATION: JSON.stringify(rOpts.googleOptions.transcriptNormalization)
}),
...(rOpts.googleOptions?.enableVoiceActivityEvents && {
GOOGLE_SPEECH_ENABLE_VOICE_ACTIVITY_EVENTS: rOpts.googleOptions.enableVoiceActivityEvents
}),
...(rOpts.sgoogleOptions?.recognizerId) && {GOOGLE_SPEECH_RECOGNIZER_ID: rOpts.googleOptions.recognizerId},
...(rOpts.googleOptions?.enableVoiceActivityEvents && {
GOOGLE_SPEECH_ENABLE_VOICE_ACTIVITY_EVENTS: rOpts.googleOptions.enableVoiceActivityEvents
}),
}),
};
}
else if (['aws', 'polly'].includes(vendor)) {
@@ -489,9 +558,10 @@ module.exports = (logger) => {
...(rOpts.vocabularyFilterName && {AWS_VOCABULARY_FILTER_NAME: rOpts.vocabularyFilterName}),
...(rOpts.filterMethod && {AWS_VOCABULARY_FILTER_METHOD: rOpts.filterMethod}),
...(sttCredentials && {
AWS_ACCESS_KEY_ID: sttCredentials.accessKeyId,
AWS_SECRET_ACCESS_KEY: sttCredentials.secretAccessKey,
AWS_REGION: sttCredentials.region
...(sttCredentials.accessKeyId && {AWS_ACCESS_KEY_ID: sttCredentials.accessKeyId}),
...(sttCredentials.secretAccessKey && {AWS_SECRET_ACCESS_KEY: sttCredentials.secretAccessKey}),
AWS_REGION: sttCredentials.region,
...(sttCredentials.sessionToken && {AWS_SESSION_TOKEN: sttCredentials.sessionToken}),
}),
};
}
@@ -503,7 +573,8 @@ module.exports = (logger) => {
{AZURE_SPEECH_HINTS: rOpts.hints.map((h) => h.trim()).join(',')}),
...(rOpts.hints?.length > 0 && typeof rOpts.hints[0] === 'object' &&
{AZURE_SPEECH_HINTS: rOpts.hints.map((h) => h.phrase).join(',')}),
...(rOpts.altLanguages && rOpts.altLanguages.length > 0 &&
// When altLanguages is emptylist, we have to send value to freeswitch to clear the previous settings
...(rOpts.altLanguages &&
{AZURE_SPEECH_ALTERNATIVE_LANGUAGE_CODES: [...new Set(rOpts.altLanguages)].join(',')}),
...(rOpts.requestSnr && {AZURE_REQUEST_SNR: 1}),
...(rOpts.profanityOption && {AZURE_PROFANITY_OPTION: rOpts.profanityOption}),
@@ -517,12 +588,19 @@ module.exports = (logger) => {
...{AZURE_USE_OUTPUT_FORMAT_DETAILED: 1},
...(azureOptions.speechSegmentationSilenceTimeoutMs &&
{AZURE_SPEECH_SEGMENTATION_SILENCE_TIMEOUT_MS: azureOptions.speechSegmentationSilenceTimeoutMs}),
...(azureOptions.languageIdMode &&
{AZURE_LANGUAGE_ID_MODE: azureOptions.languageIdMode}),
...(azureOptions.postProcessing &&
{AZURE_POST_PROCESSING_OPTION: azureOptions.postProcessing}),
...(sttCredentials && {
...(sttCredentials.api_key && {AZURE_SUBSCRIPTION_KEY: sttCredentials.api_key}),
...(sttCredentials.region && {AZURE_REGION: sttCredentials.region}),
}),
...(sttCredentials.use_custom_stt && sttCredentials.custom_stt_endpoint &&
{AZURE_SERVICE_ENDPOINT_ID: sttCredentials.custom_stt_endpoint}),
//azureSttEndpointId overrides sttCredentials.custom_stt_endpoint
...(rOpts.azureSttEndpointId &&
{AZURE_SERVICE_ENDPOINT_ID: rOpts.azureSttEndpointId}),
};
}
else if ('nuance' === vendor) {
@@ -574,15 +652,24 @@ module.exports = (logger) => {
};
}
else if ('deepgram' === vendor) {
let {model} = rOpts;
const {deepgramOptions = {}} = rOpts;
const deepgramUri = deepgramOptions.deepgramSttUri || sttCredentials.deepgram_stt_uri;
const useTls = deepgramOptions.deepgramSttUseTls || sttCredentials.deepgram_stt_use_tls;
/* default to a sensible model if not supplied */
if (!model) {
model = selectDefaultDeepgramModel(task, language);
}
opts = {
...opts,
DEEPGRAM_SPEECH_MODEL: model,
...(deepgramUri && {DEEPGRAM_URI: deepgramUri}),
...(deepgramUri && useTls && {DEEPGRAM_USE_TLS: 1}),
...(sttCredentials.api_key) &&
{DEEPGRAM_API_KEY: sttCredentials.api_key},
...(deepgramOptions.tier) &&
{DEEPGRAM_SPEECH_TIER: deepgramOptions.tier},
...(deepgramOptions.model) &&
{DEEPGRAM_SPEECH_MODEL: deepgramOptions.model},
...(deepgramOptions.punctuate) &&
{DEEPGRAM_SPEECH_ENABLE_AUTOMATIC_PUNCTUATION: 1},
...(deepgramOptions.smartFormatting) &&
@@ -612,7 +699,9 @@ module.exports = (logger) => {
...(deepgramOptions.keywords) &&
{DEEPGRAM_SPEECH_KEYWORDS: deepgramOptions.keywords.join(',')},
...('endpointing' in deepgramOptions) &&
{DEEPGRAM_SPEECH_ENDPOINTING: deepgramOptions.endpointing},
{DEEPGRAM_SPEECH_ENDPOINTING: deepgramOptions.endpointing === false ? 'false' : deepgramOptions.endpointing,
// default DEEPGRAM_SPEECH_UTTERANCE_END_MS is 1000, will be override by user settings later if there is.
DEEPGRAM_SPEECH_UTTERANCE_END_MS: 1000},
...(deepgramOptions.utteranceEndMs) &&
{DEEPGRAM_SPEECH_UTTERANCE_END_MS: deepgramOptions.utteranceEndMs},
...(deepgramOptions.vadTurnoff) &&
@@ -725,8 +814,26 @@ module.exports = (logger) => {
...(rOpts.hints?.length > 0 &&
{ASSEMBLYAI_WORD_BOOST: JSON.stringify(rOpts.hints)})
};
}
else if (vendor.startsWith('custom:')) {
} else if ('verbio' === vendor) {
const {verbioOptions = {}} = rOpts;
opts = {
...opts,
...(sttCredentials.access_token && { VERBIO_ACCESS_TOKEN: sttCredentials.access_token}),
...(sttCredentials.engine_version && {VERBIO_ENGINE_VERSION: sttCredentials.engine_version}),
...(language && {VERBIO_LANGUAGE: language}),
...(verbioOptions.enable_formatting && {VERBIO_ENABLE_FORMATTING: verbioOptions.enable_formatting}),
...(verbioOptions.enable_diarization && {VERBIO_ENABLE_DIARIZATION: verbioOptions.enable_diarization}),
...(verbioOptions.topic && {VERBIO_TOPIC: verbioOptions.topic}),
...(verbioOptions.inline_grammar && {VERBIO_INLINE_GRAMMAR: verbioOptions.inline_grammar}),
...(verbioOptions.grammar_uri && {VERBIO_GRAMMAR_URI: verbioOptions.grammar_uri}),
...(verbioOptions.label && {VERBIO_LABEL: verbioOptions.label}),
...(verbioOptions.recognition_timeout && {VERBIO_RECOGNITION_TIMEOUT: verbioOptions.recognition_timeout}),
...(verbioOptions.speech_complete_timeout &&
{VERBIO_SPEECH_COMPLETE_TIMEOUT: verbioOptions.speech_complete_timeout}),
...(verbioOptions.speech_incomplete_timeout &&
{VERBIO_SPEECH_INCOMPLETE_TIMEOUT: verbioOptions.speech_incomplete_timeout}),
};
} else if (vendor.startsWith('custom:')) {
let {options = {}} = rOpts;
const {auth_token, custom_stt_url} = sttCredentials;
options = {
@@ -740,7 +847,7 @@ module.exports = (logger) => {
opts = {
...opts,
JAMBONZ_STT_API_KEY: auth_token,
...(auth_token && {JAMBONZ_STT_API_KEY: auth_token}),
JAMBONZ_STT_URL: custom_stt_url,
...(Object.keys(options).length > 0 && {JAMBONZ_STT_OPTIONS: JSON.stringify(options)}),
};
@@ -752,48 +859,6 @@ module.exports = (logger) => {
return opts;
};
const removeSpeechListeners = (ep) => {
ep.removeCustomEventListener(GoogleTranscriptionEvents.Transcription);
ep.removeCustomEventListener(GoogleTranscriptionEvents.EndOfUtterance);
ep.removeCustomEventListener(GoogleTranscriptionEvents.VadDetected);
ep.removeCustomEventListener(AwsTranscriptionEvents.Transcription);
ep.removeCustomEventListener(AwsTranscriptionEvents.VadDetected);
ep.removeCustomEventListener(AzureTranscriptionEvents.Transcription);
ep.removeCustomEventListener(AzureTranscriptionEvents.NoSpeechDetected);
ep.removeCustomEventListener(AzureTranscriptionEvents.VadDetected);
ep.removeCustomEventListener(NuanceTranscriptionEvents.Transcription);
ep.removeCustomEventListener(NuanceTranscriptionEvents.TranscriptionComplete);
ep.removeCustomEventListener(NuanceTranscriptionEvents.StartOfSpeech);
ep.removeCustomEventListener(NuanceTranscriptionEvents.VadDetected);
ep.removeCustomEventListener(DeepgramTranscriptionEvents.Transcription);
ep.removeCustomEventListener(DeepgramTranscriptionEvents.Connect);
ep.removeCustomEventListener(DeepgramTranscriptionEvents.ConnectFailure);
ep.removeCustomEventListener(SonioxTranscriptionEvents.Transcription);
ep.removeCustomEventListener(CobaltTranscriptionEvents.Transcription);
ep.removeCustomEventListener(CobaltTranscriptionEvents.CompileContext);
ep.removeCustomEventListener(NvidiaTranscriptionEvents.Transcription);
ep.removeCustomEventListener(NvidiaTranscriptionEvents.TranscriptionComplete);
ep.removeCustomEventListener(NvidiaTranscriptionEvents.StartOfSpeech);
ep.removeCustomEventListener(NvidiaTranscriptionEvents.VadDetected);
ep.removeCustomEventListener(JambonzTranscriptionEvents.Transcription);
ep.removeCustomEventListener(JambonzTranscriptionEvents.Connect);
ep.removeCustomEventListener(JambonzTranscriptionEvents.ConnectFailure);
ep.removeCustomEventListener(JambonzTranscriptionEvents.Error);
ep.removeCustomEventListener(AssemblyAiTranscriptionEvents.Transcription);
ep.removeCustomEventListener(AssemblyAiTranscriptionEvents.Connect);
ep.removeCustomEventListener(AssemblyAiTranscriptionEvents.ConnectFailure);
};
const setSpeechCredentialsAtRuntime = (recognizer) => {
if (!recognizer) return;
if (recognizer.vendor === 'nuance') {
@@ -832,7 +897,6 @@ module.exports = (logger) => {
return {
normalizeTranscription,
setChannelVarsForStt,
removeSpeechListeners,
setSpeechCredentialsAtRuntime,
compileSonioxTranscripts,
consolidateTranscripts

View File

@@ -9,7 +9,8 @@ const {
JAMBONES_WS_PING_INTERVAL_MS,
MAX_RECONNECTS,
JAMBONES_WS_HANDSHAKE_TIMEOUT_MS,
JAMBONES_WS_MAX_PAYLOAD
JAMBONES_WS_MAX_PAYLOAD,
HTTP_USER_AGENT_HEADER
} = require('../config');
class WsRequestor extends BaseRequestor {
@@ -55,6 +56,12 @@ class WsRequestor extends BaseRequestor {
}
if (type === 'session:new') this.call_sid = params.callSid;
if (type === 'session:reconnect') {
this._reconnectPromise = new Promise((resolve, reject) => {
this._reconnectResolve = resolve;
this._reconnectReject = reject;
});
}
/* if we have an absolute url, and it is http then do a standard webhook */
if (this._isAbsoluteUrl(url) && url.startsWith('http')) {
@@ -70,20 +77,23 @@ class WsRequestor extends BaseRequestor {
}
/* connect if necessary */
const queueMsg = () => {
this.logger.debug(
`WsRequestor:request(${this.id}) - queueing ${type} message since we are connecting`);
if (wantsAck) {
const p = new Promise((resolve, reject) => {
this.queuedMsg.push({type, hook, params, httpHeaders, promise: {resolve, reject}});
});
return p;
}
else {
this.queuedMsg.push({type, hook, params, httpHeaders});
}
return;
};
if (!this.ws) {
if (this.connectInProgress) {
this.logger.debug(
`WsRequestor:request(${this.id}) - queueing ${type} message since we are connecting`);
if (wantsAck) {
const p = new Promise((resolve, reject) => {
this.queuedMsg.push({type, hook, params, httpHeaders, promise: {resolve, reject}});
});
return p;
}
else {
this.queuedMsg.push({type, hook, params, httpHeaders});
}
return;
return queueMsg();
}
this.connectInProgress = true;
this.logger.debug(`WsRequestor:request(${this.id}) - connecting since we do not have a connection for ${type}`);
@@ -101,6 +111,10 @@ class WsRequestor extends BaseRequestor {
return Promise.reject(err);
}
}
// If jambonz wait for ack from reconnect, queue the msg until reconnect is acked
if (type !== 'session:reconnect' && this._reconnectPromise) {
return queueMsg();
}
assert(this.ws);
/* prepare and send message */
@@ -118,7 +132,7 @@ class WsRequestor extends BaseRequestor {
type,
msgid,
call_sid: this.call_sid,
hook: type === 'verb:hook' ? url : undefined,
hook: ['verb:hook', 'session:redirect'].includes(type) ? url : undefined,
data: {...payload},
...b3
};
@@ -138,6 +152,18 @@ class WsRequestor extends BaseRequestor {
}
};
const rejectQueuedMsgs = (err) => {
if (this.queuedMsg.length > 0) {
for (const {promise} of this.queuedMsg) {
this.logger.debug(`WsRequestor:request - preparing queued ${type} for rejectQueuedMsgs`);
if (promise) {
promise.reject(err);
}
}
this.queuedMsg.length = 0;
}
};
//this.logger.debug({obj}, `websocket: sending (${url})`);
/* special case: reconnecting before we received ack to session:new */
@@ -178,16 +204,37 @@ class WsRequestor extends BaseRequestor {
this.logger.debug({response}, `WsRequestor:request ${url} succeeded in ${rtt}ms`);
this.stats.histogram('app.hook.ws_response_time', rtt, ['hook_type:app']);
resolve(response);
if (this._reconnectResolve) {
this._reconnectResolve();
}
},
failure: (err) => {
if (this._reconnectReject) {
this._reconnectReject(err);
}
clearTimeout(timer);
reject(err);
}
});
/* send the message */
this.ws.send(JSON.stringify(obj), () => {
this.ws.send(JSON.stringify(obj), async() => {
this.logger.debug({obj}, `WsRequestor:request websocket: sent (${url})`);
// If session:reconnect is waiting for ack, hold here until ack to send queuedMsgs
if (this._reconnectPromise) {
try {
await this._reconnectPromise;
} catch (err) {
// bad thing happened to session:recconnect
rejectQueuedMsgs(err);
this.emit('reconnect-error');
return;
} finally {
this._reconnectPromise = null;
this._reconnectResolve = null;
this._reconnectReject = null;
}
}
sendQueuedMsgs();
});
});
@@ -228,6 +275,9 @@ class WsRequestor extends BaseRequestor {
maxRedirects: 2,
handshakeTimeout,
maxPayload: JAMBONES_WS_MAX_PAYLOAD ? parseInt(JAMBONES_WS_MAX_PAYLOAD) : 24 * 1024,
headers: {
...(HTTP_USER_AGENT_HEADER && {'user-agent' : HTTP_USER_AGENT_HEADER})
}
};
if (this.username && this.password) opts = {...opts, auth: `${this.username}:${this.password}`};
@@ -322,7 +372,9 @@ class WsRequestor extends BaseRequestor {
'WsRequestor:_onSocketClosed time to reconnect');
if (!this.ws && !this.connectInProgress) {
this.connectInProgress = true;
this._connect().catch((err) => this.connectInProgress = false);
return this._connect()
.catch((err) => this.logger.error('WsRequestor:_onSocketClosed There is error while reconnect', err))
.finally(() => this.connectInProgress = false);
}
}, this.backoffMs);
this.backoffMs = this.backoffMs < 2000 ? this.backoffMs * 2 : (this.backoffMs + 2000);
@@ -340,7 +392,9 @@ class WsRequestor extends BaseRequestor {
/* messages must be JSON format */
try {
const obj = JSON.parse(content);
const {type, msgid, command, call_sid = this.call_sid, queueCommand = false, data} = obj;
//const {type, msgid, command, call_sid = this.call_sid, queueCommand = false, data} = obj;
const {type, msgid, command, queueCommand = false, data} = obj;
const call_sid = obj.callSid || this.call_sid;
//this.logger.debug({obj}, 'WsRequestor:request websocket: received');
assert.ok(type, 'type property not supplied');

13455
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,9 @@
{
"name": "jambonz-feature-server",
"version": "0.8.5",
"version": "0.9.0",
"main": "app.js",
"engines": {
"node": ">= 10.16.0"
"node": ">= 18.x"
},
"keywords": [
"sip",
@@ -25,57 +25,57 @@
"jslint:fix": "eslint app.js tracer.js lib --fix"
},
"dependencies": {
"@aws-sdk/client-auto-scaling": "^3.360.0",
"@aws-sdk/client-sns": "^3.360.0",
"@jambonz/db-helpers": "^0.9.1",
"@aws-sdk/client-auto-scaling": "^3.549.0",
"@aws-sdk/client-sns": "^3.549.0",
"@jambonz/db-helpers": "^0.9.6",
"@jambonz/http-health-check": "^0.0.1",
"@jambonz/mw-registrar": "^0.2.4",
"@jambonz/realtimedb-helpers": "^0.8.7",
"@jambonz/speech-utils": "^0.0.31",
"@jambonz/stats-collector": "^0.1.9",
"@jambonz/mw-registrar": "^0.2.7",
"@jambonz/realtimedb-helpers": "^0.8.8",
"@jambonz/speech-utils": "^0.1.11",
"@jambonz/stats-collector": "^0.1.10",
"@jambonz/time-series": "^0.2.8",
"@jambonz/verb-specifications": "^0.0.46",
"@opentelemetry/api": "^1.4.0",
"@opentelemetry/exporter-jaeger": "^1.9.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.35.0",
"@opentelemetry/exporter-zipkin": "^1.9.0",
"@opentelemetry/instrumentation": "^0.35.0",
"@opentelemetry/resources": "^1.9.0",
"@opentelemetry/sdk-trace-base": "^1.9.0",
"@opentelemetry/sdk-trace-node": "^1.9.0",
"@opentelemetry/semantic-conventions": "^1.9.0",
"@jambonz/verb-specifications": "^0.0.74",
"@opentelemetry/api": "^1.8.0",
"@opentelemetry/exporter-jaeger": "^1.23.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.50.0",
"@opentelemetry/exporter-zipkin": "^1.23.0",
"@opentelemetry/instrumentation": "^0.50.0",
"@opentelemetry/resources": "^1.23.0",
"@opentelemetry/sdk-trace-base": "^1.23.0",
"@opentelemetry/sdk-trace-node": "^1.23.0",
"@opentelemetry/semantic-conventions": "^1.23.0",
"bent": "^7.3.12",
"debug": "^4.3.4",
"deepcopy": "^2.1.0",
"drachtio-fsmrf": "^3.0.28",
"drachtio-srf": "^4.5.31",
"express": "^4.18.2",
"drachtio-fsmrf": "^3.0.43",
"drachtio-srf": "^4.5.35",
"express": "^4.19.2",
"express-validator": "^7.0.1",
"ip": "^1.1.8",
"moment": "^2.29.4",
"parse-url": "^8.1.0",
"pino": "^8.8.0",
"ip": "^2.0.1",
"moment": "^2.30.1",
"parse-url": "^9.2.0",
"pino": "^8.20.0",
"polly-ssml-split": "^0.1.0",
"proxyquire": "^2.1.3",
"sdp-transform": "^2.14.1",
"short-uuid": "^4.2.2",
"sinon": "^15.0.1",
"sdp-transform": "^2.14.2",
"short-uuid": "^5.1.0",
"sinon": "^17.0.1",
"to-snake-case": "^1.0.0",
"undici": "^5.26.2",
"undici": "^6.19.2",
"uuid-random": "^1.3.2",
"verify-aws-sns-signature": "^0.1.0",
"ws": "^8.9.0",
"ws": "^8.17.1",
"xml2js": "^0.6.2"
},
"devDependencies": {
"clear-module": "^4.1.2",
"eslint": "^7.32.0",
"eslint-plugin-promise": "^4.3.1",
"eslint": "7.32.0",
"eslint-plugin-promise": "^6.1.1",
"nyc": "^15.1.0",
"tape": "^5.6.1"
"tape": "^5.7.5"
},
"optionalDependencies": {
"bufferutil": "^4.0.6",
"utf-8-validate": "^5.0.8"
"bufferutil": "^4.0.8",
"utf-8-validate": "^6.0.3"
}
}

View File

@@ -99,6 +99,8 @@ test('test create-call call-hook basic authentication', async(t) => {
let obj = await getJSON(`http:127.0.0.1:3100/lastRequest/${from}`)
t.ok(obj.headers.Authorization = 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
'create-call: call-hook contains basic authentication header');
t.ok(obj.headers['user-agent'] = 'jambonz',
'create-call: call-hook contains user-agent header');
disconnect();
} catch (err) {
console.log(`error received: ${err}`);

View File

@@ -42,7 +42,7 @@ services:
ipv4_address: 172.38.0.7
drachtio:
image: drachtio/drachtio-server:0.8.22
image: drachtio/drachtio-server:0.8.25-rc8
restart: always
command: drachtio --contact "sip:*;transport=udp" --mtu 4096 --address 0.0.0.0 --port 9022
ports:
@@ -57,7 +57,7 @@ services:
condition: service_healthy
freeswitch:
image: drachtio/drachtio-freeswitch-mrf:0.4.33
image: drachtio/drachtio-freeswitch-mrf:0.7.3
restart: always
command: freeswitch --rtp-range-start 20000 --rtp-range-end 20100
environment:

53
test/hangup-test.js Normal file
View File

@@ -0,0 +1,53 @@
const test = require('tape');
const { sippUac } = require('./sipp')('test_fs');
const clearModule = require('clear-module');
const {provisionCallHook, provisionCustomHook} = require('./utils')
const bent = require('bent');
const getJSON = bent('json')
process.on('unhandledRejection', (reason, p) => {
console.log('Unhandled Rejection at: Promise', p, 'reason:', reason);
});
function connect(connectable) {
return new Promise((resolve, reject) => {
connectable.on('connect', () => {
return resolve();
});
});
}
test('\'hangup\' custom headers', async(t) => {
clearModule.all();
const {srf, disconnect} = require('../app');
try {
await connect(srf);
// GIVEN
const verbs = [
{
verb: 'play',
url: 'https://example.com/example.mp3'
},
{
"verb": "hangup",
"headers": {
"X-Reason" : "maximum call duration exceeded"
}
}
];
const from = 'hangup_custom_headers';
await provisionCallHook(from, verbs)
// THEN
await sippUac('uac-success-received-bye.xml', '172.38.0.10', from);
t.pass('play: succeeds when using single link');
disconnect();
} catch (err) {
console.log(`error received: ${err}`);
disconnect();
t.error(err);
}
});

View File

@@ -16,7 +16,8 @@ require('./listen-tests');
require('./config-test');
require('./queue-test');
require('./in-dialog-test');
require('./http-proxy-test');
require('./hangup-test');
require('./sdp-utils-test');
require('./http-proxy-test');
require('./remove-test-db');
require('./docker_stop');

View File

@@ -188,7 +188,7 @@ test('\'play\' tests with seekOffset and actionHook', async(t) => {
const seconds = parseInt(obj.body.playback_seconds);
const milliseconds = parseInt(obj.body.playback_milliseconds);
const lastOffsetPos = parseInt(obj.body.playback_last_offset_pos);
//console.log({obj}, 'lastRequest');
console.log({obj}, 'lastRequest');
t.ok(obj.body.reason === "playCompleted", "play: actionHook success received");
t.ok(seconds === 2, "playback_seconds: actionHook success received");
t.ok(milliseconds === 2048, "playback_milliseconds: actionHook success received");

View File

@@ -52,6 +52,7 @@ test('\'transcribe\' test - google', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().startsWith('i\'d like to speak to customer support'),
'transcribe: succeeds when using google credentials');
@@ -89,6 +90,7 @@ test('\'transcribe\' test - microsoft', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().startsWith('i\'d like to speak to customer support'),
'transcribe: succeeds when using microsoft credentials');
@@ -126,6 +128,7 @@ test('\'transcribe\' test - aws', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().startsWith('i\'d like to speak to customer support'),
'transcribe: succeeds when using aws credentials');
@@ -155,6 +158,9 @@ test('\'transcribe\' test - deepgram config options', async(t) => {
"recognizer": {
"vendor": "deepgram",
"language": "en-US",
"altLanguages": [
"en-US"
],
"deepgramOptions": {
"model": "2-ea",
"tier": "nova",
@@ -172,6 +178,9 @@ test('\'transcribe\' test - deepgram config options', async(t) => {
"transcriptionHook": "/transcriptionHook",
"recognizer": {
"vendor": "deepgram",
"altLanguages": [
"en-AU"
],
"hints": ["customer support", "sales", "human resources", "HR"],
"deepgramOptions": {
"apiKey": DEEPGRAM_API_KEY,
@@ -184,6 +193,7 @@ test('\'transcribe\' test - deepgram config options', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().includes('like to speak to customer support'),
'transcribe: succeeds when using deepgram credentials');
@@ -224,6 +234,7 @@ test('\'transcribe\' test - deepgram', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().includes('like to speak to customer support'),
'transcribe: succeeds when using deepgram credentials');
@@ -303,9 +314,131 @@ test('\'transcribe\' test - google with asrTimeout', async(t) => {
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().startsWith('i\'d like to speak to customer support'),
'transcribe: succeeds when using google credentials');
disconnect();
} catch (err) {
console.log(`error received: ${err}`);
disconnect();
t.error(err);
}
});
test('\'transcribe\' test - deepgram config options altLanguages', async(t) => {
if (!DEEPGRAM_API_KEY ) {
t.pass('skipping deepgram tests');
return t.end();
}
clearModule.all();
const {srf, disconnect} = require('../app');
try {
await connect(srf);
// GIVEN
let verbs = [
{
"verb": "config",
"recognizer": {
"vendor": "deepgram",
"language": "en-US",
"altLanguages": [
"en-US"
],
"deepgramOptions": {
"model": "nova-2",
"numerals": true,
"ner": true,
"vadTurnoff": 10,
"keywords": [
"CPT"
]
}
}
},
{
"verb": "transcribe",
"transcriptionHook": "/transcriptionHook",
"recognizer": {
"vendor": "deepgram",
"hints": ["customer support", "sales", "human resources", "HR"],
"deepgramOptions": {
"apiKey": DEEPGRAM_API_KEY,
}
}
}
];
let from = "gather_success_no_altLanguages";
await provisionCallHook(from, verbs);
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().includes('like to speak to customer support'),
'transcribe: succeeds when using deepgram credentials');
disconnect();
} catch (err) {
console.log(`error received: ${err}`);
disconnect();
t.error(err);
}
});
test('\'transcribe\' test - deepgram config options altLanguages', async(t) => {
if (!DEEPGRAM_API_KEY ) {
t.pass('skipping deepgram tests');
return t.end();
}
clearModule.all();
const {srf, disconnect} = require('../app');
try {
await connect(srf);
// GIVEN
let verbs = [
{
"verb": "config",
"recognizer": {
"vendor": "deepgram",
"language": "en-US",
"altLanguages": [
"en-US"
],
"deepgramOptions": {
"model": "nova-2",
"numerals": true,
"ner": true,
"vadTurnoff": 10,
"keywords": [
"CPT"
]
}
}
},
{
"verb": "transcribe",
"transcriptionHook": "/transcriptionHook",
"recognizer": {
"vendor": "deepgram",
"hints": ["customer support", "sales", "human resources", "HR"],
"altLanguages": [],
"deepgramOptions": {
"apiKey": DEEPGRAM_API_KEY,
}
}
}
];
let from = "gather_success_has_altLanguages";
await provisionCallHook(from, verbs);
// THEN
await sippUac('uac-gather-account-creds-success.xml', '172.38.0.10', from);
let obj = await getJSON(`http://127.0.0.1:3100/lastRequest/${from}_actionHook`);
//console.log(JSON.stringify(obj));
t.ok(obj.body.speech.alternatives[0].transcript.toLowerCase().includes('like to speak to customer support'),
'transcribe: succeeds when using deepgram credentials');
disconnect();
} catch (err) {
console.log(`error received: ${err}`);