diff options
author | 2024-10-01 00:32:40 +0530 | |
---|---|---|
committer | 2024-10-06 01:16:32 +0530 | |
commit | 5a8f670c3f772cfe0345ed53e5989a6dca08a905 (patch) | |
tree | 7d82533a0f5356547e31609c3db1d0101de83376 /agl_service_voiceagent/utils/stt_model.py | |
parent | 1144fcd343bc56f8c27ff73d3e76904010dbb832 (diff) |
Remove OpenAI's Whisper AI and Bug Fixing (HEAD, trout_19.90.0, trout/19.90.0, 19.90.0, master)
- Removed OpenAI's Whisper AI from agl-service-voiceagent and switched to
whisper.cpp for speech-to-text.
- Fix audio_recorder.
- Update grpc protoc to include the online-mode status in ServiceStatus
- Set online_mode flag default to 0
- Change wake word to "hey automotive"
Bug-AGL: SPEC-5200
Change-Id: I9f1629cdcaef43498bf4cb9fdd950291a415819d
Signed-off-by: Anuj Solanki <anuj603362@gmail.com>
Diffstat (limited to 'agl_service_voiceagent/utils/stt_model.py')
-rw-r--r-- | agl_service_voiceagent/utils/stt_model.py | 44 |
1 file changed, 22 insertions, 22 deletions
diff --git a/agl_service_voiceagent/utils/stt_model.py b/agl_service_voiceagent/utils/stt_model.py index 7e8ad8b..0a092ea 100644 --- a/agl_service_voiceagent/utils/stt_model.py +++ b/agl_service_voiceagent/utils/stt_model.py @@ -21,7 +21,7 @@ import wave from agl_service_voiceagent.utils.common import generate_unique_uuid # import the whisper model -import whisper +# import whisper # for whisper timeout feature from concurrent.futures import ThreadPoolExecutor import subprocess @@ -93,28 +93,28 @@ class STTModel: return result # Recognize speech using the whisper model - def recognize_using_whisper(self,filename,language = None,timeout = 5,fp16=False): - """ - Recognize speech and return the result as a JSON object. - - Args: - filename (str): The path to the audio file. - timeout (int, optional): The timeout for recognition (default is 5 seconds). - fp16 (bool, optional): If True, use 16-bit floating point precision, (default is False) because cuda is not supported. - language (str, optional): The language code for recognition (default is None). - - Returns: - dict: A JSON object containing recognition results. - """ - def transcribe_with_whisper(): - return self.whisper_model.transcribe(filename, language = language,fp16=fp16) + # def recognize_using_whisper(self,filename,language = None,timeout = 5,fp16=False): + # """ + # Recognize speech and return the result as a JSON object. + + # Args: + # filename (str): The path to the audio file. + # timeout (int, optional): The timeout for recognition (default is 5 seconds). + # fp16 (bool, optional): If True, use 16-bit floating point precision, (default is False) because cuda is not supported. + # language (str, optional): The language code for recognition (default is None). + + # Returns: + # dict: A JSON object containing recognition results. 
+ # """ + # def transcribe_with_whisper(): + # return self.whisper_model.transcribe(filename, language = language,fp16=fp16) - with ThreadPoolExecutor() as executor: - future = executor.submit(transcribe_with_whisper) - try: - return future.result(timeout=timeout) - except TimeoutError: - return {"error": "Transcription with Whisper exceeded the timeout."} + # with ThreadPoolExecutor() as executor: + # future = executor.submit(transcribe_with_whisper) + # try: + # return future.result(timeout=timeout) + # except TimeoutError: + # return {"error": "Transcription with Whisper exceeded the timeout."} def recognize_using_whisper_cpp(self,filename): command = self.whisper_cpp_path |