Diffstat (limited to 'lib/data')
-rw-r--r-- | lib/data/data_providers/app_config_provider.dart      |  51
-rw-r--r-- | lib/data/data_providers/app_provider.dart              |  17
-rw-r--r-- | lib/data/data_providers/voice_agent_client.dart        | 312
-rw-r--r-- | lib/data/data_providers/voice_assistant_notifier.dart  | 148
-rw-r--r-- | lib/data/models/voice_assistant_state.dart             | 104
5 files changed, 628 insertions, 4 deletions
diff --git a/lib/data/data_providers/app_config_provider.dart b/lib/data/data_providers/app_config_provider.dart
index b82eb54..9e187a9 100644
--- a/lib/data/data_providers/app_config_provider.dart
+++ b/lib/data/data_providers/app_config_provider.dart
@@ -88,6 +88,21 @@ class MpdConfig {
   }
 }
 
+class VoiceAgentConfig {
+  final String hostname;
+  final int port;
+
+  static String defaultHostname = 'localhost';
+  static int defaultPort = 51053;
+
+  VoiceAgentConfig({required this.hostname, required this.port});
+
+  static VoiceAgentConfig defaultConfig() {
+    return VoiceAgentConfig(
+        hostname: VoiceAgentConfig.defaultHostname, port: VoiceAgentConfig.defaultPort);
+  }
+}
+
 class AppConfig {
   final bool disableBkgAnimation;
   final bool plainBackground;
@@ -96,6 +111,7 @@ class AppConfig {
   final RadioConfig radioConfig;
   final StorageConfig storageConfig;
   final MpdConfig mpdConfig;
+  final VoiceAgentConfig voiceAgentConfig;
 
   static String configFilePath = '/etc/xdg/AGL/ics-homescreen.yaml';
 
@@ -106,7 +122,8 @@ class AppConfig {
       required this.kuksaConfig,
       required this.radioConfig,
       required this.storageConfig,
-      required this.mpdConfig});
+      required this.mpdConfig,
+      required this.voiceAgentConfig});
 
   static KuksaConfig parseKuksaConfig(YamlMap kuksaMap) {
     try {
@@ -238,6 +255,25 @@ class AppConfig {
       return MpdConfig.defaultConfig();
     }
   }
+
+  static VoiceAgentConfig parseVoiceAgentConfig(YamlMap voiceAgentMap) {
+    try {
+      String hostname = VoiceAgentConfig.defaultHostname;
+      if (voiceAgentMap.containsKey('hostname')) {
+        hostname = voiceAgentMap['hostname'];
+      }
+
+      int port = VoiceAgentConfig.defaultPort;
+      if (voiceAgentMap.containsKey('port')) {
+        port = voiceAgentMap['port'];
+      }
+
+      return VoiceAgentConfig(hostname: hostname, port: port);
+    } catch (_) {
+      debugPrint("Invalid VoiceAgent configuration, using defaults");
+      return VoiceAgentConfig.defaultConfig();
+    }
+  }
 }
 
 final appConfigProvider = Provider((ref) {
@@ -281,6 +317,13 @@ final appConfigProvider = Provider((ref) {
       mpdConfig = MpdConfig.defaultConfig();
     }
 
+    VoiceAgentConfig voiceAgentConfig;
+    if (yamlMap.containsKey('voiceAgent')) {
+      voiceAgentConfig = AppConfig.parseVoiceAgentConfig(yamlMap['voiceAgent']);
+    } else {
+      voiceAgentConfig = VoiceAgentConfig.defaultConfig();
+    }
+
     bool disableBkgAnimation = disableBkgAnimationDefault;
     if (yamlMap.containsKey('disable-bg-animation')) {
       var value = yamlMap['disable-bg-animation'];
@@ -312,7 +355,8 @@ final appConfigProvider = Provider((ref) {
         kuksaConfig: kuksaConfig,
         radioConfig: radioConfig,
         storageConfig: storageConfig,
-        mpdConfig: mpdConfig);
+        mpdConfig: mpdConfig,
+        voiceAgentConfig: voiceAgentConfig);
   } catch (_) {
     return AppConfig(
         disableBkgAnimation: false,
@@ -321,6 +365,7 @@ final appConfigProvider = Provider((ref) {
         kuksaConfig: KuksaConfig.defaultConfig(),
         radioConfig: RadioConfig.defaultConfig(),
         storageConfig: StorageConfig.defaultConfig(),
-        mpdConfig: MpdConfig.defaultConfig());
+        mpdConfig: MpdConfig.defaultConfig(),
+        voiceAgentConfig: VoiceAgentConfig.defaultConfig());
   }
 });
diff --git a/lib/data/data_providers/app_provider.dart b/lib/data/data_providers/app_provider.dart
index 0f7ed0c..64c0e47 100644
--- a/lib/data/data_providers/app_provider.dart
+++ b/lib/data/data_providers/app_provider.dart
@@ -16,10 +16,14 @@ import 'package:flutter_ics_homescreen/data/data_providers/radio_client.dart';
 import 'package:flutter_ics_homescreen/data/data_providers/storage_client.dart';
 import 'package:flutter_ics_homescreen/data/data_providers/mpd_client.dart';
 import 'package:flutter_ics_homescreen/data/data_providers/play_controller.dart';
+import 'package:flutter_ics_homescreen/data/data_providers/voice_agent_client.dart';
+import 'package:flutter_ics_homescreen/data/data_providers/voice_assistant_notifier.dart';
 import 'package:flutter_ics_homescreen/export.dart';
 
 import 'package:flutter_ics_homescreen/data/models/users.dart';
 
+import '../models/voice_assistant_state.dart';
+
 enum AppState {
   home,
   dashboard,
@@ -44,7 +48,9 @@ enum AppState {
   clock,
   date,
   time,
-  year
+  year,
+  voiceAssistant,
+  sttModel,
 }
 
 class AppStateNotifier extends Notifier<AppState> {
@@ -73,6 +79,11 @@ final valClientProvider = Provider((ref) {
   return ValClient(config: config, ref: ref);
 });
 
+final voiceAgentClientProvider = Provider((ref) {
+  VoiceAgentConfig config = ref.watch(appConfigProvider).voiceAgentConfig;
+  return VoiceAgentClient(config: config, ref: ref);
+});
+
 final appLauncherProvider = Provider((ref) {
   return AppLauncher(ref: ref);
 });
@@ -154,3 +165,7 @@ final currentTimeProvider =
     StateNotifierProvider<CurrentTimeNotifier, DateTime>((ref) {
   return CurrentTimeNotifier();
 });
+
+
+final voiceAssistantStateProvider =
+    NotifierProvider<VoiceAssistantStateNotifier, VoiceAssistantState>(VoiceAssistantStateNotifier.new);
diff --git a/lib/data/data_providers/voice_agent_client.dart b/lib/data/data_providers/voice_agent_client.dart
new file mode 100644
index 0000000..295e138
--- /dev/null
+++ b/lib/data/data_providers/voice_agent_client.dart
@@ -0,0 +1,312 @@
+import 'dart:async';
+import 'package:flutter_ics_homescreen/data/models/voice_assistant_state.dart';
+import 'package:protos/val_api.dart';
+
+import '../../export.dart';
+
+class VoiceAgentClient {
+  final VoiceAgentConfig config;
+  late ClientChannel _channel;
+  late VoiceAgentServiceClient _client;
+  final Ref ref;
+  StreamSubscription<WakeWordStatus>? _wakeWordStatusSubscription;
+
+  VoiceAgentClient({required this.config, required this.ref}) {
+    // Initialize the client channel without connecting immediately
+    String host = config.hostname;
+    int port = config.port;
+    _channel = ClientChannel(
+      host,
+      port: port,
+      options: const ChannelOptions(
+        credentials: ChannelCredentials.insecure(),
+      ),
+    );
+    debugPrint("Connecting to Voice Assistant");
+    _client = VoiceAgentServiceClient(_channel);
+
+  }
+
+  Future<ServiceStatus> checkServiceStatus() async {
+    final empty = Empty();
+    try {
+      final response = await _client.checkServiceStatus(empty);
+      return response;
+    } catch (e) {
+      // Handle the error gracefully, such as returning an error status
+      return ServiceStatus()..status = false;
+    }
+  }
+
+  Stream<WakeWordStatus> detectWakeWord() {
+    final empty = Empty();
+    try {
+      return _client.detectWakeWord(empty);
+    } catch (e) {
+      // Handle the error gracefully, such as returning a default status
+      return const Stream.empty(); // An empty stream as a placeholder
+    }
+  }
+
+  Future<RecognizeResult> recognizeVoiceCommand(
+      Stream<RecognizeVoiceControl> controlStream) async {
+    try {
+      final response = await _client.recognizeVoiceCommand(controlStream);
+      return response;
+    } catch (e) {
+      // Handle the error gracefully, such as returning a default RecognizeResult
+      return RecognizeResult()..status = RecognizeStatusType.REC_ERROR;
+    }
+  }
+
+  Future<RecognizeResult> recognizeTextCommandGrpc(
+      RecognizeTextControl controlInput) async {
+    try {
+      final response = await _client.recognizeTextCommand(controlInput);
+      return response;
+    } catch (e) {
+      // Handle the error gracefully, such as returning a default RecognizeResult
+      return RecognizeResult()..status = RecognizeStatusType.REC_ERROR;
+    }
+  }
+
+  Future<ExecuteResult> executeCommandGrpc(ExecuteInput input) async {
+    try {
+      final response = await _client.executeCommand(input);
+      return response;
+    } catch (e) {
+      // Handle the error gracefully, such as returning an error status
+      return ExecuteResult()..status = ExecuteStatusType.EXEC_ERROR;
+    }
+  }
+
+  Future<void> shutdown() async {
+    // await _channel.shutdown();
+  }
+
+  // Grpc helper methods
+  Future<void> startWakeWordDetection() async {
+    // Capture the state before any async operations
+    _wakeWordStatusSubscription?.cancel();
+    final isWakeWordModeActive = ref.read(voiceAssistantStateProvider.select((value) => value.isWakeWordMode));
+
+    if (isWakeWordModeActive) {
+      debugPrint("Wake Word Detection Started");
+    } else {
+      debugPrint("Wake Word Detection Stopped");
+      return;
+    }
+    _wakeWordStatusSubscription = detectWakeWord().listen(
+      (response) async {
+        if (response.status) {
+          await startVoiceAssistant();
+          // Wait for 2-3 seconds and then restart wake word detection
+          await Future.delayed(const Duration(seconds: 2));
+          startWakeWordDetection();
+        }
+        if (!ref.read(voiceAssistantStateProvider.select((value) => value.isWakeWordMode))) {
+          _wakeWordStatusSubscription?.cancel();
+          return;
+        }
+      },
+      onError: (error) {
+      },
+      cancelOnError: true,
+    );
+  }
+
+  Future<String> startRecording() async {
+    String streamId = "";
+    try {
+      // Create a RecognizeControl message to start recording
+      final controlMessage = RecognizeVoiceControl()
+        ..action = RecordAction.START
+        ..recordMode = RecordMode
+            .MANUAL; // You can change this to your desired record mode
+
+      // Create a Stream with the control message
+      final controlStream = Stream.fromIterable([controlMessage]);
+
+      // Call the gRPC method to start recording
+      final response =
+          await recognizeVoiceCommand(controlStream);
+
+      streamId = response.streamId;
+    } catch (e) {
+    }
+    return streamId;
+  }
+
+  Future<RecognizeResult> stopRecording(
+      String streamId, String nluModel, String stt, bool isOnlineMode) async {
+
+    try {
+      NLUModel model = NLUModel.RASA;
+      if (nluModel == "snips") {
+        model = NLUModel.SNIPS;
+      }
+      STTFramework sttFramework = STTFramework.VOSK;
+      if (stt == "whisper") {
+        sttFramework = STTFramework.WHISPER;
+      }
+      OnlineMode onlineMode = OnlineMode.OFFLINE;
+      if (isOnlineMode) {
+        onlineMode = OnlineMode.ONLINE;
+      }
+      // Create a RecognizeVoiceControl message to stop recording
+      final controlMessage = RecognizeVoiceControl()
+        ..action = RecordAction.STOP
+        ..nluModel = model
+        ..streamId =
+            streamId // Use the same stream ID as when starting recording
+        ..recordMode = RecordMode.MANUAL
+        ..sttFramework = sttFramework
+        ..onlineMode = onlineMode;
+
+
+      // Create a Stream with the control message
+      final controlStream = Stream.fromIterable([controlMessage]);
+
+      // Call the gRPC method to stop recording
+      final response =
+          await recognizeVoiceCommand(controlStream);
+
+      // Process and store the result
+      if (response.status == RecognizeStatusType.REC_SUCCESS) {
+      } else if (response.status == RecognizeStatusType.INTENT_NOT_RECOGNIZED) {
+        final command = response.command;
+        debugPrint("Command is : $command");
+      }
+      else {
+        debugPrint('Failed to process your voice command. Please try again.');
+      }
+      await shutdown();
+      return response;
+    } catch (e) {
+      // addChatMessage(/**/'Failed to process your voice command. Please try again.');
+      await shutdown();
+      return RecognizeResult()..status = RecognizeStatusType.REC_ERROR;
+    }
+    // await voiceAgentClient.shutdown();
+  }
+
+  Future<RecognizeResult> recognizeTextCommand(String command, String nluModel) async {
+    debugPrint("Recognizing Text Command: $command");
+    try {
+      NLUModel model = NLUModel.RASA;
+      if (nluModel == "snips") {
+        model = NLUModel.SNIPS;
+      }
+      // Create a RecognizeTextControl message for the text command
+      final controlMessage = RecognizeTextControl()
+        ..textCommand = command
+        ..nluModel = model;
+
+      // Call the gRPC method to recognize the text command
+      final response =
+          await recognizeTextCommandGrpc(controlMessage);
+      debugPrint("Response is : $response");
+
+      // Process and store the result
+      if (response.status == RecognizeStatusType.REC_SUCCESS) {
+        // Do nothing
+      } else if (response.status == RecognizeStatusType.INTENT_NOT_RECOGNIZED) {
+        final command = response.command;
+        debugPrint("Command is : $command");
+      } else {
+        debugPrint('Failed to process your voice command. Please try again.');
+      }
+      return response;
+    } catch (e) {
+      return RecognizeResult()..status = RecognizeStatusType.REC_ERROR;
+    }
+  }
+
+  Future<void> executeCommand(RecognizeResult response) async {
+    try {
+      // Create an ExecuteInput message using the response from stopRecording
+      final executeInput = ExecuteInput()
+        ..intent = response.intent
+        ..intentSlots.addAll(response.intentSlots);
+
+      // Call the gRPC method to execute the voice command
+      final execResponse = await executeCommandGrpc(executeInput);
+
+      // Handle the response as needed
+      if (execResponse.status == ExecuteStatusType.EXEC_SUCCESS) {
+        final commandResponse = execResponse.response;
+        ref.read(voiceAssistantStateProvider.notifier).updateCommandResponse(commandResponse);
+        debugPrint("Command Response is : $commandResponse");
+      } else if (execResponse.status == ExecuteStatusType.KUKSA_CONN_ERROR) {
+        final commandResponse = execResponse.response;
+        ref.read(voiceAssistantStateProvider.notifier).updateCommandResponse(commandResponse);
+      } else {
+        ref.read(voiceAssistantStateProvider.notifier).updateCommandResponse("Sorry, I couldn't execute your command. Please try again.");
+      }
+    } catch (e) {
+    }
+    await shutdown();
+  }
+
+
+  Future<void> disableOverlay() async {
+    await Future.delayed(Duration(seconds: 3));
+    ref.read(voiceAssistantStateProvider.notifier).toggleShowOverlay(false);
+  }
+
+  Future<void> startVoiceAssistant() async {
+    ref.read(voiceAssistantStateProvider.notifier).updateCommand(null);
+    ref.read(voiceAssistantStateProvider.notifier).updateCommandResponse(null);
+
+    SttModel stt = ref.read(voiceAssistantStateProvider.select((value) => value.sttModel));
+    bool isOnlineMode = ref.read(voiceAssistantStateProvider.select((value) => value.isOnlineMode));
+    String nluModel = "snips";
+    String sttModel = "whisper";
+    if (stt == SttModel.vosk) {
+      sttModel = "vosk";
+    }
+    bool isOverlayEnabled = ref.read(voiceAssistantStateProvider.select((value) => value.voiceAssistantOverlay));
+    bool overlayState = ref.read(voiceAssistantStateProvider.select((value) => value.showOverLay));
+
+    String streamId = await startRecording();
+    if (streamId.isNotEmpty) {
+      debugPrint('Recording started. Please speak your command.');
+      if (isOverlayEnabled) {
+        if (!overlayState) {
+          ref.read(voiceAssistantStateProvider.notifier).toggleShowOverlay(true);
+        }
+      }
+
+      ref.read(voiceAssistantStateProvider.notifier).updateButtonPressed(true);
+      ref.read(voiceAssistantStateProvider.notifier).updateIsRecording();
+      ref.read(voiceAssistantStateProvider.notifier).updateIsCommandProcessing(false);
+
+      // wait for the recording time
+      await Future.delayed(Duration(seconds: ref.watch(voiceAssistantStateProvider.select((value) => value.recordingTime))));
+
+      ref.read(voiceAssistantStateProvider.notifier).updateIsRecording();
+      ref.read(voiceAssistantStateProvider.notifier).updateIsCommandProcessing(true);
+
+      // stop the recording and process the command
+      RecognizeResult recognizeResult = await stopRecording(streamId, nluModel, sttModel, isOnlineMode);
+
+      ref.read(voiceAssistantStateProvider.notifier).updateCommand(recognizeResult.command);
+      debugPrint('Recording stopped. Processing the command...');
+
+      // Execute the command
+      await executeCommand(recognizeResult);
+
+      ref.read(voiceAssistantStateProvider.notifier).updateIsCommandProcessing(false);
+      ref.read(voiceAssistantStateProvider.notifier).updateButtonPressed(false);
+      ref.read(voiceAssistantStateProvider.notifier).updateCommand(null);
+      ref.read(voiceAssistantStateProvider.notifier).updateCommandResponse(null);
+      disableOverlay();
+
+    } else {
+      debugPrint('Failed to start recording. Please try again.');
+    }
+
+  }
+
+
+}
diff --git a/lib/data/data_providers/voice_assistant_notifier.dart b/lib/data/data_providers/voice_assistant_notifier.dart
new file mode 100644
index 0000000..0bc681a
--- /dev/null
+++ b/lib/data/data_providers/voice_assistant_notifier.dart
@@ -0,0 +1,148 @@
+import 'package:protos/val_api.dart';
+
+import '../../export.dart';
+import '../models/voice_assistant_state.dart';
+
+
+class VoiceAssistantStateNotifier extends Notifier<VoiceAssistantState> {
+  @override
+  VoiceAssistantState build() {
+    return const VoiceAssistantState.initial();
+  }
+
+  void updateVoiceAssistantState(VoiceAssistantState newState) {
+    state = newState;
+  }
+
+  void updateVoiceAssistantStateWith({
+    bool? isWakeWordMode,
+    bool? isVoiceAssistantEnable,
+    bool? voiceAssistantOverlay,
+    bool? isOnlineMode,
+    bool? isOnlineModeAvailable,
+    String? wakeWord,
+    SttModel? sttModel,
+    String? streamId,
+    bool? isCommandProcessing,
+    String? commandProcessingText,
+    int? recordingTime,
+    bool? buttonPressed,
+    bool? isRecording,
+    String? command,
+    String? commandResponse,
+    bool? isWakeWordDetected,
+    bool? showOverLay,
+  }) {
+    state = state.copyWith(
+      isWakeWordMode: isWakeWordMode,
+      isVoiceAssistantEnable: isVoiceAssistantEnable,
+      voiceAssistantOverlay: voiceAssistantOverlay,
+      isOnlineMode: isOnlineMode,
+      isOnlineModeAvailable: isOnlineModeAvailable,
+      wakeWord: wakeWord,
+      sttModel: sttModel,
+      streamId: streamId,
+      isCommandProcessing: isCommandProcessing,
+      commandProcessingText: commandProcessingText,
+      recordingTime: recordingTime,
+      buttonPressed: buttonPressed,
+      isRecording: isRecording,
+      command: command,
+      commandResponse: commandResponse,
+      isWakeWordDetected: isWakeWordDetected,
+      showOverLay: showOverLay,
+    );
+  }
+
+  void resetToDefaults() {
+    state = const VoiceAssistantState.initial();
+  }
+
+  void updateWakeWordDetected(bool isWakeWordDetected) {
+    state = state.copyWith(isWakeWordDetected: isWakeWordDetected);
+  }
+
+  void toggleShowOverlay(bool value) {
+    state = state.copyWith(showOverLay: value);
+  }
+
+  bool toggleWakeWordMode() {
+    state = state.copyWith(isWakeWordMode: !state.isWakeWordMode);
+    return state.isWakeWordMode;
+  }
+
+  Future<void> toggleVoiceAssistant(ServiceStatus status) async {
+    bool prevState = state.isVoiceAssistantEnable;
+    if (!prevState) {
+      if (status.status) {
+        state = state.copyWith(isVoiceAssistantEnable: !state.isVoiceAssistantEnable);
+        state = state.copyWith(wakeWord: status.wakeWord);
+        state = state.copyWith(isOnlineModeAvailable: status.onlineMode);
+      }
+      else {
+        debugPrint("Failed to start the Voice Assistant");
+      }
+    }
+    else {
+      state = state.copyWith(isVoiceAssistantEnable: !state.isVoiceAssistantEnable);
+      if (state.isWakeWordMode) {
+        state = state.copyWith(isWakeWordMode: false);
+      }
+    }
+  }
+
+  void toggleVoiceAssistantOverlay() {
+    state = state.copyWith(voiceAssistantOverlay: !state.voiceAssistantOverlay);
+  }
+
+  void toggleOnlineMode() {
+    state = state.copyWith(isOnlineMode: !state.isOnlineMode);
+  }
+
+  void updateWakeWord(String wakeWord) {
+    state = state.copyWith(wakeWord: wakeWord);
+  }
+
+  void updateSttModel(SttModel sttModel) {
+    state = state.copyWith(sttModel: sttModel);
+  }
+
+  void updateStreamId(String streamId) {
+    state = state.copyWith(streamId: streamId);
+  }
+
+  void updateIsCommandProcessing(bool isCommandProcessing) {
+    state = state.copyWith(isCommandProcessing: isCommandProcessing);
+  }
+
+  void updateCommandProcessingText(String commandProcessingText) {
+    state = state.copyWith(commandProcessingText: commandProcessingText);
+  }
+
+  void updateRecordingTime(int recordingTime) {
+    state = state.copyWith(recordingTime: recordingTime);
+  }
+
+  void updateIsRecording() {
+    state = state.copyWith(isRecording: !state.isRecording);
+  }
+
+  void updateCommand(String? command) {
+    state = state.copyWith(command: command);
+  }
+
+  void updateCommandResponse(String? commandResponse) {
+    state = state.copyWith(commandResponse: commandResponse);
+  }
+
+
+  bool toggleButtonPressed() {
+    bool prevState = state.buttonPressed;
+    state = state.copyWith(buttonPressed: !state.buttonPressed);
+    return !prevState;
+  }
+
+  void updateButtonPressed(bool buttonPressed) {
+    state = state.copyWith(buttonPressed: buttonPressed);
+  }
+}
\ No newline at end of file
diff --git a/lib/data/models/voice_assistant_state.dart b/lib/data/models/voice_assistant_state.dart
new file mode 100644
index 0000000..f898dd5
--- /dev/null
+++ b/lib/data/models/voice_assistant_state.dart
@@ -0,0 +1,104 @@
+enum SttModel {
+  whisper,
+  vosk
+}
+
+class VoiceAssistantState {
+  final bool isWakeWordMode;
+  final bool isVoiceAssistantEnable;
+  final bool voiceAssistantOverlay;
+  final bool isOnlineMode;
+  final bool isOnlineModeAvailable;
+  final String wakeWord;
+  final SttModel sttModel;
+  final String streamId;
+  final bool isCommandProcessing;
+  final String commandProcessingText;
+  final int recordingTime;
+  final bool buttonPressed;
+  final bool isRecording;
+  final String command;
+  final String commandResponse;
+  final bool isWakeWordDetected;
+  final bool showOverLay;
+
+
+  const VoiceAssistantState({
+    required this.isWakeWordMode,
+    required this.isVoiceAssistantEnable,
+    required this.voiceAssistantOverlay,
+    required this.isOnlineMode,
+    required this.isOnlineModeAvailable,
+    required this.wakeWord,
+    required this.sttModel,
+    required this.streamId,
+    required this.isCommandProcessing,
+    required this.commandProcessingText,
+    required this.recordingTime,
+    required this.buttonPressed,
+    required this.isRecording,
+    required this.command,
+    required this.commandResponse,
+    required this.isWakeWordDetected,
+    required this.showOverLay,
+  });
+
+  const VoiceAssistantState.initial()
+      : wakeWord = "hello auto",
+        sttModel = SttModel.whisper,
+        streamId = "",
+        isWakeWordMode = false,
+        isVoiceAssistantEnable = false,
+        voiceAssistantOverlay = false,
+        isOnlineMode = false,
+        isOnlineModeAvailable = false,
+        isCommandProcessing = false,
+        commandProcessingText = "Processing...",
+        recordingTime = 4,
+        buttonPressed = false,
+        isRecording = false,
+        command = "",
+        commandResponse = "",
+        isWakeWordDetected = false,
+        showOverLay = false;
+
+  VoiceAssistantState copyWith({
+    bool? isWakeWordMode,
+    bool? isVoiceAssistantEnable,
+    bool? voiceAssistantOverlay,
+    bool? isOnlineMode,
+    bool? isOnlineModeAvailable,
+    String? wakeWord,
+    SttModel? sttModel,
+    String? streamId,
+    bool? isCommandProcessing,
+    String? commandProcessingText,
+    int? recordingTime,
+    bool? buttonPressed,
+    bool? isRecording,
+    String? command,
+    String? commandResponse,
+    bool? isWakeWordDetected,
+    bool? showOverLay,
+  }) {
+    return VoiceAssistantState(
+      isVoiceAssistantEnable: isVoiceAssistantEnable ?? this.isVoiceAssistantEnable,
+      isWakeWordMode: isWakeWordMode ?? this.isWakeWordMode,
+      voiceAssistantOverlay: voiceAssistantOverlay ?? this.voiceAssistantOverlay,
+      isOnlineMode: isOnlineMode ?? this.isOnlineMode,
+      isOnlineModeAvailable: isOnlineModeAvailable ?? this.isOnlineModeAvailable,
+      wakeWord: wakeWord ?? this.wakeWord,
+      sttModel: sttModel ?? this.sttModel,
+      streamId: streamId ?? this.streamId,
+      isCommandProcessing: isCommandProcessing ?? this.isCommandProcessing,
+      commandProcessingText: commandProcessingText ?? this.commandProcessingText,
+      recordingTime: recordingTime ?? this.recordingTime,
+      buttonPressed: buttonPressed ?? this.buttonPressed,
+      isRecording: isRecording ?? this.isRecording,
+      command: command ?? this.command,
+      commandResponse: commandResponse ?? this.commandResponse,
+      isWakeWordDetected: isWakeWordDetected ?? this.isWakeWordDetected,
+      showOverLay: showOverLay ?? this.showOverLay,
+    );
+  }
+}
\ No newline at end of file
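
Example configuration: parseVoiceAgentConfig above reads an optional voiceAgent section from /etc/xdg/AGL/ics-homescreen.yaml. A minimal sketch of that section, assuming only the key names used by the parser (the values shown are the built-in defaults, localhost:51053):

    voiceAgent:
      hostname: localhost
      port: 51053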
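
Usage sketch: how UI code might drive the new providers. This is a hypothetical illustration, not part of the change; the widget name VoiceAssistantTile and its layout are invented, and it assumes export.dart re-exports the Riverpod and Material symbols, as in the files above. Only voiceAgentClientProvider, voiceAssistantStateProvider, checkServiceStatus, toggleVoiceAssistant, and startVoiceAssistant come from this diff.

    import 'package:flutter_ics_homescreen/export.dart';

    // Hypothetical settings tile; only the provider/notifier calls below
    // come from the code added in this change.
    class VoiceAssistantTile extends ConsumerWidget {
      const VoiceAssistantTile({super.key});

      @override
      Widget build(BuildContext context, WidgetRef ref) {
        final vaState = ref.watch(voiceAssistantStateProvider);
        final client = ref.read(voiceAgentClientProvider);

        return SwitchListTile(
          title: const Text('Voice Assistant'),
          value: vaState.isVoiceAssistantEnable,
          onChanged: (_) async {
            // Ask the agent whether it is reachable, then let the notifier
            // decide whether to flip the enable flag.
            final status = await client.checkServiceStatus();
            await ref
                .read(voiceAssistantStateProvider.notifier)
                .toggleVoiceAssistant(status);
            // A push-to-talk button elsewhere could simply call
            // client.startVoiceAssistant().
          },
        );
      }
    }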