diff --git a/.dart_tool/package_config.json b/.dart_tool/package_config.json new file mode 100644 index 0000000..9565374 --- /dev/null +++ b/.dart_tool/package_config.json @@ -0,0 +1,56 @@ +{ + "configVersion": 2, + "packages": [ + { + "name": "characters", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "collection", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "flutter", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/packages/flutter", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "meta", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "sky_engine", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "typed_data", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "vector_math", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "speech_recognition", + "rootUri": "../", + "packageUri": "lib/", + "languageVersion": "2.10" + } + ], + "generated": "2021-08-08T07:11:40.541237Z", + "generator": "pub", + "generatorVersion": "2.13.4" +} diff --git a/.dart_tool/package_config_subset b/.dart_tool/package_config_subset new file mode 100644 index 0000000..93a3b64 --- /dev/null +++ b/.dart_tool/package_config_subset @@ -0,0 +1,33 @@ +speech_recognition +2.10 +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/ +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/lib/ +characters +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0/lib/ +collection +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0/lib/ +meta +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0/lib/ +typed_data +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0/lib/ +vector_math +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0/lib/ +sky_engine +2.12 +file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine/ +file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine/lib/ +flutter +2.12 +file:///Users/fumiyatanaka/Work/flutter/packages/flutter/ +file:///Users/fumiyatanaka/Work/flutter/packages/flutter/lib/ +2 diff --git a/.dart_tool/version b/.dart_tool/version new file mode 100644 index 0000000..6b4d157 --- /dev/null +++ b/.dart_tool/version @@ 
-0,0 +1 @@ +2.2.3 \ No newline at end of file diff --git a/README.md b/README.md index 3068b05..5ade445 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ infos.plist, add : ## Limitation -On iOS, by default the plugin is configured for French, English, Russian, Spanish, Italian. +On iOS, by default the plugin is configured for French, English, Russian, Spanish, Italian and Japanese. On Android, without additional installations, it will probably works only with the default device locale. ## Troubleshooting diff --git a/example/.dart_tool/package_config.json b/example/.dart_tool/package_config.json new file mode 100644 index 0000000..f419186 --- /dev/null +++ b/example/.dart_tool/package_config.json @@ -0,0 +1,62 @@ +{ + "configVersion": 2, + "packages": [ + { + "name": "characters", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "collection", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "flutter", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/packages/flutter", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "meta", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "sky_engine", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "speech_recognition", + "rootUri": "../../", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "typed_data", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "vector_math", + "rootUri": "file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0", + "packageUri": "lib/", + "languageVersion": "2.12" + }, + { + "name": "speech_recognition_example", + "rootUri": "../", + "packageUri": "lib/", + "languageVersion": "2.12" + } + ], + "generated": "2021-08-08T07:15:12.083543Z", + "generator": "pub", + "generatorVersion": "2.13.4" +} diff --git a/example/.dart_tool/package_config_subset b/example/.dart_tool/package_config_subset new file mode 100644 index 0000000..3f281d6 --- /dev/null +++ b/example/.dart_tool/package_config_subset @@ -0,0 +1,37 @@ +speech_recognition +2.12 +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/ +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/lib/ +characters +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/characters-1.1.0/lib/ +collection +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/collection-1.15.0/lib/ +meta +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/meta-1.3.0/lib/ +typed_data +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0/ 
+file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/typed_data-1.3.0/lib/ +vector_math +2.12 +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0/ +file:///Users/fumiyatanaka/Work/flutter/.pub-cache/hosted/pub.dartlang.org/vector_math-2.1.0/lib/ +sky_engine +2.12 +file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine/ +file:///Users/fumiyatanaka/Work/flutter/bin/cache/pkg/sky_engine/lib/ +flutter +2.12 +file:///Users/fumiyatanaka/Work/flutter/packages/flutter/ +file:///Users/fumiyatanaka/Work/flutter/packages/flutter/lib/ +speech_recognition_example +2.12 +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/example/ +file:///Users/fumiyatanaka/Work/FlutterDev/speech_recognition/example/lib/ +2 diff --git a/example/.dart_tool/version b/example/.dart_tool/version new file mode 100644 index 0000000..6b4d157 --- /dev/null +++ b/example/.dart_tool/version @@ -0,0 +1 @@ +2.2.3 \ No newline at end of file diff --git a/example/.flutter-plugins-dependencies b/example/.flutter-plugins-dependencies new file mode 100644 index 0000000..ff29f8d --- /dev/null +++ b/example/.flutter-plugins-dependencies @@ -0,0 +1 @@ +{"info":"This is a generated file; do not edit or check into version control.","plugins":{"ios":[{"name":"speech_recognition","path":"/Users/fumiyatanaka/Work/FlutterDev/speech_recognition/","dependencies":[]}],"android":[{"name":"speech_recognition","path":"/Users/fumiyatanaka/Work/FlutterDev/speech_recognition/","dependencies":[]}],"macos":[],"linux":[],"windows":[],"web":[]},"dependencyGraph":[{"name":"speech_recognition","dependencies":[]}],"date_created":"2021-08-08 16:15:12.175867","version":"2.2.3"} \ No newline at end of file diff --git a/example/ios/Flutter/flutter_export_environment.sh b/example/ios/Flutter/flutter_export_environment.sh new file mode 100755 index 0000000..d69368e --- /dev/null +++ b/example/ios/Flutter/flutter_export_environment.sh @@ -0,0 +1,14 @@ +#!/bin/sh +# This is a generated file; do not edit or check into version control. 
+export "FLUTTER_ROOT=/Users/fumiyatanaka/Work/flutter" +export "FLUTTER_APPLICATION_PATH=/Users/fumiyatanaka/Work/FlutterDev/speech_recognition/example" +export "COCOAPODS_PARALLEL_CODE_SIGN=true" +export "FLUTTER_TARGET=lib/main.dart" +export "FLUTTER_BUILD_DIR=build" +export "SYMROOT=${SOURCE_ROOT}/../build/ios" +export "FLUTTER_BUILD_NAME=1.0.0" +export "FLUTTER_BUILD_NUMBER=1" +export "DART_OBFUSCATION=false" +export "TRACK_WIDGET_CREATION=false" +export "TREE_SHAKE_ICONS=false" +export "PACKAGE_CONFIG=.packages" diff --git a/example/lib/main.dart b/example/lib/main.dart index a5094aa..e9165f3 100644 --- a/example/lib/main.dart +++ b/example/lib/main.dart @@ -11,6 +11,7 @@ const languages = const [ const Language('Pусский', 'ru_RU'), const Language('Italiano', 'it_IT'), const Language('Español', 'es_ES'), + const Language('Japanese', 'ja_JP'), ]; class Language { @@ -26,7 +27,7 @@ class MyApp extends StatefulWidget { } class _MyAppState extends State { - SpeechRecognition _speech; + late SpeechRecognition _speech; bool _speechRecognitionAvailable = false; bool _isListening = false; @@ -50,7 +51,7 @@ class _MyAppState extends State { _speech.setCurrentLocaleHandler(onCurrentLocale); _speech.setRecognitionStartedHandler(onRecognitionStarted); _speech.setRecognitionResultHandler(onRecognitionResult); - _speech.setRecognitionCompleteHandler(onRecognitionComplete); + _speech.setRecognitionCompleteHandler((_) => onRecognitionComplete); _speech.setErrorHandler(errorHandler); _speech .activate() @@ -117,14 +118,16 @@ class _MyAppState extends State { setState(() => selectedLang = lang); } - Widget _buildButton({String label, VoidCallback onPressed}) => new Padding( + Widget _buildButton({required String label, VoidCallback? onPressed}) => new Padding( padding: new EdgeInsets.all(12.0), - child: new RaisedButton( - color: Colors.cyan.shade600, + child: ElevatedButton( onPressed: onPressed, - child: new Text( - label, - style: const TextStyle(color: Colors.white), + child: Container( + color: Colors.cyan.shade600, + child: new Text( + label, + style: const TextStyle(color: Colors.white), + ), ), )); diff --git a/example/pubspec.yaml b/example/pubspec.yaml index 674b76f..317c25b 100644 --- a/example/pubspec.yaml +++ b/example/pubspec.yaml @@ -1,6 +1,7 @@ name: speech_recognition_example description: Demonstrates how to use the speech_recognition plugin. - +environment: + sdk: '>=2.12.0 <3.0.0' dependencies: flutter: sdk: flutter diff --git a/ios/Classes/SwiftSpeechRecognitionPlugin.swift b/ios/Classes/SwiftSpeechRecognitionPlugin.swift index 8c8f378..78af8ac 100644 --- a/ios/Classes/SwiftSpeechRecognitionPlugin.swift +++ b/ios/Classes/SwiftSpeechRecognitionPlugin.swift @@ -4,184 +4,188 @@ import Speech @available(iOS 10.0, *) public class SwiftSpeechRecognitionPlugin: NSObject, FlutterPlugin, SFSpeechRecognizerDelegate { - public static func register(with registrar: FlutterPluginRegistrar) { - let channel = FlutterMethodChannel(name: "speech_recognition", binaryMessenger: registrar.messenger()) - let instance = SwiftSpeechRecognitionPlugin(channel: channel) - registrar.addMethodCallDelegate(instance, channel: channel) - } - - private let speechRecognizerFr = SFSpeechRecognizer(locale: Locale(identifier: "fr_FR"))! - private let speechRecognizerEn = SFSpeechRecognizer(locale: Locale(identifier: "en_US"))! - private let speechRecognizerRu = SFSpeechRecognizer(locale: Locale(identifier: "ru_RU"))! - private let speechRecognizerIt = SFSpeechRecognizer(locale: Locale(identifier: "it_IT"))! 
- private let speechRecognizerEs = SFSpeechRecognizer(locale: Locale(identifier: "es_ES"))! - - private var speechChannel: FlutterMethodChannel? - - private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest? - - private var recognitionTask: SFSpeechRecognitionTask? - - private let audioEngine = AVAudioEngine() - - init(channel:FlutterMethodChannel){ - speechChannel = channel - super.init() - } - - public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { - //result("iOS " + UIDevice.current.systemVersion) - switch (call.method) { - case "speech.activate": - self.activateRecognition(result: result) - case "speech.listen": - self.startRecognition(lang: call.arguments as! String, result: result) - case "speech.cancel": - self.cancelRecognition(result: result) - case "speech.stop": - self.stopRecognition(result: result) - default: - result(FlutterMethodNotImplemented) + public static func register(with registrar: FlutterPluginRegistrar) { + let channel = FlutterMethodChannel(name: "speech_recognition", binaryMessenger: registrar.messenger()) + let instance = SwiftSpeechRecognitionPlugin(channel: channel) + registrar.addMethodCallDelegate(instance, channel: channel) } - } - - private func activateRecognition(result: @escaping FlutterResult) { - speechRecognizerFr.delegate = self - speechRecognizerEn.delegate = self - speechRecognizerRu.delegate = self - speechRecognizerIt.delegate = self - speechRecognizerEs.delegate = self - - SFSpeechRecognizer.requestAuthorization { authStatus in - OperationQueue.main.addOperation { - switch authStatus { - case .authorized: - result(true) - self.speechChannel?.invokeMethod("speech.onCurrentLocale", arguments: "\(Locale.current.identifier)") - - case .denied: - result(false) - - case .restricted: - result(false) - - case .notDetermined: - result(false) + + private let speechRecognizerFr = SFSpeechRecognizer(locale: Locale(identifier: "fr_FR"))! + private let speechRecognizerEn = SFSpeechRecognizer(locale: Locale(identifier: "en_US"))! + private let speechRecognizerRu = SFSpeechRecognizer(locale: Locale(identifier: "ru_RU"))! + private let speechRecognizerIt = SFSpeechRecognizer(locale: Locale(identifier: "it_IT"))! + private let speechRecognizerEs = SFSpeechRecognizer(locale: Locale(identifier: "es_ES"))! + private let speechRecognizerJa = SFSpeechRecognizer(locale: Locale(identifier: "ja_JP"))! + + + private var speechChannel: FlutterMethodChannel? + + private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest? + + private var recognitionTask: SFSpeechRecognitionTask? + + private let audioEngine = AVAudioEngine() + + init(channel:FlutterMethodChannel){ + speechChannel = channel + super.init() + } + + public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { + //result("iOS " + UIDevice.current.systemVersion) + switch (call.method) { + case "speech.activate": + self.activateRecognition(result: result) + case "speech.listen": + self.startRecognition(lang: call.arguments as! String, result: result) + case "speech.cancel": + self.cancelRecognition(result: result) + case "speech.stop": + self.stopRecognition(result: result) + default: + result(FlutterMethodNotImplemented) } - print("SFSpeechRecognizer.requestAuthorization \(authStatus.rawValue)") - } } - } - - private func startRecognition(lang: String, result: FlutterResult) { - print("startRecognition...") - if audioEngine.isRunning { - audioEngine.stop() - recognitionRequest?.endAudio() - result(false) - } else { - try! 
start(lang: lang) - result(true) + + private func activateRecognition(result: @escaping FlutterResult) { + speechRecognizerFr.delegate = self + speechRecognizerEn.delegate = self + speechRecognizerRu.delegate = self + speechRecognizerIt.delegate = self + speechRecognizerEs.delegate = self + speechRecognizerJa.delegate = self + + SFSpeechRecognizer.requestAuthorization { authStatus in + OperationQueue.main.addOperation { + switch authStatus { + case .authorized: + result(true) + self.speechChannel?.invokeMethod("speech.onCurrentLocale", arguments: "\(Locale.current.identifier)") + + case .denied: + result(false) + + case .restricted: + result(false) + + case .notDetermined: + result(false) + } + print("SFSpeechRecognizer.requestAuthorization \(authStatus.rawValue)") + } + } } + + private func startRecognition(lang: String, result: FlutterResult) { + print("startRecognition...") + if audioEngine.isRunning { + audioEngine.stop() + recognitionRequest?.endAudio() + result(false) + } else { + try! start(lang: lang) + result(true) + } } - } - - private func cancelRecognition(result: FlutterResult?) { - if let recognitionTask = recognitionTask { - recognitionTask.cancel() - self.recognitionTask = nil - if let r = result { - r(false) - } + + private func cancelRecognition(result: FlutterResult?) { + if let recognitionTask = recognitionTask { + recognitionTask.cancel() + self.recognitionTask = nil + if let r = result { + r(false) + } + } } - } - - private func stopRecognition(result: FlutterResult) { - if audioEngine.isRunning { - audioEngine.stop() - recognitionRequest?.endAudio() + private func stopRecognition(result: FlutterResult) { + if audioEngine.isRunning { + audioEngine.stop() + recognitionRequest?.endAudio() + } + result(false) } - result(false) - } - - private func start(lang: String) throws { - - cancelRecognition(result: nil) - - let audioSession = AVAudioSession.sharedInstance() - try audioSession.setCategory(AVAudioSession.Category.record, mode: .default) - try audioSession.setMode(AVAudioSession.Mode.measurement) - try audioSession.setActive(true, options: .notifyOthersOnDeactivation) - - recognitionRequest = SFSpeechAudioBufferRecognitionRequest() - - let inputNode = audioEngine.inputNode - guard let recognitionRequest = recognitionRequest else { - fatalError("Unable to created a SFSpeechAudioBufferRecognitionRequest object") + private func start(lang: String) throws { + + cancelRecognition(result: nil) + + let audioSession = AVAudioSession.sharedInstance() + try audioSession.setCategory(AVAudioSession.Category.record, mode: .default) + try audioSession.setMode(AVAudioSession.Mode.measurement) + try audioSession.setActive(true, options: .notifyOthersOnDeactivation) + + recognitionRequest = SFSpeechAudioBufferRecognitionRequest() + + let inputNode = audioEngine.inputNode + + guard let recognitionRequest = recognitionRequest else { + fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") } - - recognitionRequest.shouldReportPartialResults = true - - let speechRecognizer = getRecognizer(lang: lang) - - recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in - var isFinal = false - - if let result = result { - print("Speech : \(result.bestTranscription.formattedString)") - self.speechChannel?.invokeMethod("speech.onSpeech", arguments: result.bestTranscription.formattedString) - isFinal = result.isFinal - if isFinal { - self.speechChannel!.invokeMethod( - "speech.onRecognitionComplete", - arguments: result.bestTranscription.formattedString - ) - } - } - - if
error != nil || isFinal { - self.audioEngine.stop() - inputNode.removeTap(onBus: 0) - self.recognitionRequest = nil - self.recognitionTask = nil - } + + recognitionRequest.shouldReportPartialResults = true + + let speechRecognizer = getRecognizer(lang: lang) + + recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in + var isFinal = false + + if let result = result { + print("Speech : \(result.bestTranscription.formattedString)") + self.speechChannel?.invokeMethod("speech.onSpeech", arguments: result.bestTranscription.formattedString) + isFinal = result.isFinal + if isFinal { + self.speechChannel!.invokeMethod( + "speech.onRecognitionComplete", + arguments: result.bestTranscription.formattedString + ) + } + } + + if error != nil || isFinal { + self.audioEngine.stop() + inputNode.removeTap(onBus: 0) + self.recognitionRequest = nil + self.recognitionTask = nil + } + } + + let recognitionFormat = inputNode.outputFormat(forBus: 0) + inputNode.installTap(onBus: 0, bufferSize: 1024, format: recognitionFormat) { + (buffer: AVAudioPCMBuffer, when: AVAudioTime) in + self.recognitionRequest?.append(buffer) + } + + audioEngine.prepare() + try audioEngine.start() + + speechChannel!.invokeMethod("speech.onRecognitionStarted", arguments: nil) } - - let recognitionFormat = inputNode.outputFormat(forBus: 0) - inputNode.installTap(onBus: 0, bufferSize: 1024, format: recognitionFormat) { - (buffer: AVAudioPCMBuffer, when: AVAudioTime) in - self.recognitionRequest?.append(buffer) + + private func getRecognizer(lang: String) -> Speech.SFSpeechRecognizer { + switch (lang) { + case "fr_FR": + return speechRecognizerFr + case "en_US": + return speechRecognizerEn + case "ru_RU": + return speechRecognizerRu + case "it_IT": + return speechRecognizerIt + case "es_ES": + return speechRecognizerEs + case "ja_JP": + return speechRecognizerJa + default: + return speechRecognizerFr + } } - - audioEngine.prepare() - try audioEngine.start() - - speechChannel!.invokeMethod("speech.onRecognitionStarted", arguments: nil) - } - - private func getRecognizer(lang: String) -> Speech.SFSpeechRecognizer { - switch (lang) { - case "fr_FR": - return speechRecognizerFr - case "en_US": - return speechRecognizerEn - case "ru_RU": - return speechRecognizerRu - case "it_IT": - return speechRecognizerIt - case "es_ES": - return speechRecognizerEs - default: - return speechRecognizerFr + + public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) { + speechChannel?.invokeMethod("speech.onSpeechAvailability", arguments: available) } - } - - public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) { - speechChannel?.invokeMethod("speech.onSpeechAvailability", arguments: available) - } } // Helper function inserted by Swift 4.2 migrator. fileprivate func convertFromAVAudioSessionCategory(_ input: AVAudioSession.Category) -> String { - return input.rawValue + return input.rawValue } diff --git a/pubspec.yaml b/pubspec.yaml index 733f1de..73c0c9c 100644 --- a/pubspec.yaml +++ b/pubspec.yaml @@ -4,7 +4,7 @@ version: 0.3.0+1 author: Erick Ghaumez homepage: https://github.com/rxlabz/speech_recognition environment: - sdk: '>=2.0.0 <3.0.0' + sdk: '>=2.12.0 <3.0.0' flutter: plugin: androidPackage: bz.rxla.flutter.speechrecognition
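Review note: with this change, iOS sends the final transcription (`bestTranscription.formattedString`) as the argument of the `speech.onRecognitionComplete` channel call, and the example now wires its complete handler through a closure. For reference, a minimal end-to-end usage of the plugin with the newly supported `ja_JP` locale could look like the sketch below. It assumes the plugin's existing Dart API as used in example/lib/main.dart (`activate()` returning `Future<bool>`, `listen(locale:)`, and handlers receiving the transcription as a `String`); the function name `startJapaneseDictation` is illustrative, not part of the package.

```dart
import 'package:speech_recognition/speech_recognition.dart';

final SpeechRecognition _speech = SpeechRecognition();

Future<void> startJapaneseDictation() async {
  // Partial results stream in while the user is speaking.
  _speech.setRecognitionResultHandler((String text) => print('partial: $text'));
  // Final result; on iOS this carries the string sent with the
  // "speech.onRecognitionComplete" channel call added in this diff.
  _speech.setRecognitionCompleteHandler((String text) => print('final: $text'));

  final bool available = await _speech.activate(); // permission prompt on first run
  if (available) {
    await _speech.listen(locale: 'ja_JP'); // routed to speechRecognizerJa on iOS
  }
}
```

On Android the same call goes to the platform recognizer, which (per the README's Limitation section) will probably work only with the default device locale.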