I am using the speech_to_text Flutter library on iOS and I am running into a problem: I have a simple WebGL rendering inside an InAppWebView. The moment I tap "start listening" on the speech recognizer, the WebGL cube pauses for a second and then continues to spin. I suspect the speech recognizer is being triggered on the UI thread, causing the WebGL renderer to stall briefly. I tried using async/await and Futures, like the code below, but that does not seem to fix the issue. I don't have any problems on Android. Is this a Flutter interop bug or something else?
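For reference, one async/await variation I experimented with deferred the listen call off the tap handler with a Future, roughly like the sketch below (illustrative only; `_startListeningDeferred` is not part of the full sample that follows):

  // Sketch: defer speech.listen to a microtask instead of calling it
  // synchronously from the Start button's onPressed handler.
  Future<void> _startListeningDeferred() async {
    await Future.microtask(() {
      speech.listen(
        onResult: resultListener,
        listenFor: Duration(seconds: 30),
        pauseFor: Duration(seconds: 3),
        partialResults: true,
        localeId: _currentLocaleId,
        onSoundLevelChange: soundLevelListener,
        cancelOnError: true,
        listenMode: ListenMode.confirmation,
      );
    });
    if (mounted) setState(() {});
  }

The full app code is below.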
import 'dart:async';
import 'dart:math';
import 'package:flutter/material.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';
import 'package:flutter_inappwebview/flutter_inappwebview.dart';
void main() => runApp(SpeechSampleApp());
class SpeechSampleApp extends StatefulWidget {
@override
_SpeechSampleAppState createState() => _SpeechSampleAppState();
}
/// An example that demonstrates the basic functionality of the
/// SpeechToText plugin for using the speech recognition capability
/// of the underlying platform.
class _SpeechSampleAppState extends State<SpeechSampleApp> {
bool _hasSpeech = false;
bool _logEvents = false;
bool _onDevice = false;
final TextEditingController _pauseForController =
TextEditingController(text: '3');
final TextEditingController _listenForController =
TextEditingController(text: '30');
double level = 0.0;
double minSoundLevel = 50000;
double maxSoundLevel = -50000;
String lastWords = '';
String lastError = '';
String lastStatus = '';
String _currentLocaleId = '';
List<LocaleName> _localeNames = [];
final SpeechToText speech = SpeechToText();
final GlobalKey webViewKey = GlobalKey();
InAppWebViewController? webViewController;
InAppWebViewGroupOptions options = InAppWebViewGroupOptions(
crossPlatform: InAppWebViewOptions(
mediaPlaybackRequiresUserGesture: false,
javaScriptEnabled: true,
javaScriptCanOpenWindowsAutomatically: true,
),
android: AndroidInAppWebViewOptions(
useHybridComposition: true
),
ios: IOSInAppWebViewOptions(
allowsInlineMediaPlayback: true,
));
@override
void initState() {
super.initState();
}
/// This initializes SpeechToText. That only has to be done
/// once per application, though calling it again is harmless,
/// it just does nothing. The UX of the sample app ensures that
/// it can only be called once.
Future<void> initSpeechState() async {
_logEvent('Initialize');
try {
var hasSpeech = await speech.initialize(
onError: errorListener,
onStatus: statusListener,
debugLogging: _logEvents,
);
if (hasSpeech) {
// Get the list of languages installed on the supporting platform so they
// can be displayed in the UI for selection by the user.
_localeNames = await speech.locales();
var systemLocale = await speech.systemLocale();
_currentLocaleId = systemLocale?.localeId ?? '';
}
if (!mounted) return;
setState(() {
_hasSpeech = hasSpeech;
});
} catch (e) {
setState(() {
lastError="Speech recognition failed: ${e.toString()}";
_hasSpeech = false;
});
}
}
@override
Widget build(BuildContext context) {
return MaterialApp(
home: Scaffold(
appBar: AppBar(
title: const Text('Speech to Text Example'),
),
body: Column(children: [
HeaderWidget(),
Container(
child: Column(
children: <Widget>[
InitSpeechWidget(_hasSpeech, initSpeechState),
SpeechControlWidget(_hasSpeech, speech.isListening,
startListening, stopListening, cancelListening),
SessionOptionsWidget(
_currentLocaleId,
_switchLang,
_localeNames,
_logEvents,
_switchLogging,
_pauseForController,
_listenForController,
_onDevice,
_switchOnDevice,
),
],
),
),
Expanded(
flex: 1,
child: RecognitionResultsWidget(lastWords: lastWords, level: level),
),
Expanded(
flex: 3,
child: InAppWebView(
key: webViewKey,
initialUrlRequest: URLRequest(
url: Uri.parse('https://get.webgl.org/')),
initialOptions: options,
onWebViewCreated: (controller) {
webViewController = controller;
},
androidOnPermissionRequest: (InAppWebViewController controller, String origin, List<String> resources) async {
return PermissionRequestResponse(resources: resources, action: PermissionRequestResponseAction.GRANT);
},
onLoadStart: (controller, url) {
print("onLoadStart $url");
},
onLoadStop: (controller, url) async {
print("onLoadStop $url");
},
onConsoleMessage: (controller, consoleMessage) {
print("onConsoleMessage $consoleMessage");
},
),
),
SpeechStatusWidget(speech: speech),
]),
),
);
}
// This is called each time the user wants to start a new speech
// recognition session
void startListening() {
_logEvent('start listening');
lastWords = '';
lastError = '';
final pauseFor = int.tryParse(_pauseForController.text);
final listenFor = int.tryParse(_listenForController.text);
// Note that `listenFor` is the maximum, not the minimum; on some
// systems recognition will be stopped before this value is reached.
// Similarly `pauseFor` is a maximum, not a minimum, and may be ignored
// on some devices.
speech.listen(
onResult: resultListener,
listenFor: Duration(seconds: listenFor ?? 30),
pauseFor: Duration(seconds: pauseFor ?? 3),
partialResults: true,
localeId: _currentLocaleId,
onSoundLevelChange: soundLevelListener,
cancelOnError: true,
listenMode: ListenMode.confirmation,
onDevice: _onDevice,
);
setState(() {});
}
void stopListening() {
_logEvent('stop');
speech.stop();
setState(() {
level = 0.0;
});
}
void cancelListening() {
_logEvent('cancel');
speech.cancel();
setState(() {
level = 0.0;
});
}
/// This callback is invoked each time new recognition results are
/// available after `listen` is called.
void resultListener(SpeechRecognitionResult result) {
_logEvent(
'Result listener final: ${result.finalResult}, words: ${result.recognizedWords}');
setState(() {
lastWords = '${result.recognizedWords} - ${result.finalResult}';
});
}
void soundLevelListener(double level) {
minSoundLevel = min(minSoundLevel, level);
maxSoundLevel = max(maxSoundLevel, level);
// _logEvent('sound level $level: $minSoundLevel - $maxSoundLevel ');
setState(() {
this.level = level;
});
}
void errorListener(SpeechRecognitionError error) {
_logEvent(
'Received error status: $error, listening: ${speech.isListening}');
setState(() {
lastError = '${error.errorMsg} - ${error.permanent}';
});
}
void statusListener(String status) {
_logEvent(
'Received listener status: $status, listening: ${speech.isListening}');
setState(() {
lastStatus = status;
});
}
void _switchLang(selectedVal) {
setState(() {
_currentLocaleId = selectedVal;
});
print(selectedVal);
}
void _logEvent(String eventDescription) {
if (_logEvents) {
var eventTime = DateTime.now().toIso8601String();
print('$eventTime $eventDescription');
}
}
void _switchLogging(bool? val) {
setState(() {
_logEvents = val ?? false;
});
}
void _switchOnDevice(bool? val) {
setState(() {
_onDevice = val ?? false;
});
}
}
/// Displays the most recently recognized words and the sound level.
class RecognitionResultsWidget extends StatelessWidget {
const RecognitionResultsWidget({
Key? key,
required this.lastWords,
required this.level,
}) : super(key: key);
final String lastWords;
final double level;
@override
Widget build(BuildContext context) {
return Column(
children: <Widget>[
Center(
child: Text(
'Recognized Words',
style: TextStyle(fontSize: 22.0),
),
),
Expanded(
child: Stack(
children: <Widget>[
Container(
color: Theme.of(context).selectedRowColor,
child: Center(
child: Text(
lastWords,
textAlign: TextAlign.center,
),
),
),
Positioned.fill(
bottom: 10,
child: Align(
alignment: Alignment.bottomCenter,
child: Container(
width: 40,
height: 40,
alignment: Alignment.center,
decoration: BoxDecoration(
boxShadow: [
BoxShadow(
blurRadius: .26,
spreadRadius: level * 1.5,
color: Colors.black.withOpacity(.05))
],
color: Colors.white,
borderRadius: BorderRadius.all(Radius.circular(50)),
),
child: IconButton(
icon: Icon(Icons.mic),
onPressed: () => null,
),
),
),
),
],
),
),
],
);
}
}
class HeaderWidget extends StatelessWidget {
const HeaderWidget({
Key? key,
}) : super(key: key);
@override
Widget build(BuildContext context) {
return Center(
child: Text(
'Speech recognition available',
style: TextStyle(fontSize: 22.0),
),
);
}
}
/// Display the current error status from the speech
/// recognizer
class ErrorWidget extends StatelessWidget {
const ErrorWidget({
Key? key,
required this.lastError,
}) : super(key: key);
final String lastError;
@override
Widget build(BuildContext context) {
return Column(
children: <Widget>[
Center(
child: Text(
'Error Status',
style: TextStyle(fontSize: 22.0),
),
),
Center(
child: Text(lastError),
),
],
);
}
}
/// Controls to start and stop speech recognition
class SpeechControlWidget extends StatelessWidget {
const SpeechControlWidget(this.hasSpeech, this.isListening,
this.startListening, this.stopListening, this.cancelListening,
{Key? key})
: super(key: key);
final bool hasSpeech;
final bool isListening;
final void Function() startListening;
final void Function() stopListening;
final void Function() cancelListening;
@override
Widget build(BuildContext context) {
return Row(
mainAxisAlignment: MainAxisAlignment.spaceAround,
children: <Widget>[
TextButton(
onPressed: !hasSpeech || isListening ? null : startListening,
child: Text('Start'),
),
TextButton(
onPressed: isListening ? stopListening : null,
child: Text('Stop'),
),
TextButton(
onPressed: isListening ? cancelListening : null,
child: Text('Cancel'),
)
],
);
}
}
class SessionOptionsWidget extends StatelessWidget {
const SessionOptionsWidget(
this.currentLocaleId,
this.switchLang,
this.localeNames,
this.logEvents,
this.switchLogging,
this.pauseForController,
this.listenForController,
this.onDevice,
this.switchOnDevice,
{Key? key})
: super(key: key);
final String currentLocaleId;
final void Function(String?) switchLang;
final void Function(bool?) switchLogging;
final void Function(bool?) switchOnDevice;
final TextEditingController pauseForController;
final TextEditingController listenForController;
final List<LocaleName> localeNames;
final bool logEvents;
final bool onDevice;
@override
Widget build(BuildContext context) {
return Padding(
padding: const EdgeInsets.all(8.0),
child: Column(
mainAxisAlignment: MainAxisAlignment.spaceBetween,
children: <Widget>[
Row(
children: [
Text('Language: '),
DropdownButton<String>(
onChanged: (selectedVal) => switchLang(selectedVal),
value: currentLocaleId,
items: localeNames
.map(
(localeName) => DropdownMenuItem(
value: localeName.localeId,
child: Text(localeName.name),
),
)
.toList(),
),
],
),
Row(
children: [
Text('pauseFor: '),
Container(
padding: EdgeInsets.only(left: 8),
width: 80,
child: TextFormField(
controller: pauseForController,
)),
Container(
padding: EdgeInsets.only(left: 16),
child: Text('listenFor: ')),
Container(
padding: EdgeInsets.only(left: 8),
width: 80,
child: TextFormField(
controller: listenForController,
)),
],
),
Row(
children: [
Text('On device: '),
Checkbox(
value: onDevice,
onChanged: switchOnDevice,
),
Text('Log events: '),
Checkbox(
value: logEvents,
onChanged: switchLogging,
),
],
),
],
),
);
}
}
class InitSpeechWidget extends StatelessWidget {
const InitSpeechWidget(this.hasSpeech, this.initSpeechState, {Key? key})
: super(key: key);
final bool hasSpeech;
final Future<void> Function() initSpeechState;
@override
Widget build(BuildContext context) {
return Row(
mainAxisAlignment: MainAxisAlignment.spaceAround,
children: <Widget>[
TextButton(
onPressed: hasSpeech ? null : initSpeechState,
child: Text('Initialize'),
),
],
);
}
}
/// Display the current status of the listener
class SpeechStatusWidget extends StatelessWidget {
const SpeechStatusWidget({
Key? key,
required this.speech,
}) : super(key: key);
final SpeechToText speech;
@override
Widget build(BuildContext context) {
return Container(
padding: EdgeInsets.symmetric(vertical: 20),
color: Theme.of(context).backgroundColor,
child: Center(
child: speech.isListening
? Text(
"I'm listening...",
style: TextStyle(fontWeight: FontWeight.bold),
)
: Text(
'Not listening',
style: TextStyle(fontWeight: FontWeight.bold),
),
),
);
}
}