summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Tuomas Tuononen <tuomas.tuononen@code-q.fi>  2015-09-16 18:17:57 +0300
committer Tuomas Tuononen <tuomas.tuononen@code-q.fi>  2015-10-14 07:18:04 +0000
commit    c780cd0a1d750b2f186acea99f254fcb2cc0e1b5 (patch)
tree      8cd7ecd86b2388c671825544e849c096a1b51fc6
parent    6ab8c10340e3a588c4a9ca5e6fa1e1651fef8d34 (diff)
SpeechRecognition: Use pre-defined string constants
- In engine parameter keys
- In error signal parameter keys
- In result signal parameter keys

Change-Id: I49ab6400655219025c2fa5b1c2f92a30bf1125d4
Reviewed-by: Johannes Oikarinen <johannes.oikarinen@digia.com>
Reviewed-by: Frederik Gladhorn <frederik.gladhorn@theqtcompany.com>
-rw-r--r--  examples/speech/qmlspeech/main.qml                                        |  26
-rwxr-xr-x  src/asr/qspeechrecognition.cpp                                            |  69
-rw-r--r--  src/asr/qspeechrecognition.h                                              |  11
-rwxr-xr-x  src/asr/qspeechrecognitionengine.cpp                                      | 100
-rw-r--r--  src/asr/qspeechrecognitionengine.h                                        |  11
-rwxr-xr-x  src/asr/qspeechrecognitionmanager.cpp                                     |  28
-rwxr-xr-x  src/asr/qspeechrecognitionpluginengine.cpp                                |  67
-rw-r--r--  src/plugins/asr/pocketsphinx/doc/src/pocketsphinx.qdoc                    |  18
-rwxr-xr-x  src/plugins/asr/pocketsphinx/qspeechrecognitionengine_pocketsphinx.cpp    |  48
9 files changed, 273 insertions, 105 deletions
diff --git a/examples/speech/qmlspeech/main.qml b/examples/speech/qmlspeech/main.qml
index 2bf54b0..08b200a 100644
--- a/examples/speech/qmlspeech/main.qml
+++ b/examples/speech/qmlspeech/main.qml
@@ -53,10 +53,10 @@ ApplicationWindow {
property url mainGrammarFile: "qrc:grammar/main"
property url yesNoGrammarFile: "qrc:grammar/yesno"
property var engine: createEngine("local", "pocketsphinx",
- { "locale" : "en_US",
- "resourceDirectory" : resourceDir,
- //"debugAudioDirectory" : "/tmp",
- "dictionary" : dictionaryFile })
+ { "Locale" : "en_US",
+ "ResourceDirectory" : resourceDir,
+ //"DebugAudioDirectory" : "/tmp",
+ "Dictionary" : dictionaryFile })
property var mainGrammar: createGrammar(engine, "main", mainGrammarFile)
property var yesNoGrammar: createGrammar(engine, "yesno", yesNoGrammarFile)
@@ -64,13 +64,13 @@ ApplicationWindow {
}
onResult: {
- mainForm.resultText = grammarName + ": " + resultData["transcription"]
+ mainForm.resultText = grammarName + ": " + resultData["Transcription"]
mainForm.statusText = "Ready"
}
onError: {
var errorText = "Error " + errorCode
- if (parameters["reason"] !== undefined) {
- errorText += ": " + parameters["reason"]
+ if (parameters["Reason"] !== undefined) {
+ errorText += ": " + parameters["Reason"]
}
mainForm.resultText = ""
mainForm.statusText = errorText
@@ -86,7 +86,7 @@ ApplicationWindow {
mainForm.statusText = "Ready"
}
onAttributeUpdated: {
- if (key == "audioLevel")
+ if (key == "AudioLevel")
mainForm.audioLevel = value
}
onStateChanged: {
@@ -104,14 +104,14 @@ ApplicationWindow {
var supportedParameters = speech.engine.supportedParameters()
console.log("Supported engine parameters: " + supportedParameters)
// Switch to "pulse" audio device if available
- if (supportedParameters.indexOf("audioInputDevices") !== -1) {
- var inputDevices = speech.engine.parameter("audioInputDevices")
+ if (supportedParameters.indexOf("AudioInputDevices") !== -1) {
+ var inputDevices = speech.engine.parameter("AudioInputDevices")
if (inputDevices.indexOf("pulse") !== -1)
- speech.engine.setParameter("audioInputDevice", "pulse")
+ speech.engine.setParameter("AudioInputDevice", "pulse")
}
// Example: recognize audio clip instead of live audio:
- //if (supportedParameters.indexOf("audioInputFile") !== -1) {
- // speech.engine.setParameter("audioInputFile", homeDir + "/asr_input_1.wav")
+ //if (supportedParameters.indexOf("AudioInputFile") !== -1) {
+ // speech.engine.setParameter("AudioInputFile", homeDir + "/asr_input_1.wav")
//}
}
}
diff --git a/src/asr/qspeechrecognition.cpp b/src/asr/qspeechrecognition.cpp
index 3cc19ee..fee094e 100755
--- a/src/asr/qspeechrecognition.cpp
+++ b/src/asr/qspeechrecognition.cpp
@@ -46,6 +46,67 @@ QT_BEGIN_NAMESPACE
Q_LOGGING_CATEGORY(lcSpeechAsr, "qt.speech.asr")
+/*
+ When these conditions are satisfied, QStringLiteral is implemented by
+ gcc's statement-expression extension. However, in this file it will
+ not work, because "statement-expressions are not allowed outside functions
+ nor in template-argument lists".
+ MSVC 2012 produces an internal compiler error on encountering
+ QStringLiteral in this context.
+
+ Fall back to the less-performant QLatin1String in this case.
+*/
+#if defined(Q_CC_GNU) && defined(Q_COMPILER_LAMBDA)
+# define Q_DEFINE_ASR_ATTRIBUTE(key) const QString QSpeechRecognition::key(QStringLiteral(#key))
+# define Q_DEFINE_ASR_ERROR_PARAMETER(key) const QString QSpeechRecognition::key(QStringLiteral(#key))
+# define Q_DEFINE_ASR_RESULT_PARAMETER(key) const QString QSpeechRecognition::key(QStringLiteral(#key))
+#else
+# define Q_DEFINE_ASR_ATTRIBUTE(key) const QString QSpeechRecognition::key(QLatin1String(#key))
+# define Q_DEFINE_ASR_ERROR_PARAMETER(key) const QString QSpeechRecognition::key(QLatin1String(#key))
+# define Q_DEFINE_ASR_RESULT_PARAMETER(key) const QString QSpeechRecognition::key(QLatin1String(#key))
+#endif
+
+// Run-time attributes:
+
+/*! \variable QSpeechRecognition::AudioLevel
+
+ This constant is used as the key for a speech recognition run-time attribute.
+ See attributeUpdated().
+*/
+Q_DEFINE_ASR_ATTRIBUTE(AudioLevel);
+
+// Error parameters:
+
+/*! \variable QSpeechRecognition::Reason
+
+ This constant is used as the key for a speech recognition error parameter.
+ See error().
+*/
+Q_DEFINE_ASR_ERROR_PARAMETER(Reason);
+
+/*! \variable QSpeechRecognition::Engine
+
+ This constant is used as the key for a speech recognition error parameter.
+ See error().
+*/
+Q_DEFINE_ASR_ERROR_PARAMETER(Engine);
+
+/*! \variable QSpeechRecognition::Grammar
+
+ This constant is used as the key for a speech recognition error parameter.
+ See error().
+*/
+Q_DEFINE_ASR_ERROR_PARAMETER(Grammar);
+
+// Result parameters:
+
+/*! \variable QSpeechRecognition::Transcription
+
+ This constant is used as the key for a speech recognition result parameter.
+ See result().
+*/
+Q_DEFINE_ASR_RESULT_PARAMETER(Transcription);
+
/*!
\class QSpeechRecognition
\inmodule QtSpeech
@@ -157,7 +218,7 @@ Q_LOGGING_CATEGORY(lcSpeechAsr, "qt.speech.asr")
\li Error codes that use the parameter
\li Description
\row
- \li reason
+ \li \l Reason
\li QString
\li any
\li Human-readable description of the error
@@ -180,7 +241,7 @@ Q_LOGGING_CATEGORY(lcSpeechAsr, "qt.speech.asr")
\li Value type
\li Description
\row
- \li transcription
+ \li \l Transcription
\li QString
\li Transcription of what was recognized
\endtable
@@ -229,7 +290,7 @@ Q_LOGGING_CATEGORY(lcSpeechAsr, "qt.speech.asr")
\li Value type
\li Description
\row
- \li audioLevel
+ \li \l AudioLevel
\li qreal
\li Audio level between 0.0 and 1.0. Only updated when listening for commands.
\endtable
@@ -694,7 +755,7 @@ void QSpeechRecognitionPrivate::onError(int session, QSpeechRecognition::Error e
}
break;
case QSpeechRecognition::GrammarInitError:
- grammar = m_grammars.value(parameters.value(QLatin1String("grammar")).toString(), 0);
+ grammar = m_grammars.value(parameters.value(QSpeechRecognition::Grammar).toString(), 0);
if (grammar) {
grammar->setState(QSpeechRecognitionGrammar::ErrorState);
emit q->error(errorCode, parameters);
diff --git a/src/asr/qspeechrecognition.h b/src/asr/qspeechrecognition.h
index 0d5c639..14ba61c 100644
--- a/src/asr/qspeechrecognition.h
+++ b/src/asr/qspeechrecognition.h
@@ -100,6 +100,17 @@ public:
Q_INVOKABLE void reset();
Q_INVOKABLE void dispatchMessage(const QString &message, const QVariantMap &parameters = QVariantMap());
+ // Common attribute keys:
+ static const QString AudioLevel;
+
+ // Common error parameter keys:
+ static const QString Reason;
+ static const QString Engine;
+ static const QString Grammar;
+
+ // Common result parameter keys:
+ static const QString Transcription;
+
Q_SIGNALS:
void stateChanged();
void muteChanged();
diff --git a/src/asr/qspeechrecognitionengine.cpp b/src/asr/qspeechrecognitionengine.cpp
index 5312bc4..b301d83 100755
--- a/src/asr/qspeechrecognitionengine.cpp
+++ b/src/asr/qspeechrecognitionengine.cpp
@@ -40,6 +40,85 @@
QT_BEGIN_NAMESPACE
+/*
+ When these conditions are satisfied, QStringLiteral is implemented by
+ gcc's statement-expression extension. However, in this file it will
+ not work, because "statement-expressions are not allowed outside functions
+ nor in template-argument lists".
+ MSVC 2012 produces an internal compiler error on encountering
+ QStringLiteral in this context.
+
+ Fall back to the less-performant QLatin1String in this case.
+*/
+#if defined(Q_CC_GNU) && defined(Q_COMPILER_LAMBDA)
+# define Q_DEFINE_ASR_ENGINE_PARAMETER(key) const QString QSpeechRecognitionEngine::key(QStringLiteral(#key))
+#else
+# define Q_DEFINE_ASR_ENGINE_PARAMETER(key) const QString QSpeechRecognitionEngine::key(QLatin1String(#key))
+#endif
+
+/*! \variable QSpeechRecognitionEngine::Locale
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(Locale);
+
+/*! \variable QSpeechRecognitionEngine::Dictionary
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(Dictionary);
+
+/*! \variable QSpeechRecognitionEngine::ResourceDirectory
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(ResourceDirectory);
+
+/*! \variable QSpeechRecognitionEngine::DataDirectory
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(DataDirectory);
+
+/*! \variable QSpeechRecognitionEngine::DebugAudioDirectory
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(DebugAudioDirectory);
+
+/*! \variable QSpeechRecognitionEngine::AudioSampleRate
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(AudioSampleRate);
+
+/*! \variable QSpeechRecognitionEngine::AudioInputFile
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(AudioInputFile);
+
+/*! \variable QSpeechRecognitionEngine::AudioInputDevice
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(AudioInputDevice);
+
+/*! \variable QSpeechRecognitionEngine::AudioInputDevices
+
+ This constant is used as the key for a speech recognition engine parameter.
+ See supportedParameters().
+*/
+Q_DEFINE_ASR_ENGINE_PARAMETER(AudioInputDevices);
+
/*!
\class QSpeechRecognitionEngine
\inmodule QtSpeech
@@ -96,11 +175,11 @@ QT_BEGIN_NAMESPACE
\li Value type
\li Description
\row
- \li locale
+ \li \l Locale
\li QLocale
\li The locale for speech recognition
\row
- \li dictionary
+ \li \l Dictionary
\li QUrl
\li Location of the speech recognition dictionary (default lexicon).
If the URL contains a relative file path, the dictionary is loaded
@@ -109,7 +188,7 @@ QT_BEGIN_NAMESPACE
The format of the dictionary is engine-specific; some engines or grammars may not need
a dictionary at all.
\row
- \li resourceDirectory
+ \li \l ResourceDirectory
\li QString
\li Path to the directory where engine-specific resource files are located.
If not given, the program's working directory is used.
@@ -117,23 +196,23 @@ QT_BEGIN_NAMESPACE
under this root directory, in which case BCP 47 names should be used for
the sub-directories.
\row
- \li dataDirectory
+ \li \l DataDirectory
\li QString
\li Path to a persistent directory where any engine-specific data
can be stored between application restarts. If not given, the program's
working directory is used.
\row
- \li debugAudioDirectory
+ \li \l DebugAudioDirectory
\li QString
\li Path to a directory where the engine should write all the audio clips that go
to the recognizer. If not given (or empty), no audio clips will be produced.
This feature is meant to be used only for debugging purposes.
\row
- \li audioSampleRate
+ \li \l AudioSampleRate
\li int
\li Samples per second in the input audio. Default: 16000.
\row
- \li audioInputFile
+ \li \l AudioInputFile
\li QString
\li Path to an audio file that should be read instead of an audio input device.
The given file will be read once for each recognition session.
@@ -141,17 +220,20 @@ QT_BEGIN_NAMESPACE
until mute is released. The recognition session will be automatically stopped
when the entire file has been read.
\row
- \li audioInputDevices
+ \li \l AudioInputDevices
\li QStringList
\li Names of the supported audio input devices (read-only).
\row
- \li audioInputDevice
+ \li \l AudioInputDevice
\li QString
\li Name of the currently selected audio input device.
If not set, the system default will be used.
\endtable
Returns the names of the supported engine parameters.
+
+ \sa setParameter()
+ \sa parameter()
*/
/*!
diff --git a/src/asr/qspeechrecognitionengine.h b/src/asr/qspeechrecognitionengine.h
index 40464a7..7681f17 100644
--- a/src/asr/qspeechrecognitionengine.h
+++ b/src/asr/qspeechrecognitionengine.h
@@ -63,6 +63,17 @@ public:
Q_INVOKABLE virtual QList<QString> supportedParameters() const = 0;
virtual bool isCreated() = 0;
+ // Common engine parameter keys:
+ static const QString Locale;
+ static const QString Dictionary;
+ static const QString ResourceDirectory;
+ static const QString DataDirectory;
+ static const QString DebugAudioDirectory;
+ static const QString AudioSampleRate;
+ static const QString AudioInputFile;
+ static const QString AudioInputDevice;
+ static const QString AudioInputDevices;
+
Q_SIGNALS:
void created();
};
diff --git a/src/asr/qspeechrecognitionmanager.cpp b/src/asr/qspeechrecognitionmanager.cpp
index 2b429f5..ccc1309 100755
--- a/src/asr/qspeechrecognitionmanager.cpp
+++ b/src/asr/qspeechrecognitionmanager.cpp
@@ -91,7 +91,7 @@ void QSpeechRecognitionManager::setSession(int session)
void QSpeechRecognitionManager::createEngine(const QString &engineName, const QString &provider, const QVariantMap &parameters)
{
QVariantMap errorParams;
- errorParams.insert(QLatin1String("engine"), engineName);
+ errorParams.insert(QSpeechRecognition::Engine, engineName);
if (!m_engines.contains(engineName)) {
QSpeechRecognitionPluginEngine *engine = 0;
QSpeechRecognitionPluginLoader *engineLoader = m_engineLoaders.value(provider, 0);
@@ -117,11 +117,11 @@ void QSpeechRecognitionManager::createEngine(const QString &engineName, const QS
} else {
delete engineLoader;
if (!errorString.isEmpty())
- errorParams.insert(QLatin1String("reason"), errorString);
+ errorParams.insert(QSpeechRecognition::Reason, errorString);
emit error(NO_SESSION, QSpeechRecognition::EngineInitError, errorParams);
}
} else {
- errorParams.insert(QLatin1String("reason"), QLatin1String("Engine with the given name already exists"));
+ errorParams.insert(QSpeechRecognition::Reason, QLatin1String("Engine with the given name already exists"));
emit error(NO_SESSION, QSpeechRecognition::EngineInitError, errorParams);
}
}
@@ -130,8 +130,8 @@ void QSpeechRecognitionManager::createGrammar(const QString &engineName, const Q
{
qCDebug(lcSpeechAsr) << "QSpeechRecognitionManager::createGrammar()";
QVariantMap errorParams;
- errorParams.insert(QLatin1String("engine"), engineName);
- errorParams.insert(QLatin1String("grammar"), grammarName);
+ errorParams.insert(QSpeechRecognition::Engine, engineName);
+ errorParams.insert(QSpeechRecognition::Grammar, grammarName);
if (!m_grammars.contains(grammarName)) {
QSpeechRecognitionPluginEngine* engine = m_engines.value(engineName, 0);
if (engine) {
@@ -144,15 +144,15 @@ void QSpeechRecognitionManager::createGrammar(const QString &engineName, const Q
emit grammarCreated(grammarName);
} else {
if (!errorString.isEmpty())
- errorParams.insert(QLatin1String("reason"), errorString);
+ errorParams.insert(QSpeechRecognition::Reason, errorString);
emit error(NO_SESSION, QSpeechRecognition::GrammarInitError, errorParams);
}
} else {
- errorParams.insert(QLatin1String("reason"), QLatin1String("The given engine was not properly initialized"));
+ errorParams.insert(QSpeechRecognition::Reason, QLatin1String("The given engine was not properly initialized"));
emit error(NO_SESSION, QSpeechRecognition::GrammarInitError, errorParams);
}
} else {
- errorParams.insert(QLatin1String("reason"), QLatin1String("Grammar with the given name already exists"));
+ errorParams.insert(QSpeechRecognition::Reason, QLatin1String("Grammar with the given name already exists"));
emit error(NO_SESSION, QSpeechRecognition::GrammarInitError, errorParams);
}
}
@@ -172,7 +172,7 @@ void QSpeechRecognitionManager::setGrammar(const QString &grammarName)
{
qCDebug(lcSpeechAsr) << "QSpeechRecognitionManager::setGrammar()";
QVariantMap errorParams;
- errorParams.insert(QLatin1String("grammar"), grammarName);
+ errorParams.insert(QSpeechRecognition::Grammar, grammarName);
GrammarInfo grammar = m_grammars.value(grammarName, GrammarInfo());
if (m_grammar.engine && grammar.engine && grammar.engine != m_grammar.engine)
m_grammar.engine->reset(); // Make sure any audio resources are released
@@ -191,7 +191,7 @@ void QSpeechRecognitionManager::setGrammar(const QString &grammarName)
m_grammar = grammar;
} else {
if (!errorString.isEmpty())
- errorParams.insert(QLatin1String("reason"), errorString);
+ errorParams.insert(QSpeechRecognition::Reason, errorString);
emit error(m_session, errorCode, errorParams);
}
}
@@ -218,9 +218,9 @@ void QSpeechRecognitionManager::startListening()
} else {
emit notListening(m_session);
QVariantMap errorParams;
- errorParams.insert(QLatin1String("engine"), m_grammar.engine->name());
+ errorParams.insert(QSpeechRecognition::Engine, m_grammar.engine->name());
if (!errorString.isEmpty())
- errorParams.insert(QLatin1String("reason"), errorString);
+ errorParams.insert(QSpeechRecognition::Reason, errorString);
emit error(m_session, errorCode, errorParams);
}
}
@@ -305,9 +305,9 @@ void QSpeechRecognitionManager::setEngineParameter(const QString &engineName, co
emit engineParameterUpdated(engineName, key, value);
} else {
QVariantMap errorParams;
- errorParams.insert(QLatin1String("engine"), engineName);
+ errorParams.insert(QSpeechRecognition::Engine, engineName);
if (!errorString.isEmpty())
- errorParams.insert(QLatin1String("reason"), errorString);
+ errorParams.insert(QSpeechRecognition::Reason, errorString);
emit error(m_session, errorCode, errorParams);
}
}
diff --git a/src/asr/qspeechrecognitionpluginengine.cpp b/src/asr/qspeechrecognitionpluginengine.cpp
index e86778f..bee840e 100755
--- a/src/asr/qspeechrecognitionpluginengine.cpp
+++ b/src/asr/qspeechrecognitionpluginengine.cpp
@@ -268,16 +268,16 @@ QSpeechRecognitionPluginEngine::QSpeechRecognitionPluginEngine(const QString &na
QVariantMap knownParameters;
Q_D(QSpeechRecognitionPluginEngine);
// Initialize built-in parameters to their default values if the value is not set:
- if (!initialParameters.contains(QLatin1String("locale")))
- initialParameters.insert(QLatin1String("locale"), locale());
- if (!initialParameters.contains(QLatin1String("audioSampleRate")))
- initialParameters.insert(QLatin1String("audioSampleRate"), audioSampleRate());
- if (!initialParameters.contains(QLatin1String("resourceDirectory")))
- initialParameters.insert(QLatin1String("resourceDirectory"), resourceDirectory().path());
- if (!initialParameters.contains(QLatin1String("dataDirectory")))
- initialParameters.insert(QLatin1String("dataDirectory"), dataDirectory().path());
- if (!initialParameters.contains(QLatin1String("dictionary")))
- initialParameters.insert(QLatin1String("dictionary"), dictionaryLocation());
+ if (!initialParameters.contains(QSpeechRecognitionEngine::Locale))
+ initialParameters.insert(QSpeechRecognitionEngine::Locale, locale());
+ if (!initialParameters.contains(QSpeechRecognitionEngine::AudioSampleRate))
+ initialParameters.insert(QSpeechRecognitionEngine::AudioSampleRate, audioSampleRate());
+ if (!initialParameters.contains(QSpeechRecognitionEngine::ResourceDirectory))
+ initialParameters.insert(QSpeechRecognitionEngine::ResourceDirectory, resourceDirectory().path());
+ if (!initialParameters.contains(QSpeechRecognitionEngine::DataDirectory))
+ initialParameters.insert(QSpeechRecognitionEngine::DataDirectory, dataDirectory().path());
+ if (!initialParameters.contains(QSpeechRecognitionEngine::Dictionary))
+ initialParameters.insert(QSpeechRecognitionEngine::Dictionary, dictionaryLocation());
// Filter out unknown parameters:
for (QVariantMap::const_iterator param = initialParameters.begin(); param != initialParameters.end(); ++param) {
if (supportedParameters.contains(param.key()))
@@ -333,13 +333,13 @@ const QVariantMap &QSpeechRecognitionPluginEngine::parameters() const
Extracts the locale from engine parameters.
If not set, returns the default value.
- Key "locale" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::Locale should be listed in the supported engine parameters.
*/
QLocale QSpeechRecognitionPluginEngine::locale() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("locale"))) {
- const QVariant &locale = d->m_parameters[QLatin1String("locale")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::Locale)) {
+ const QVariant &locale = d->m_parameters[QSpeechRecognitionEngine::Locale];
if (locale.userType() == QMetaType::QLocale)
return locale.toLocale();
}
@@ -350,13 +350,14 @@ QLocale QSpeechRecognitionPluginEngine::locale() const
Extracts the engine resource directory from engine parameters.
If not set, returns the default value.
- Key "resourceDirectory" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::ResourceDirectory should be listed in the supported
+ engine parameters.
*/
QDir QSpeechRecognitionPluginEngine::resourceDirectory() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("resourceDirectory"))) {
- const QVariant &resourceDirectory = d->m_parameters[QLatin1String("resourceDirectory")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::ResourceDirectory)) {
+ const QVariant &resourceDirectory = d->m_parameters[QSpeechRecognitionEngine::ResourceDirectory];
if (resourceDirectory.userType() == QMetaType::QString)
return QDir(resourceDirectory.toString());
}
@@ -367,13 +368,13 @@ QDir QSpeechRecognitionPluginEngine::resourceDirectory() const
Extracts the writable data directory from engine parameters.
If not set, returns the default value.
- Key "dataDirectory" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::DataDirectory should be listed in the supported engine parameters.
*/
QDir QSpeechRecognitionPluginEngine::dataDirectory() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("dataDirectory"))) {
- const QVariant &dataDirectory = d->m_parameters[QLatin1String("dataDirectory")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::DataDirectory)) {
+ const QVariant &dataDirectory = d->m_parameters[QSpeechRecognitionEngine::DataDirectory];
if (dataDirectory.userType() == QMetaType::QString) {
return QDir(dataDirectory.toString());
}
@@ -385,13 +386,13 @@ QDir QSpeechRecognitionPluginEngine::dataDirectory() const
Extracts the dictionary URL from the engine parameters.
If not set, returns the default value.
- Key "dictionary" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::Dictionary should be listed in the supported engine parameters.
*/
QUrl QSpeechRecognitionPluginEngine::dictionaryLocation() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("dictionary"))) {
- const QVariant &dictionaryLocation = d->m_parameters[QLatin1String("dictionary")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::Dictionary)) {
+ const QVariant &dictionaryLocation = d->m_parameters[QSpeechRecognitionEngine::Dictionary];
if (dictionaryLocation.userType() == QMetaType::QUrl) {
return dictionaryLocation.toUrl();
}
@@ -403,13 +404,14 @@ QUrl QSpeechRecognitionPluginEngine::dictionaryLocation() const
Extracts the audio sample rate from the engine parameters.
If not set, returns the default value.
- Key "audioSampleRate" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::AudioSampleRate should be listed in the supported
+ engine parameters.
*/
int QSpeechRecognitionPluginEngine::audioSampleRate() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("audioSampleRate"))) {
- const QVariant &audioSampleRate = d->m_parameters[QLatin1String("audioSampleRate")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::AudioSampleRate)) {
+ const QVariant &audioSampleRate = d->m_parameters[QSpeechRecognitionEngine::AudioSampleRate];
if (audioSampleRate.userType() == QMetaType::Int)
return audioSampleRate.toInt();
}
@@ -420,13 +422,13 @@ int QSpeechRecognitionPluginEngine::audioSampleRate() const
Extracts the audio input file path from the engine parameters.
If not set, returns an empty string.
- Key "audioInputFile" should be listed in the supported engine parameters.
+ Key QSpeechRecognitionEngine::AudioInputFile should be listed in the supported engine parameters.
*/
QString QSpeechRecognitionPluginEngine::audioInputFile() const
{
Q_D(const QSpeechRecognitionPluginEngine);
- if (d->m_parameters.contains(QLatin1String("audioInputFile"))) {
- const QVariant &audioInputFile = d->m_parameters[QLatin1String("audioInputFile")];
+ if (d->m_parameters.contains(QSpeechRecognitionEngine::AudioInputFile)) {
+ const QVariant &audioInputFile = d->m_parameters[QSpeechRecognitionEngine::AudioInputFile];
if (audioInputFile.userType() == QMetaType::QString) {
return audioInputFile.toString();
}
@@ -478,8 +480,9 @@ QString QSpeechRecognitionPluginEngine::localizedFilePath(const QString &filePat
Creates a WAV-file for writing debug audio.
If \a filePath is an absolute path, always attempts to create the file. If a relative
- file path is given, only creates the file if engine parameter "debugAudioDirectory" has
- been set (see QSpeechRecognition::createEngine()).
+ file path is given, only creates the file if engine parameter
+ QSpeechRecognitionEngine::DebugAudioDirectory has been set (see
+ QSpeechRecognition::createEngine()).
Parameters \a sampleRate, \a sampleSize and \a channelCount specify the type of audio data
that will be written to the file. Sample size should be expressed in bits.
@@ -496,8 +499,8 @@ QFile *QSpeechRecognitionPluginEngine::openDebugWavFile(const QString &filePath,
QString finalPath;
if (QDir::isAbsolutePath(filePath)) {
finalPath = filePath;
- } else if (d->m_parameters.contains(QLatin1String("debugAudioDirectory"))) {
- QString audioDirPath = d->m_parameters.value(QLatin1String("debugAudioDirectory")).toString();
+ } else if (d->m_parameters.contains(QSpeechRecognitionEngine::DebugAudioDirectory)) {
+ QString audioDirPath = d->m_parameters.value(QSpeechRecognitionEngine::DebugAudioDirectory).toString();
if (!audioDirPath.isEmpty()) {
QDir audioDir(audioDirPath);
if (audioDir.exists())
diff --git a/src/plugins/asr/pocketsphinx/doc/src/pocketsphinx.qdoc b/src/plugins/asr/pocketsphinx/doc/src/pocketsphinx.qdoc
index 6fbce56..77019fe 100644
--- a/src/plugins/asr/pocketsphinx/doc/src/pocketsphinx.qdoc
+++ b/src/plugins/asr/pocketsphinx/doc/src/pocketsphinx.qdoc
@@ -65,41 +65,41 @@
\li Value type
\li Description
\row
- \li locale
+ \li Locale
\li QLocale
\li
\row
- \li dictionary
+ \li Dictionary
\li QUrl
\li PocketSphinx (CMU) format dictionary file.
If not given, file "lexicon.dict" in the locale-specific resource directory is used.
Loading the dictionary from Qt resources is not supported.
\row
- \li resourceDirectory
+ \li ResourceDirectory
\li QString
\li
\row
- \li dataDirectory
+ \li DataDirectory
\li QString
\li
\row
- \li debugAudioDirectory
+ \li DebugAudioDirectory
\li QString
\li
\row
- \li audioSampleRate
+ \li AudioSampleRate
\li int
\li
\row
- \li audioInputFile
+ \li AudioInputFile
\li QString
\li
\row
- \li audioInputDevices
+ \li AudioInputDevices
\li QStringList
\li
\row
- \li audioInputDevice
+ \li AudioInputDevice
\li QString
\li
\endtable
diff --git a/src/plugins/asr/pocketsphinx/qspeechrecognitionengine_pocketsphinx.cpp b/src/plugins/asr/pocketsphinx/qspeechrecognitionengine_pocketsphinx.cpp
index a4c5c8c..8afb937 100755
--- a/src/plugins/asr/pocketsphinx/qspeechrecognitionengine_pocketsphinx.cpp
+++ b/src/plugins/asr/pocketsphinx/qspeechrecognitionengine_pocketsphinx.cpp
@@ -68,15 +68,15 @@ void PocketShpinxErrorCb(void *user_data, err_lvl_t lvl, const char *fmt, ...)
QSpeechRecognitionEnginePocketSphinx::QSpeechRecognitionEnginePocketSphinx(const QString &name,
const QVariantMap &parameters, QObject *parent)
: QSpeechRecognitionPluginEngine(name, createEngineParameters(parameters),
- QStringList() << "locale"
- << "dictionary"
- << "resourceDirectory"
- << "dataDirectory"
- << "debugAudioDirectory"
- << "audioSampleRate"
- << "audioInputFile"
- << "audioInputDevices"
- << "audioInputDevice",
+ QStringList() << QSpeechRecognitionEngine::Locale
+ << QSpeechRecognitionEngine::Dictionary
+ << QSpeechRecognitionEngine::ResourceDirectory
+ << QSpeechRecognitionEngine::DataDirectory
+ << QSpeechRecognitionEngine::DebugAudioDirectory
+ << QSpeechRecognitionEngine::AudioSampleRate
+ << QSpeechRecognitionEngine::AudioInputFile
+ << QSpeechRecognitionEngine::AudioInputDevices
+ << QSpeechRecognitionEngine::AudioInputDevice,
parent),
m_session(0),
m_decoder(0),
@@ -91,7 +91,7 @@ QSpeechRecognitionEnginePocketSphinx::QSpeechRecognitionEnginePocketSphinx(const
m_cmnSize(0)
{
const QVariantMap &engineParams = QSpeechRecognitionPluginEngine::parameters();
- QString inputDeviceName = engineParams["audioInputDevice"].toString();
+ QString inputDeviceName = engineParams[QSpeechRecognitionEngine::AudioInputDevice].toString();
QList<QAudioDeviceInfo> audioDevices = QAudioDeviceInfo::availableDevices(QAudio::AudioInput);
foreach (QAudioDeviceInfo device, audioDevices) {
if (!inputDeviceName.isEmpty() && device.deviceName() == inputDeviceName) {
@@ -194,7 +194,7 @@ void QSpeechRecognitionEnginePocketSphinx::createAudioInput()
QSpeechRecognition::Error QSpeechRecognitionEnginePocketSphinx::updateParameter(const QString &key, const QVariant &value, QString *errorString)
{
- if (key == "audioInputDevice") {
+ if (key == QSpeechRecognitionEngine::AudioInputDevice) {
if (m_sessionStarted) {
*errorString = "Cannot set audio input device while the engine is busy";
return QSpeechRecognition::EngineParameterError;
@@ -213,7 +213,7 @@ QSpeechRecognition::Error QSpeechRecognitionEnginePocketSphinx::updateParameter(
}
}
*errorString = QString("Audio input device with name \"") + value.toString() + "\" does not exist";
- } else if (key == "audioInputFile") {
+ } else if (key == QSpeechRecognitionEngine::AudioInputFile) {
if (value.type() != QVariant::String) {
*errorString = QString("Parameter \"") + key + "\" has invalid type";
return QSpeechRecognition::EngineParameterError;
@@ -322,7 +322,7 @@ void QSpeechRecognitionEnginePocketSphinx::stopListening(qint64 timestamp)
QString transcription(hyp);
qCDebug(lcSpeechAsrPocketSphinx) << "Result: " + transcription;
QVariantMap params;
- params.insert("transcription", QVariant(transcription));
+ params.insert(QSpeechRecognition::Transcription, QVariant(transcription));
emit result(m_session, m_grammar, params);
// Store the adapted CMN values:
storeCmn();
@@ -337,7 +337,7 @@ void QSpeechRecognitionEnginePocketSphinx::stopListening(qint64 timestamp)
delete m_debugAudioFile;
m_debugAudioFile = 0;
m_sessionStarted = false;
- emit attributeUpdated(NO_SESSION, "audioLevel", QVariant((qreal)0.0));
+ emit attributeUpdated(NO_SESSION, QSpeechRecognition::AudioLevel, QVariant((qreal)0.0));
}
}
@@ -352,7 +352,7 @@ void QSpeechRecognitionEnginePocketSphinx::abortListening()
if (!m_muted)
ps_end_utt(m_decoder);
m_sessionStarted = false;
- emit attributeUpdated(NO_SESSION, "audioLevel", QVariant((qreal)0.0));
+ emit attributeUpdated(NO_SESSION, QSpeechRecognition::AudioLevel, QVariant((qreal)0.0));
}
}
@@ -384,20 +384,20 @@ void QSpeechRecognitionEnginePocketSphinx::onAudioDataAvailable()
{
if (m_sessionStarted && !m_muted) {
emit requestProcess();
- emit attributeUpdated(m_session, "audioLevel", QVariant(m_audioBuffer->audioLevel()));
+ emit attributeUpdated(m_session, QSpeechRecognition::AudioLevel, QVariant(m_audioBuffer->audioLevel()));
}
}
void QSpeechRecognitionEnginePocketSphinx::onAudioStateChanged(QAudio::State state)
{
QVariantMap errorParams;
- errorParams.insert("engine", name());
+ errorParams.insert(QSpeechRecognition::Engine, name());
switch (state) {
case QAudio::ActiveState:
break;
case QAudio::StoppedState:
if (m_audioInput->error() != QAudio::NoError) {
- errorParams.insert("reason", QString("Error (") + QString::number(m_audioInput->error()) + ") in QAudioInput");
+ errorParams.insert(QSpeechRecognition::Reason, QString("Error (") + QString::number(m_audioInput->error()) + ") in QAudioInput");
emit error(m_session, QSpeechRecognition::AudioError, errorParams);
}
break;
@@ -409,12 +409,12 @@ void QSpeechRecognitionEnginePocketSphinx::onAudioStateChanged(QAudio::State sta
void QSpeechRecognitionEnginePocketSphinx::onAudioDecoderError(QAudioDecoder::Error errorCode)
{
QVariantMap errorParams;
- errorParams.insert("engine", name());
+ errorParams.insert(QSpeechRecognition::Engine, name());
switch (errorCode) {
case QAudioDecoder::NoError:
break;
default:
- errorParams.insert("reason", QString("QAudioDecoder error (") + QString::number(errorCode) + "): " + m_inputFileDecoder.errorString());
+ errorParams.insert(QSpeechRecognition::Reason, QString("QAudioDecoder error (") + QString::number(errorCode) + "): " + m_inputFileDecoder.errorString());
emit error(m_session, QSpeechRecognition::AudioError, errorParams);
break;
}
@@ -467,16 +467,16 @@ QVariantMap QSpeechRecognitionEnginePocketSphinx::createEngineParameters(const Q
QStringList audioDeviceNames;
QString inputDeviceName;
QAudioDeviceInfo inputDevice = QAudioDeviceInfo::defaultInputDevice();
- if (inputParameters.contains("audioInputDevice"))
- inputDeviceName = inputParameters["audioInputDevice"].toString();
+ if (inputParameters.contains(QSpeechRecognitionEngine::AudioInputDevice))
+ inputDeviceName = inputParameters[QSpeechRecognitionEngine::AudioInputDevice].toString();
QList<QAudioDeviceInfo> audioDevices = QAudioDeviceInfo::availableDevices(QAudio::AudioInput);
foreach (QAudioDeviceInfo device, audioDevices) {
audioDeviceNames.append(device.deviceName());
if (!inputDeviceName.isEmpty() && device.deviceName() == inputDeviceName)
inputDevice = device;
}
- newParameters.insert("audioInputDevices", audioDeviceNames);
- newParameters.insert("audioInputDevice", inputDevice.deviceName());
+ newParameters.insert(QSpeechRecognitionEngine::AudioInputDevices, audioDeviceNames);
+ newParameters.insert(QSpeechRecognitionEngine::AudioInputDevice, inputDevice.deviceName());
return newParameters;
}