summaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorVolker Hilsheimer <volker.hilsheimer@qt.io>2023-02-18 11:44:14 +0100
committerVolker Hilsheimer <volker.hilsheimer@qt.io>2023-03-11 13:15:22 +0100
commit0ff6f999673ff05d406c5d9ca552f47f2f51cc63 (patch)
tree207b3f36821814f09322580b562152bf61faf200 /tests
parenta7ecb6423fe7003e4bc7a45ea2133aa872d94b3f (diff)
Add support for enqueuing text
Add a sayNext() slot that doesn't stop any ongoing speech, and instead enqueues the new text. The cross-platform implementation keeps track of enqueued texts, and processes the next text when the engine's state changes to ready. Make this the default behavior for synthesize(). It makes no sense to interrupt an ongoing process, the application can just stop an ongoing process and discard the PCM data it doesn't want. The stateChanged signal does not get emitted when the engine's state changes to Ready and there are texts in the queue. To allow applications to keep track of the text that is about to be spoken, add a new aboutToSynthesize signal that gets emitted each time text is about to be passed down to the engine. This also allows applications to make last-minute changes to the voice attributes. To accurately keep track of which text within the data structure of the application is about to be spoken or finished, applications do need to keep track of the text segments passed to QTextToSpeech and update their "current" iterator with the aboutToSynthesize signal. Task-number: QTBUG-102355 Change-Id: I7b8621e15ee8d520b156e1fd771e120ded731fd8 Reviewed-by: Qt CI Bot <qt_ci_bot@qt-project.org> Reviewed-by: Axel Spoerl <axel.spoerl@qt.io>
Diffstat (limited to 'tests')
-rw-r--r--tests/auto/qtexttospeech/tst_qtexttospeech.cpp104
1 files changed, 104 insertions, 0 deletions
diff --git a/tests/auto/qtexttospeech/tst_qtexttospeech.cpp b/tests/auto/qtexttospeech/tst_qtexttospeech.cpp
index b83c11e..a7c2a3c 100644
--- a/tests/auto/qtexttospeech/tst_qtexttospeech.cpp
+++ b/tests/auto/qtexttospeech/tst_qtexttospeech.cpp
@@ -48,6 +48,12 @@ private slots:
void sayWithVoices();
void sayWithRates();
+ void sayMultiple_data();
+ void sayMultiple();
+
+ void pauseAtUtterance_data();
+ void pauseAtUtterance();
+
void sayingWord_data();
void sayingWord();
@@ -490,6 +496,104 @@ void tst_QTextToSpeech::sayWithRates()
logger.dismiss();
}
+void tst_QTextToSpeech::sayMultiple_data()
+{
+ QTest::addColumn<QStringList>("textList");
+
+ QTest::addRow("one") << QStringList{"one"};
+ QTest::addRow("three") << QStringList{"one", "two", "three"};
+}
+
+void tst_QTextToSpeech::sayMultiple()
+{
+ QFETCH_GLOBAL(QString, engine);
+ if (engine != "mock" && !hasDefaultAudioOutput())
+ QSKIP("No audio device present");
+
+ QTextToSpeech tts(engine);
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Ready);
+ selectWorkingVoice(&tts);
+ auto logger = qScopeGuard([&tts]{
+ qWarning() << "Failure with voice" << tts.voice();
+ });
+
+ int speakingCount = 0;
+ bool doneSpeaking = false;
+ connect(&tts, &QTextToSpeech::stateChanged, this, [&](QTextToSpeech::State state){
+ if (state == QTextToSpeech::Speaking)
+ ++speakingCount;
+ else if (state == QTextToSpeech::Ready)
+ doneSpeaking = true;
+ });
+ QSignalSpy aboutToSynthesizeSpy(&tts, &QTextToSpeech::aboutToSynthesize);
+
+ QFETCH(const QStringList, textList);
+ for (qsizetype i = 0; i < textList.count(); ++i) {
+ const QString &text = textList.at(i);
+ tts.sayNext(text);
+ if (!i) // wait for the engine to start speaking
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Speaking);
+ }
+
+ QTRY_VERIFY(doneSpeaking);
+ QCOMPARE(aboutToSynthesizeSpy.count(), textList.size());
+ QCOMPARE(speakingCount, 1);
+
+ logger.dismiss();
+}
+
+void tst_QTextToSpeech::pauseAtUtterance_data()
+{
+ sayMultiple_data();
+}
+
+void tst_QTextToSpeech::pauseAtUtterance()
+{
+ QFETCH_GLOBAL(QString, engine);
+ if (engine != "mock" && !hasDefaultAudioOutput())
+ QSKIP("No audio device present");
+
+ QFETCH(const QStringList, textList);
+
+ QTextToSpeech tts(engine);
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Ready);
+ selectWorkingVoice(&tts);
+
+ int atIndex = -1;
+ bool paused = false;
+ connect(&tts, &QTextToSpeech::aboutToSynthesize, [&]{
+ ++atIndex;
+ if (atIndex == 1 && !paused) {
+ tts.pause(QTextToSpeech::BoundaryHint::Utterance);
+ paused = true;
+ --atIndex;
+ }
+ });
+ QStringList wordsSpoken;
+ connect(&tts, &QTextToSpeech::sayingWord, [&](int at, int length){
+ wordsSpoken += textList.at(atIndex).mid(at, length);
+ });
+
+ for (qsizetype i = 0; i < textList.count(); ++i) {
+ const QString &text = textList.at(i);
+ tts.sayNext(text);
+ if (!i)
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Speaking);
+ }
+ if (textList.count() == 1)
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Ready);
+ else
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Paused);
+ if (tts.engineCapabilities() & QTextToSpeech::Capability::WordByWordProgress)
+ QCOMPARE(wordsSpoken, QStringList{textList.first()});
+ tts.resume();
+ QTRY_COMPARE(tts.state(), QTextToSpeech::Ready);
+ if (tts.engineCapabilities() & QTextToSpeech::Capability::WordByWordProgress)
+ QCOMPARE(wordsSpoken, textList);
+ else
+ qInfo("Skipping test of spoken words");
+}
+
void tst_QTextToSpeech::sayingWord_data()
{
QTest::addColumn<QString>("text");