summary | refs | log | tree | commit | diff | stats
path: root/src
diff options
context:
space:
mode:
author: Jason McDonald <jason.mcdonald@nokia.com> 2012-02-17 13:39:30 +1000
committer: Qt by Nokia <qt-info@nokia.com> 2012-02-20 08:05:58 +0100
commit: eb52d78e90ad8870a055f586d9d14d7068acd95b (patch)
tree: f9a9bcb5df79f3e7a67e6e6b52e8bbec3b227202 /src
parent: 4cb09aea6a4f26f36961da8934819e823cfe2d4d (diff)
testlib: Report one test result per benchmark test.
Prior to this commit, a benchmark test could report 0..n passes and 0..m fails or skips, where n is the number of accumulation iterations used to collect benchmark data and m is the number of times the test function was invoked. Depending on the type of benchmark measurer being used, this could result in a very large volume of test output and inconsistent pass, fail and skip counts between test runs.

This commit changes the behaviour so that each benchmark test reports one pass, fail or skip, regardless of the number of iterations used to collect benchmark data.

This commit also prevents benchmark data being reported in the test output if the benchmark test failed or skipped, as any benchmark data is of dubious value in such cases. The latter change in behaviour requires a minor modification to the badxml selftest, which now tests quoting of literal strings in xml test output for both passing and failing benchmarks.

Finally, this commit also adds a new selftest specifically for verifying correct behaviour for benchmarks that fail or skip.

Task-number: QTBUG-24313
Change-Id: I3426dc659a7511b62fd183a031c7235bc753f497
Reviewed-by: Rohan McGovern <rohan.mcgovern@nokia.com>
Diffstat (limited to 'src')
-rw-r--r-- src/testlib/qtestcase.cpp | 39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/src/testlib/qtestcase.cpp b/src/testlib/qtestcase.cpp
index 8b9ba6d748..56959ba62d 100644
--- a/src/testlib/qtestcase.cpp
+++ b/src/testlib/qtestcase.cpp
@@ -1490,6 +1490,7 @@ static void qInvokeTestMethodDataEntry(char *slot)
{
/* Benchmarking: for each median iteration*/
+ bool isBenchmark = false;
int i = (QBenchmarkGlobalData::current->measurer->needsWarmupIteration()) ? -1 : 0;
QList<QBenchmarkResult> results;
@@ -1516,25 +1517,30 @@ static void qInvokeTestMethodDataEntry(char *slot)
if (!invokeOk)
QTestResult::addFailure("Unable to execute slot", __FILE__, __LINE__);
+ isBenchmark = QBenchmarkTestMethodData::current->isBenchmark();
+
QTestResult::finishedCurrentTestData();
invokeMethod(QTest::currentTestObject, "cleanup()");
- QTestResult::finishedCurrentTestDataCleanup();
+
+ // If the test isn't a benchmark, finalize the result after cleanup() has finished.
+ if (!isBenchmark)
+ QTestResult::finishedCurrentTestDataCleanup();
// If this test method has a benchmark, repeat until all measurements are
// acceptable.
// The QBENCHMARK macro increases the number of iterations for each run until
// this happens.
- } while (invokeOk
- && QBenchmarkTestMethodData::current->isBenchmark()
- && QBenchmarkTestMethodData::current->resultsAccepted() == false);
+ } while (invokeOk && isBenchmark
+ && QBenchmarkTestMethodData::current->resultsAccepted() == false
+ && !QTestResult::skipCurrentTest() && !QTestResult::currentTestFailed());
QBenchmarkTestMethodData::current->endDataRun();
- if (i > -1) // iteration -1 is the warmup iteration.
- results.append(QBenchmarkTestMethodData::current->result);
+ if (!QTestResult::skipCurrentTest() && !QTestResult::currentTestFailed()) {
+ if (i > -1) // iteration -1 is the warmup iteration.
+ results.append(QBenchmarkTestMethodData::current->result);
- if (QBenchmarkTestMethodData::current->isBenchmark() &&
- QBenchmarkGlobalData::current->verboseOutput) {
+ if (isBenchmark && QBenchmarkGlobalData::current->verboseOutput) {
if (i == -1) {
QTestLog::info(qPrintable(
QString::fromLatin1("warmup stage result : %1")
@@ -1545,12 +1551,19 @@ static void qInvokeTestMethodDataEntry(char *slot)
.arg(QBenchmarkTestMethodData::current->result.value)), 0, 0);
}
}
- } while (QBenchmarkTestMethodData::current->isBenchmark()
- && (++i < QBenchmarkGlobalData::current->adjustMedianIterationCount()));
+ }
+ } while (isBenchmark
+ && (++i < QBenchmarkGlobalData::current->adjustMedianIterationCount())
+ && !QTestResult::skipCurrentTest() && !QTestResult::currentTestFailed());
- if (QBenchmarkTestMethodData::current->isBenchmark()
- && QBenchmarkTestMethodData::current->resultsAccepted())
- QTestLog::addBenchmarkResult(qMedian(results));
+ // If the test is a benchmark, finalize the result after all iterations have finished.
+ if (isBenchmark) {
+ bool testPassed = !QTestResult::skipCurrentTest() && !QTestResult::currentTestFailed();
+ QTestResult::finishedCurrentTestDataCleanup();
+ // Only report benchmark figures if the test passed
+ if (testPassed && QBenchmarkTestMethodData::current->resultsAccepted())
+ QTestLog::addBenchmarkResult(qMedian(results));
+ }
}
/*!