Diffstat (limited to 'tests/auto/qml/ecmascripttests')
-rw-r--r-- | tests/auto/qml/ecmascripttests/CMakeLists.txt | 13
-rw-r--r-- | tests/auto/qml/ecmascripttests/TestExpectations | 10
-rw-r--r-- | tests/auto/qml/ecmascripttests/qjstest/CMakeLists.txt | 28
-rw-r--r-- | tests/auto/qml/ecmascripttests/qjstest/main.cpp | 90
-rwxr-xr-x | tests/auto/qml/ecmascripttests/test262.py | 611
-rw-r--r-- | tests/auto/qml/ecmascripttests/test262runner.cpp (renamed from tests/auto/qml/ecmascripttests/qjstest/test262runner.cpp) | 529
-rw-r--r-- | tests/auto/qml/ecmascripttests/test262runner.h (renamed from tests/auto/qml/ecmascripttests/qjstest/test262runner.h) | 54
-rw-r--r-- | tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp | 94
8 files changed, 490 insertions(+), 939 deletions(-)
diff --git a/tests/auto/qml/ecmascripttests/CMakeLists.txt b/tests/auto/qml/ecmascripttests/CMakeLists.txt
index d3da3adb53..1ee70cb101 100644
--- a/tests/auto/qml/ecmascripttests/CMakeLists.txt
+++ b/tests/auto/qml/ecmascripttests/CMakeLists.txt
@@ -5,15 +5,22 @@
 ## tst_ecmascripttests Test:
 #####################################################################
 
+if(NOT QT_BUILD_STANDALONE_TESTS AND NOT QT_BUILDING_QT)
+    cmake_minimum_required(VERSION 3.16)
+    project(tst_ecmascripttests LANGUAGES CXX)
+    find_package(Qt6BuildInternals REQUIRED COMPONENTS STANDALONE_TEST)
+endif()
+
 # Collect test data
 file(GLOB_RECURSE test_data_glob
     RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
    test262/*)
 list(APPEND test_data ${test_data_glob})
+list(FILTER test_data EXCLUDE REGEX ".git")
 
 qt_internal_add_test(tst_ecmascripttests
     SOURCES
-        qjstest/test262runner.cpp qjstest/test262runner.h
+        test262runner.cpp test262runner.h
         tst_ecmascripttests.cpp
     LIBRARIES
         Qt::QmlPrivate
@@ -40,7 +47,3 @@ else()
         QT_QMLTEST_DATADIR="${CMAKE_CURRENT_SOURCE_DIR}/test262"
     )
 endif()
-
-if(NOT CMAKE_CROSSCOMPILING)
-    add_subdirectory(qjstest)
-endif()
diff --git a/tests/auto/qml/ecmascripttests/TestExpectations b/tests/auto/qml/ecmascripttests/TestExpectations
index 75fdd1cb0c..cc5eae456d 100644
--- a/tests/auto/qml/ecmascripttests/TestExpectations
+++ b/tests/auto/qml/ecmascripttests/TestExpectations
@@ -18,6 +18,12 @@ language/statements/labeled/let-identifier-with-newline.js sloppyFails
 language/statements/while/let-identifier-with-newline.js sloppyFails
 language/statements/with/let-identifier-with-newline.js sloppyFails
 
+# These failures are a defect in the Yarr regexp engine we are using.
+# They all amount to some variation of: /\udf06/u.exec('\ud834\udf06')
+built-ins/RegExp/prototype/Symbol.match/builtin-infer-unicode.js
+built-ins/RegExp/prototype/Symbol.search/u-lastindex-advance.js
+built-ins/RegExp/prototype/exec/u-lastindex-adv.js
+
 # The ES6/7 spec says that [[DefineOwnProperty]] on the module namespace exotic object
 # always returns false. This was changed in https://github.com/tc39/ecma262/pull/858
 # but it's not in the published spec yet.
@@ -92,7 +98,6 @@ built-ins/Array/prototype/slice/length-exceeding-integer-limit-proxied-array.js
 built-ins/Array/prototype/slice/length-exceeding-integer-limit.js fails
 built-ins/Array/prototype/some/15.4.4.17-3-28.js fails
 built-ins/Array/prototype/some/15.4.4.17-3-29.js fails
-built-ins/Array/prototype/sort/comparefn-nonfunction-call-throws.js fails
 built-ins/Array/prototype/splice/S15.4.4.12_A3_T1.js fails
 built-ins/Array/prototype/splice/clamps-length-to-integer-limit.js fails
 built-ins/Array/prototype/splice/create-ctor-non-object.js fails
@@ -211,7 +216,6 @@ built-ins/Promise/prototype/then/ctor-throws.js fails
 built-ins/Promise/race/ctx-ctor.js fails
 built-ins/Proxy/ownKeys/return-duplicate-entries-throws.js fails
 built-ins/Proxy/ownKeys/return-duplicate-symbol-entries-throws.js fails
-built-ins/RegExp/prototype/Symbol.match/builtin-success-u-return-val-groups.js fails
 built-ins/RegExp/prototype/Symbol.split/species-ctor.js fails
 built-ins/RegExp/prototype/exec/S15.10.6.2_A5_T3.js fails
 built-ins/RegExp/prototype/exec/failure-lastindex-access.js fails
@@ -249,8 +253,6 @@ built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional.js fails
 built-ins/String/prototype/toLowerCase/Final_Sigma_U180E.js fails
 built-ins/String/prototype/toLowerCase/special_casing_conditional.js fails
 built-ins/TypedArray/prototype/constructor.js fails
-built-ins/TypedArray/prototype/fill/fill-values-conversion-operations-consistent-nan.js fails
-built-ins/TypedArray/prototype/slice/bit-precision.js fails
 built-ins/TypedArray/prototype/sort/arraylength-internal.js fails
 built-ins/TypedArray/prototype/sort/comparefn-call-throws.js fails
 built-ins/TypedArray/prototype/sort/comparefn-calls.js fails
diff --git a/tests/auto/qml/ecmascripttests/qjstest/CMakeLists.txt b/tests/auto/qml/ecmascripttests/qjstest/CMakeLists.txt
deleted file mode 100644
index 86ca5f97a3..0000000000
--- a/tests/auto/qml/ecmascripttests/qjstest/CMakeLists.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (C) 2022 The Qt Company Ltd.
-# SPDX-License-Identifier: BSD-3-Clause
-
-# Generated from qjstest.pro.
-
-#####################################################################
-## qjstest Tool:
-#####################################################################
-
-qt_get_tool_target_name(target_name qjstest)
-qt_internal_add_tool(${target_name}
-    TARGET_DESCRIPTION "Javascript test runner"
-    SOURCES
-        main.cpp
-        test262runner.cpp test262runner.h
-    DEFINES
-        QT_DEPRECATED_WARNINGS
-    INCLUDE_DIRECTORIES
-        .
-    LIBRARIES
-        Qt::Gui
-        Qt::QmlPrivate
-)
-qt_internal_return_unless_building_tools()
-
-#### Keys ignored in scope 1:.:.:qjstest.pro:<TRUE>:
-# QMAKE_TARGET_DESCRIPTION = "Javascript" "test" "runner"
-# TEMPLATE = "app"
diff --git a/tests/auto/qml/ecmascripttests/qjstest/main.cpp b/tests/auto/qml/ecmascripttests/qjstest/main.cpp
deleted file mode 100644
index 7bffedae81..0000000000
--- a/tests/auto/qml/ecmascripttests/qjstest/main.cpp
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (C) 2016 The Qt Company Ltd.
-// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
-#include <QJSEngine>
-#include <QCoreApplication>
-#include <QCommandLineParser>
-#include <qdebug.h>
-#include <stdlib.h>
-
-#include "test262runner.h"
-
-int main(int argc, char **argv)
-{
-    QCoreApplication app(argc, argv);
-
-
-    QCommandLineParser parser;
-    parser.addHelpOption();
-    parser.addVersionOption();
-    QCommandLineOption verbose("verbose", "Verbose output");
-    parser.addOption(verbose);
-    QCommandLineOption commandOption("command", "Javascript command line interpreter", "command");
-    parser.addOption(commandOption);
-    QCommandLineOption testDir("tests", "path to the tests", "tests", "test262");
-    parser.addOption(testDir);
-    QCommandLineOption cat("cat", "Print packaged test code that would be run");
-    parser.addOption(cat);
-    QCommandLineOption parallel("parallel", "Run tests in parallel");
-    parser.addOption(parallel);
-    QCommandLineOption jit("jit", "JIT all code");
-    parser.addOption(jit);
-    QCommandLineOption bytecode("interpret", "Run using the bytecode interpreter");
-    parser.addOption(bytecode);
-    QCommandLineOption withExpectations("with-test-expectations", "Parse TestExpectations to deal with known failures");
-    parser.addOption(withExpectations);
-    QCommandLineOption updateExpectations("update-expectations", "Update TestExpectations to remove unexepected passes");
-    parser.addOption(updateExpectations);
-    QCommandLineOption writeExpectations("write-expectations", "Generate a new TestExpectations file based on the results of the run");
-    parser.addOption(writeExpectations);
-    parser.addPositionalArgument("[filter]", "Only run tests that contain filter in their name");
-
-    parser.process(app);
-
-    Test262Runner testRunner(parser.value(commandOption), parser.value(testDir), QStringLiteral("TestExpectations"));
-
-    QStringList otherArgs = parser.positionalArguments();
-    if (otherArgs.size() > 1) {
-        qWarning() << "too many arguments";
-        return 1;
-    } else if (otherArgs.size()) {
-        testRunner.setFilter(otherArgs.at(0));
-    }
-
-    if (parser.isSet(cat)) {
-        testRunner.cat();
-        return 0;
-    }
-
-    if (parser.isSet(updateExpectations) && parser.isSet(writeExpectations)) {
-        qWarning() << "Can only specify one of --update-expectations and --write-expectations.";
-        exit(1);
-    }
-
-    if (parser.isSet(jit) && parser.isSet(bytecode)) {
-        qWarning() << "Can only specify one of --jit and --interpret.";
-        exit(1);
-    }
-
-    int flags = 0;
-    if (parser.isSet(verbose))
-
-        flags |= Test262Runner::Verbose;
-    if (parser.isSet(parallel))
-        flags |= Test262Runner::Parallel;
-    if (parser.isSet(jit))
-        flags |= Test262Runner::ForceJIT;
-    if (parser.isSet(bytecode))
-        flags |= Test262Runner::ForceBytecode;
-    if (parser.isSet(withExpectations))
-        flags |= Test262Runner::WithTestExpectations;
-    if (parser.isSet(updateExpectations))
-        flags |= Test262Runner::UpdateTestExpectations;
-    if (parser.isSet(writeExpectations))
-        flags |= Test262Runner::WriteTestExpectations;
-    testRunner.setFlags(flags);
-
-    if (testRunner.run())
-        return EXIT_SUCCESS;
-    else
-        return EXIT_FAILURE;
-}
diff --git a/tests/auto/qml/ecmascripttests/test262.py b/tests/auto/qml/ecmascripttests/test262.py
deleted file mode 100755
index 01c990950c..0000000000
--- a/tests/auto/qml/ecmascripttests/test262.py
+++ /dev/null
@@ -1,611 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2017 The Qt Company Ltd.
-# SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
-
-# Copyright 2009 the Sputnik authors.  All rights reserved.
-# This code is governed by the BSD license found in the LICENSE file.
-
-# This is derived from sputnik.py, the Sputnik console test runner,
-# with elements from packager.py, which is separately
-# copyrighted. TODO: Refactor so there is less duplication between
-# test262.py and packager.py.
-
-import sys
-from os import path
-rootDir = path.dirname(path.realpath(__file__))
-sys.path.insert(0, path.abspath(rootDir + "/test262/tools/packaging"))
-
-import logging
-import optparse
-import os
-import platform
-import re
-import subprocess
-import tempfile
-import time
-import xml.dom.minidom
-import datetime
-import shutil
-import json
-import stat
-import multiprocessing
-import signal
-
-
-from parseTestRecord import parseTestRecord, stripHeader
-
-from packagerConfig import *
-
-# excluded features that are still experimental and not part of any official standard
-# see also the features.txt file in test262/
-excludedFeatures = [
-    "BigInt",
-    "class-fields-public",
-    "class-fields-private",
-    "Promise.prototype.finally",
-    "async-iteration",
-    "Symbol.asyncIterator",
-    "object-rest",
-    "object-spread",
-    "optional-catch-binding",
-    "regexp-dotall",
-    "regexp-lookbehind",
-    "regexp-named-groups",
-    "regexp-unicode-property-escapes",
-    "Atomics",
-    "SharedArrayBuffer",
-    "Array.prototype.flatten",
-    "Array.prototype.flatMap",
-    "string-trimming",
-    "String.prototype.trimEnd",
-    "String.prototype.trimStart",
-    "numeric-separator-literal",
-
-    # optional features, not supported by us
-    "caller"
-]
-
-# ############# Helpers needed for parallel multi-process test execution ############
-
-def runTest(case, args):
-    return case.Run(args)
-
-def runTestVarArgs(args):
-    return runTest(*args)
-
-def initWorkerProcess():
-    signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-# #############
-
-class Test262Error(Exception):
-    def __init__(self, message):
-        self.message = message
-
-def ReportError(s):
-    raise Test262Error(s)
-
-
-class TestExpectations:
-    def __init__(self, enabled):
-        self.testsToSkip = []
-        self.failingTests = []
-        f = open(rootDir + "/TestExpectations")
-        if not enabled:
-            return
-        for line in f.read().splitlines():
-            line = line.strip()
-            if len(line) == 0 or line[0] == "#":
-                continue
-            record = line.split()
-            if len(record) == 1:
-                self.failingTests.append(record[0])
-            else:
-                test = record[0]
-                expectation = record[1]
-                if expectation == "skip":
-                    self.testsToSkip.append(test)
-        f.close()
-
-    def update(self, progress):
-        unexpectedPasses = [c.case.name for c in progress.failed_tests if c.case.IsNegative()]
-
-        # If a test fails that we expected to fail, then it actually passed unexpectedly.
-        failures = [c.case.name for c in progress.failed_tests if not c.case.IsNegative()]
-        for failure in failures:
-            if failure in self.failingTests:
-                unexpectedPasses.append(failure)
-
-        f = open(rootDir + "/TestExpectations")
-        lines = f.read().splitlines()
-        oldLen = len(lines)
-        for result in unexpectedPasses:
-            expectationLine = result
-            try:
-                lines.remove(expectationLine)
-            except ValueError:
-                pass
-
-        f.close()
-        if len(lines) != oldLen:
-            f = open(rootDir + "/TestExpectations", "w")
-            f.write("\n".join(lines))
-            f.close()
-            print "Changes to TestExpectations written!"
-
-
-if not os.path.exists(EXCLUDED_FILENAME):
-    print "Cannot generate (JSON) test262 tests without a file," + \
-        " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
-    sys.exit(1)
-EXCLUDE_LIST = xml.dom.minidom.parse(EXCLUDED_FILENAME)
-EXCLUDE_LIST = EXCLUDE_LIST.getElementsByTagName("test")
-EXCLUDE_LIST = [x.getAttribute("id") for x in EXCLUDE_LIST]
-
-
-def BuildOptions():
-    result = optparse.OptionParser()
-    result.add_option("--command", default="qmljs", help="The command-line to run")
-    result.add_option("--tests", default=path.abspath(rootDir + '/test262'),
-                      help="Path to the tests")
-    result.add_option("--cat", default=False, action="store_true",
-                      help="Print packaged test code that would be run")
-    result.add_option("--summary", default=True, action="store_true",
-                      help="Print summary after running tests")
-    result.add_option("--full-summary", default=False, action="store_true",
-                      help="Print summary and test output after running tests")
-    result.add_option("--strict_only", default=False, action="store_true",
-                      help="Test only strict mode")
-    result.add_option("--non_strict_only", default=False, action="store_true",
-                      help="Test only non-strict mode")
-    result.add_option("--parallel", default=False, action="store_true",
-                      help="Run tests in parallel")
-    result.add_option("--with-test-expectations", default=False, action="store_true",
-                      help="Parse TestExpectations to deal with tests known to fail")
-    result.add_option("--update-expectations", default=False, action="store_true",
-                      help="Update test expectations fail when a test passes that was expected to fail")
-    # TODO: Once enough tests are made strict compat, change the default
-    # to "both"
-    result.add_option("--unmarked_default", default="non_strict",
-                      help="default mode for tests of unspecified strictness")
-    return result
-
-
-def ValidateOptions(options):
-    if not options.command:
-        ReportError("A --command must be specified.")
-    if not path.exists(options.tests):
-        ReportError("Couldn't find test path '%s'" % options.tests)
-
-
-placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")
-
-
-def IsWindows():
-    p = platform.system()
-    return (p == 'Windows') or (p == 'Microsoft')
-
-
-class TempFile(object):
-
-    def __init__(self, suffix="", prefix="tmp", text=False):
-        self.suffix = suffix
-        self.prefix = prefix
-        self.text = text
-        self.fd = None
-        self.name = None
-        self.is_closed = False
-        self.Open()
-
-    def Open(self):
-        (self.fd, self.name) = tempfile.mkstemp(
-            suffix = self.suffix,
-            prefix = self.prefix,
-            text = self.text)
-
-    def Write(self, str):
-        os.write(self.fd, str)
-
-    def Read(self):
-        f = file(self.name)
-        result = f.read()
-        f.close()
-        return result
-
-    def Close(self):
-        if not self.is_closed:
-            self.is_closed = True
-            os.close(self.fd)
-
-    def Dispose(self):
-        try:
-            self.Close()
-            os.unlink(self.name)
-        except OSError, e:
-            logging.error("Error disposing temp file: %s", str(e))
-
-
-class TestResult(object):
-
-    def __init__(self, exit_code, stdout, stderr, case):
-        self.exit_code = exit_code
-        self.stdout = stdout
-        self.stderr = stderr
-        self.case = case
-
-    def ReportOutcome(self, long_format):
-        name = self.case.GetName()
-        mode = self.case.GetMode()
-        if self.HasUnexpectedOutcome():
-            if self.case.IsNegative():
-                print "=== %s was expected to fail in %s, but didn't ===" % (name, mode)
-            else:
-                if long_format:
-                    print "=== %s failed in %s ===" % (name, mode)
-                else:
-                    print "%s in %s: " % (name, mode)
-                out = self.stdout.strip()
-                if len(out) > 0:
-                    print "--- output ---"
-                    print out
-                err = self.stderr.strip()
-                if len(err) > 0:
-                    print "--- errors ---"
-                    print err
-                if long_format:
-                    print "==="
-        elif self.case.IsNegative():
-            print "%s failed in %s as expected" % (name, mode)
-        else:
-            print "%s passed in %s" % (name, mode)
-
-    def HasFailed(self):
-        return self.exit_code != 0
-
-    def HasUnexpectedOutcome(self):
-        if self.case.IsNegative():
-            return not self.HasFailed()
-        else:
-            return self.HasFailed()
-
-
-class TestCase(object):
-
-    def __init__(self, suite, name, full_path, strict_mode):
-        self.suite = suite
-        self.name = name
-        self.full_path = full_path
-        self.strict_mode = strict_mode
-        f = open(self.full_path)
-        self.contents = f.read()
-        f.close()
-        testRecord = parseTestRecord(self.contents, name)
-        self.test = testRecord["test"]
-        if 'features' in testRecord:
-            self.features = testRecord["features"];
-        else:
-            self.features = []
-        del testRecord["test"]
-        del testRecord["header"]
-        self.testRecord = testRecord;
-
-
-    def GetName(self):
-        return self.name
-
-    def GetMode(self):
-        if self.strict_mode:
-            return "strict mode"
-        else:
-            return "non-strict mode"
-
-    def GetPath(self):
-        return self.name
-
-    def NegateResult(self):
-        if self.IsNegative():
-            del self.testRecord['negative']
-        else:
-            self.testRecord['negative'] = "Some failure";
-
-    def IsNegative(self):
-        return 'negative' in self.testRecord
-
-    def IsOnlyStrict(self):
-        return 'onlyStrict' in self.testRecord
-
-    def IsNoStrict(self):
-        return 'noStrict' in self.testRecord
-
-    def IsExperimental(self):
-        for f in self.features:
-            if excludedFeatures.count(f) >= 1:
-                return True;
-        return False
-
-    def GetSource(self):
-        # "var testDescrip = " + str(self.testRecord) + ';\n\n' + \
-        source = self.suite.GetInclude("assert.js") + \
-            self.suite.GetInclude("sta.js") + \
-            self.test + '\n'
-        if 'includes' in self.testRecord:
-            for inc in self.testRecord['includes']:
-                source += self.suite.GetInclude(inc);
-
-        if self.strict_mode:
-            source = '"use strict";\nvar strict_mode = true;\n' + source
-        else:
-            source = "var strict_mode = false; \n" + source
-        return source
-
-    def InstantiateTemplate(self, template, params):
-        def GetParameter(match):
-            key = match.group(1)
-            return params.get(key, match.group(0))
-        return placeHolderPattern.sub(GetParameter, template)
-
-    def Execute(self, command):
-        if IsWindows():
-            args = '%s' % command
-        else:
-            args = command.split(" ")
-        stdout = TempFile(prefix="test262-out-")
-        stderr = TempFile(prefix="test262-err-")
-        try:
-            logging.info("exec: %s", str(args))
-            process = subprocess.Popen(
-                args,
-                shell = IsWindows(),
-                stdout = stdout.fd,
-                stderr = stderr.fd
-            )
-            code = process.wait()
-            out = stdout.Read()
-            err = stderr.Read()
-        finally:
-            stdout.Dispose()
-            stderr.Dispose()
-        return (code, out, err)
-
-    def RunTestIn(self, command_template, tmp):
-        tmp.Write(self.GetSource())
-        tmp.Close()
-        command = self.InstantiateTemplate(command_template, {
-            'path': tmp.name
-        })
-        (code, out, err) = self.Execute(command)
-        return TestResult(code, out, err, self)
-
-    def Run(self, command_template):
-        tmp = TempFile(suffix=".js", prefix="test262-", text=True)
-        try:
-            result = self.RunTestIn(command_template, tmp)
-        finally:
-            tmp.Dispose()
-        return result
-
-    def Print(self):
-        print self.GetSource()
-
-
-class ProgressIndicator(object):
-
-    def __init__(self, count):
-        self.count = count
-        self.succeeded = 0
-        self.failed = 0
-        self.failed_tests = []
-
-    def HasRun(self, result):
-        result.ReportOutcome(True)
-        if result.HasUnexpectedOutcome():
-            self.failed += 1
-            self.failed_tests.append(result)
-        else:
-            self.succeeded += 1
-
-
-def MakePlural(n):
-    if (n == 1):
-        return (n, "")
-    else:
-        return (n, "s")
-
-
-class TestSuite(object):
-
-    def __init__(self, root, strict_only, non_strict_only, unmarked_default, load_expectations):
-        # TODO: derive from packagerConfig.py
-        self.test_root = path.join(root, 'test')
-        self.lib_root = path.join(root, 'harness')
-        self.strict_only = strict_only
-        self.non_strict_only = non_strict_only
-        self.unmarked_default = unmarked_default
-        self.include_cache = { }
-        self.expectations = TestExpectations(load_expectations)
-
-    def IsExcludedTest(self, path):
-        if path.startswith('annexB'):
-            return True;
-        if path.startswith('harness'):
-            return True;
-        if path.startswith('intl402'):
-            return True;
-        return False;
-
-    def Validate(self):
-        if not path.exists(self.test_root):
-            ReportError("No test repository found")
-        if not path.exists(self.lib_root):
-            ReportError("No test library found")
-
-    def IsHidden(self, path):
-        return path.startswith('.') or path == 'CVS'
-
-    def IsTestCase(self, path):
-        return path.endswith('.js')
-
-    def ShouldRun(self, rel_path, tests):
-        if len(tests) == 0:
-            return True
-        for test in tests:
-            if test in rel_path:
-                return True
-        return False
-
-    def GetInclude(self, name):
-        if not name in self.include_cache:
-            static = path.join(self.lib_root, name)
-            if path.exists(static):
-                f = open(static)
-                contents = stripHeader(f.read())
-                contents = re.sub(r'\r\n', '\n', contents)
-                self.include_cache[name] = contents + "\n"
-                f.close()
-            else:
-                ReportError("Can't find: " + static)
-        return self.include_cache[name]
-
-    def EnumerateTests(self, tests):
-        logging.info("Listing tests in %s", self.test_root)
-        cases = []
-        for root, dirs, files in os.walk(self.test_root):
-            for f in [x for x in dirs if self.IsHidden(x)]:
-                dirs.remove(f)
-            dirs.sort()
-            for f in sorted(files):
-                if self.IsTestCase(f):
-                    full_path = path.join(root, f)
-                    if full_path.startswith(self.test_root):
-                        rel_path = full_path[len(self.test_root)+1:]
-                    else:
-                        logging.warning("Unexpected path %s", full_path)
-                        rel_path = full_path
-                    if self.ShouldRun(rel_path, tests) and not self.IsExcludedTest(rel_path):
-                        basename = path.basename(full_path)[:-3]
-                        name = rel_path.replace('.js', '')
-                        if EXCLUDE_LIST.count(basename) >= 1 or self.expectations.testsToSkip.count(name) >= 1:
-                            print 'Excluded: ' + rel_path
-                        else:
-                            if not self.non_strict_only:
-                                strict_case = TestCase(self, name, full_path, True)
-                                if self.expectations.failingTests.count(name) >= 1:
-                                    strict_case.NegateResult()
-                                if not strict_case.IsNoStrict() and not strict_case.IsExperimental():
-                                    if strict_case.IsOnlyStrict() or \
-                                            self.unmarked_default in ['both', 'strict']:
-                                        cases.append(strict_case)
-                            if not self.strict_only:
-                                non_strict_case = TestCase(self, name, full_path, False)
-                                if self.expectations.failingTests.count(name) >= 1:
-                                    non_strict_case.NegateResult()
-                                if not non_strict_case.IsOnlyStrict() and not non_strict_case.IsExperimental():
-                                    if non_strict_case.IsNoStrict() or \
-                                            self.unmarked_default in ['both', 'non_strict']:
-                                        cases.append(non_strict_case)
-        logging.info("Done listing tests")
-        return cases
-
-    def PrintSummary(self, progress):
-        print
-        print "=== Summary ==="
-        count = progress.count
-        succeeded = progress.succeeded
-        failed = progress.failed
-        print " - Ran %i test%s" % MakePlural(count)
-        if progress.failed == 0:
-            print " - All tests succeeded"
-        else:
-            percent = ((100.0 * succeeded) / count,)
-            print " - Passed %i test%s (%.1f%%)" % (MakePlural(succeeded) + percent)
-            percent = ((100.0 * failed) / count,)
-            print " - Failed %i test%s (%.1f%%)" % (MakePlural(failed) + percent)
-            positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
-            negative = [c for c in progress.failed_tests if c.case.IsNegative()]
-            if len(positive) > 0:
-                print
-                print "Failed tests"
-                for result in positive:
-                    print "  %s in %s" % (result.case.GetName(), result.case.GetMode())
-            if len(negative) > 0:
-                print
-                print "Expected to fail but passed ---"
-                for result in negative:
-                    print "  %s in %s" % (result.case.GetName(), result.case.GetMode())
-
-    def PrintFailureOutput(self, progress):
-        for result in progress.failed_tests:
-            print
-            result.ReportOutcome(False)
-
-    def Run(self, command_template, tests, print_summary, full_summary, parallel, update_expectations):
-        if not "{{path}}" in command_template:
-            command_template += " {{path}}"
-        cases = self.EnumerateTests(tests)
-        if len(cases) == 0:
-            ReportError("No tests to run")
-        progress = ProgressIndicator(len(cases))
-
-        if parallel:
-            pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(), initializer=initWorkerProcess)
-            results = pool.imap_unordered(func=runTestVarArgs, iterable=[(case, command_template) for case in cases], chunksize=multiprocessing.cpu_count() * 8)
-            for result in results:
-                progress.HasRun(result)
-        else:
-            for case in cases:
-                result = case.Run(command_template)
-                progress.HasRun(result)
-        if print_summary:
-            self.PrintSummary(progress)
-            if full_summary:
-                self.PrintFailureOutput(progress)
-            else:
-                print
-                print "Use --full-summary to see output from failed tests"
-                print
-        if update_expectations:
-            self.expectations.update(progress)
-        return progress.failed == 0
-
-    def Print(self, tests):
-        cases = self.EnumerateTests(tests)
-        if len(cases) > 0:
-            cases[0].Print()
-
-
-def Main():
-    # Uncomment the next line for more logging info.
-    #logging.basicConfig(level=logging.DEBUG)
-    # Some date tests rely on being run in pacific time and the USA's locale:
-    os.environ["TZ"] = "America/Los_Angeles" # it *matters* that this is (7m8s) *East* of PST's nominal meridian !
-    os.environ["LANG"] = "en_US.UTF-8"
-    os.environ["LC_TIME"] = "en_US.UTF-8"
-    parser = BuildOptions()
-    (options, args) = parser.parse_args()
-    ValidateOptions(options)
-    test_suite = TestSuite(options.tests,
-                           options.strict_only,
-                           options.non_strict_only,
-                           options.unmarked_default,
-                           options.with_test_expectations)
-    test_suite.Validate()
-    if options.cat:
-        test_suite.Print(args)
-        return 0
-    else:
-        if test_suite.Run(options.command, args,
-                          options.summary or options.full_summary,
-                          options.full_summary,
-                          options.parallel,
-                          options.update_expectations):
-            return 0
-        else:
-            return 1
-
-
-if __name__ == '__main__':
-    try:
-        sys.exit(Main())
-    except Test262Error, e:
-        print "Error: %s" % e.message
-        sys.exit(1)
diff --git a/tests/auto/qml/ecmascripttests/qjstest/test262runner.cpp b/tests/auto/qml/ecmascripttests/test262runner.cpp
index fc09182f19..ff45b1b657 100644
--- a/tests/auto/qml/ecmascripttests/qjstest/test262runner.cpp
+++ b/tests/auto/qml/ecmascripttests/test262runner.cpp
@@ -1,22 +1,26 @@
 // Copyright (C) 2016 The Qt Company Ltd.
-// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
+// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only
 
 #include "test262runner.h"
 
-#include <qfile.h>
+#include <qdebug.h>
 #include <qdir.h>
 #include <qdiriterator.h>
-#include <qdebug.h>
+#include <qfile.h>
+#include <qjsondocument.h>
+#include <qjsonobject.h>
+#include <qlibraryinfo.h>
 #include <qprocess.h>
 #include <qtemporaryfile.h>
+#include <qthread.h>
 
-#include <private/qv4script_p.h>
-#include "private/qv4globalobject_p.h"
 #include "private/qqmlbuiltinfunctions_p.h"
 #include "private/qv4arraybuffer_p.h"
+#include "private/qv4globalobject_p.h"
 
 #include <QtCore/QLoggingCategory>
+#include <private/qv4script_p.h>
 
-#include "qrunnable.h"
+using namespace Qt::StringLiterals;
 
 static const char *excludedFeatures[] = {
     "BigInt",
@@ -72,7 +76,7 @@ static ReturnedValue method_detachArrayBuffer(const FunctionObject *f, const Val
     return Encode::null();
 }
 
-static void initD262(ExecutionEngine *e)
+void initD262(ExecutionEngine *e)
 {
     Scope scope(e);
     ScopedObject d262(scope, e->newObject());
@@ -85,10 +89,7 @@ static void initD262(ExecutionEngine *e)
 }
 
-QT_END_NAMESPACE
-
-Q_DECLARE_LOGGING_CATEGORY(lcJsTest);
-Q_LOGGING_CATEGORY(lcJsTest, "qt.v4.ecma262.tests", QtWarningMsg);
+Q_STATIC_LOGGING_CATEGORY(lcJsTest, "qt.v4.ecma262.tests", QtWarningMsg);
 
 Test262Runner::Test262Runner(const QString &command, const QString &dir, const QString &expectationsFile)
     : command(command), testDir(dir), expectationsFile(expectationsFile)
@@ -99,7 +100,8 @@ Test262Runner::Test262Runner(const QString &command, const QString &dir, const Q
 
 Test262Runner::~Test262Runner()
 {
-    delete threadPool;
+    if (threadPool)
+        delete threadPool;
 }
 
 void Test262Runner::cat()
@@ -113,18 +115,282 @@ void Test262Runner::cat()
         printf("%s", data.content.constData());
 }
 
+void Test262Runner::assignTaskOrTerminate(int processIndex)
+{
+    if (tasks.isEmpty()) {
+        sendDone(processIndex);
+        return;
+    }
+
+    currentTasks[processIndex] = tasks.dequeue();
+    TestData &task = currentTasks[processIndex];
+
+    // Sloppy run + maybe strict run later
+    if (task.runInSloppyMode) {
+        if (task.runInStrictMode)
+            task.stillNeedStrictRun = true;
+        assignSloppy(processIndex);
+        return;
+    }
+
+    // Only strict run
+    if (task.runInStrictMode) {
+        assignStrict(processIndex);
+        return;
+    }
+
+    // TODO: Start a timer for timeouts?
+}
+
+void Test262Runner::assignSloppy(int processIndex)
+{
+    QProcess &p = *processes[processIndex];
+    TestData &task = currentTasks[processIndex];
+
+    QJsonObject json;
+    json.insert("mode", "sloppy");
+    json.insert("testData", QString::fromUtf8(task.content));
+    json.insert("runAsModule", false);
+    json.insert("testCasePath", "");
+    json.insert("harnessForModules", "");
+    p.write(QJsonDocument(json).toJson(QJsonDocument::Compact));
+    p.write("\r\n");
+}
+
+void Test262Runner::assignStrict(int processIndex)
+{
+    QProcess &p = *processes[processIndex];
+    TestData &task = currentTasks[processIndex];
+
+    QJsonObject json;
+    json.insert("mode", "strict");
+    QString strictContent = "'use strict';\n" + QString::fromUtf8(task.content);
+    json.insert("testData", strictContent);
+    json.insert("runAsModule", task.runAsModuleCode);
+    json.insert("testCasePath", QFileInfo(testDir + "/test/" + task.test).absoluteFilePath());
+    json.insert("harnessForModules", QString::fromUtf8(task.harness));
+    p.write(QJsonDocument(json).toJson(QJsonDocument::Compact));
+    p.write("\r\n");
+}
+
+void Test262Runner::sendDone(int processIndex)
+{
+    QProcess &p = *processes[processIndex];
+
+    QJsonObject json;
+    json.insert("done", true);
+    p.write(QJsonDocument(json).toJson(QJsonDocument::Compact));
+    p.write("\r\n");
+}
+
+void Test262Runner::createProcesses()
+{
+    const int processCount = QThread::idealThreadCount();
+    qDebug() << "Running in parallel with" << processCount << "processes";
+    for (int i = 0; i < processCount; ++i) {
+        processes.emplace_back(std::make_unique<QProcess>());
+        QProcess &p = *processes[i];
+        QProcess::connect(&p, &QProcess::started, this, [&, i]() {
+            assignTaskOrTerminate(i);
+        });
+
+        QProcess::connect(&p, &QIODevice::readyRead, this, [&, i]() {
+            QProcess &p = *processes[i];
+            QString output;
+            while (output.isEmpty())
+                output = p.readLine();
+            QJsonDocument response = QJsonDocument::fromJson(output.toUtf8());
+
+            TestData &testData(currentTasks[i]);
+            auto mode = response["mode"].toString();
+            auto state = TestCase::State(response["resultState"].toInt(int(TestCase::State::Fails)));
+            auto errorMessage = response["resultErrorMessage"].toString();
+
+            auto &result = mode == "strict" ? testData.strictResult : testData.sloppyResult;
+            result = TestCase::Result(state, errorMessage);
+            if (testData.negative)
+                result.negateResult();
+
+            if (testData.stillNeedStrictRun) {
+                testData.stillNeedStrictRun = false;
+                assignStrict(i);
+            } else {
+                addResult(testData);
+                assignTaskOrTerminate(i);
+            }
+        });
+
+        QObject::connect(&p, &QProcess::finished, this,
+                         [this, processCount, i](int exitCode, QProcess::ExitStatus status) {
+            if (status != QProcess::NormalExit || exitCode != 0) {
+                TestData &testData(currentTasks[i]);
+
+                auto &result = testData.stillNeedStrictRun
+                        ? testData.sloppyResult
+                        : testData.strictResult;
+                result = TestCase::Result(
+                        TestCase::Crashes,
+                        QStringLiteral("Process %1 of %2 exited with a non-normal status")
+                                .arg(i).arg(processCount - 1));
+
+                addResult(testData);
+            }
+
+            --runningCount;
+            if (runningCount == 0)
+                loop.exit();
+        });
+
+        p.setProgram(QCoreApplication::applicationFilePath());
+        QProcessEnvironment env = QProcessEnvironment::systemEnvironment();
+        env.insert(u"runnerProcess"_s, u"1"_s);
+        p.setProcessEnvironment(env);
+        ++runningCount;
+        p.start();
+    }
+}
+
+class SingleTest : public QRunnable
+{
+public:
+    SingleTest(Test262Runner *runner, const TestData &data)
+        : runner(runner), data(data)
+    {}
+    void run() override;
+
+    Test262Runner *runner;
+    TestData data;
+};
+
+TestCase::Result getTestExecutionResult(QV4::ExecutionEngine &vm)
+{
+    TestCase::State state;
+    QString errorMessage;
+    if (vm.hasException) {
+        state = TestCase::State::Fails;
+        QV4::Scope scope(&vm);
+        QV4::ScopedValue val(scope, vm.catchException());
+        errorMessage = val->toQString();
+    } else {
+        state = TestCase::State::Passes;
+    }
+    return TestCase::Result(state, errorMessage);
+}
+
+void SingleTest::run()
+{
+    if (data.runInSloppyMode) {
+        QV4::ExecutionEngine vm;
+        Test262Runner::executeTest(vm, data.content);
+        TestCase::Result ok = getTestExecutionResult(vm);
+
+        if (data.negative)
+            ok.negateResult();
+
+        data.sloppyResult = ok;
+    } else {
+        data.sloppyResult = TestCase::Result(TestCase::Skipped);
+    }
+    if (data.runInStrictMode) {
+        QString testCasePath = QFileInfo(runner->testDirectory() + "/test/" + data.test).absoluteFilePath();
+        QByteArray c = "'use strict';\n" + data.content;
+
+        QV4::ExecutionEngine vm;
+        Test262Runner::executeTest(vm, c, testCasePath, data.harness, data.runAsModuleCode);
+        TestCase::Result ok = getTestExecutionResult(vm);
+
+        if (data.negative)
+            ok.negateResult();
+
+        data.strictResult = ok;
+    } else {
+        data.strictResult = TestCase::Result(TestCase::Skipped);
+    }
+    runner->addResult(data);
+}
+
+void Test262Runner::executeTest(QV4::ExecutionEngine &vm, const QString &testData,
+                                const QString &testCasePath, const QString &harnessForModules,
+                                bool runAsModule)
+{
+    QV4::Scope scope(&vm);
+    QV4::GlobalExtensions::init(vm.globalObject,
+                                QJSEngine::ConsoleExtension | QJSEngine::GarbageCollectionExtension);
+    QV4::initD262(&vm);
+
+    if (runAsModule) {
+        const QUrl rootModuleUrl = QUrl::fromLocalFile(testCasePath);
+        // inject all modules with the harness
+        QVector<QUrl> modulesToLoad = { rootModuleUrl };
+        while (!modulesToLoad.isEmpty()) {
+            QUrl url = modulesToLoad.takeFirst();
+            QQmlRefPointer<QV4::ExecutableCompilationUnit> module;
+
+            QFile f(url.toLocalFile());
+            if (f.open(QIODevice::ReadOnly)) {
+                QByteArray content = harnessForModules.toLocal8Bit() + f.readAll();
+                module = vm.compileModule(url.toString(),
+                                          QString::fromUtf8(content.constData(),content.size()),
+                                          QFileInfo(f).lastModified());
+                if (vm.hasException)
+                    break;
+            } else {
+                vm.throwError(QStringLiteral("Could not load module"));
+                break;
+            }
+
+            const QStringList moduleRequests = module->baseCompilationUnit()->moduleRequests();
+            for (const QString &request: moduleRequests) {
+                const QUrl absoluteRequest = module->finalUrl().resolved(QUrl(request));
+                const auto module = vm.moduleForUrl(absoluteRequest);
+                if (module.native == nullptr && module.compiled == nullptr)
+                    modulesToLoad << absoluteRequest;
+            }
+        }
+
+        if (!vm.hasException) {
+            const auto rootModule = vm.loadModule(rootModuleUrl);
+            if (rootModule.compiled && rootModule.compiled->instantiate())
+                rootModule.compiled->evaluate();
+        }
+    } else {
+        QV4::ScopedContext ctx(scope, vm.rootContext());
+
+        QV4::Script script(ctx, QV4::Compiler::ContextType::Global, testData);
+        script.parse();
+
+        if (!vm.hasException)
+            script.run();
+    }
+}
+
+void Test262Runner::runWithThreadPool()
+{
+    threadPool = new QThreadPool();
+    threadPool->setStackSize(16*1024*1024);
+    qDebug() << "Running in parallel with" << QThread::idealThreadCount() << "threads";
+
+    for (const TestCase &testCase : std::as_const(testCases)) {
+        TestData testData = getTestData(testCase);
+        if (testData.isExcluded || testData.async)
+            continue;
+        SingleTest *test = new SingleTest(this, testData);
+        threadPool->start(test);
+    }
+
+    while (!threadPool->waitForDone(10'000)) {
+        if (lcJsTest().isEnabled(QtDebugMsg)) {
+            // heartbeat, only needed when there is no other debug output
+            qDebug("test262: in progress...");
+        }
+    }
+}
+
 bool Test262Runner::run()
 {
     if (!loadTests())
         return false;
 
-    if (flags & Parallel) {
-        threadPool = new QThreadPool;
-        threadPool->setStackSize(16*1024*1024);
-        if (flags & Verbose)
-            qDebug() << "Running in parallel with" << QThread::idealThreadCount() << "threads.";
-    }
-
     if (flags & ForceJIT)
         qputenv("QV4_JIT_CALL_THRESHOLD", QByteArray("0"));
     else if (flags & ForceBytecode)
@@ -136,14 +402,24 @@ bool Test262Runner::run()
     for (auto it = testCases.constBegin(); it != testCases.constEnd(); ++it) {
         auto c = it.value();
         if (!c.skipTestCase) {
-            int result = runSingleTest(c);
-            if (result == -2)
-                return false;
+            TestData data = getTestData(c);
+            if (data.isExcluded || data.async)
+                continue;
+
+            tasks.append(data);
         }
     }
 
-    if (threadPool)
-        threadPool->waitForDone();
+    if (command.isEmpty()) {
+#if QT_CONFIG(process)
+        createProcesses();
+        loop.exec();
+#else
+        runWithThreadPool();
+#endif
+    } else {
+        runAsExternalTests();
+    }
 
     const bool testsOk = report();
@@ -167,7 +443,7 @@ bool Test262Runner::report()
         if (c.strictResult.state == c.strictExpectation.state
             && c.sloppyResult.state == c.sloppyExpectation.state)
             continue;
-        auto report = [&](TestCase::Result expected, TestCase::Result result, const char *s) {
+        auto report = [&](const TestCase::Result &expected, const TestCase::Result &result, const char *s) {
             if (result.state == TestCase::Crashes)
                 crashes << (it.key() + " crashed in " + s + " mode");
             if (result.state == TestCase::Fails && expected.state == TestCase::Passes)
@@ -420,7 +696,10 @@ void Test262Runner::updateTestExpectations()
     }
 
     QTemporaryFile updatedExpectations;
-    updatedExpectations.open();
+    if (!updatedExpectations.open()) {
+        qFatal("Could not open temporary TestExpectations file: %s",
+               qPrintable(updatedExpectations.errorString()));
+    }
 
     while (!file.atEnd()) {
         QByteArray originalLine = file.readLine();
@@ -458,9 +737,12 @@ void Test262Runner::writeTestExpectations()
     QFile file(expectationsFile);
 
     QTemporaryFile expectations;
-    expectations.open();
+    if (!expectations.open()) {
+        qFatal("Could not open temporary TestExpectations file: %s",
+               qPrintable(expectations.errorString()));
+    }
 
-    for (auto c : std::as_const(testCases)) {
+    for (const auto &c : std::as_const(testCases)) {
         TestExpectationLine line = TestExpectationLine::fromTestCase(c);
         expectations.write(line.toLine());
     }
@@ -474,175 +756,50 @@ void Test262Runner::writeTestExpectations()
         qWarning() << "Could not write new TestExpectations file at" << expectationsFile;
 }
 
-static TestCase::Result executeTest(const QByteArray &data, bool runAsModule = false,
-                                    const QString &testCasePath = QString(),
-                                    const QByteArray &harnessForModules = QByteArray())
+void Test262Runner::runAsExternalTests()
 {
-    QString testData = QString::fromUtf8(data.constData(), data.size());
-
-    QV4::ExecutionEngine vm;
-
-    QV4::Scope scope(&vm);
-
-    QV4::GlobalExtensions::init(vm.globalObject, QJSEngine::ConsoleExtension | QJSEngine::GarbageCollectionExtension);
-    QV4::initD262(&vm);
-
-    if (runAsModule) {
-        const QUrl rootModuleUrl = QUrl::fromLocalFile(testCasePath);
-        // inject all modules with the harness
-        QVector<QUrl> modulesToLoad = { rootModuleUrl };
-        while (!modulesToLoad.isEmpty()) {
-            QUrl url = modulesToLoad.takeFirst();
-            QQmlRefPointer<QV4::ExecutableCompilationUnit> module;
-
-            QFile f(url.toLocalFile());
-            if (f.open(QIODevice::ReadOnly)) {
-                QByteArray content = harnessForModules + f.readAll();
-                module = vm.compileModule(url.toString(), QString::fromUtf8(content.constData(), content.size()), QFileInfo(f).lastModified());
-                if (vm.hasException)
-                    break;
-                vm.injectCompiledModule(module);
-            } else {
-                vm.throwError(QStringLiteral("Could not load module"));
-                break;
+    for (TestData &testData : tasks) {
+        auto runTest = [&] (const char *header, TestCase::Result *result) {
+            QTemporaryFile tempFile;
+            if (!tempFile.open()) {
+                qFatal("Could not open temporary test data file: %s",
+                       qPrintable(tempFile.errorString()));
             }
-
-            for (const QString &request: module->moduleRequests()) {
-                const QUrl absoluteRequest = module->finalUrl().resolved(QUrl(request));
-                const auto module = vm.moduleForUrl(absoluteRequest);
-                if (module.native == nullptr && module.compiled == nullptr)
-                    modulesToLoad << absoluteRequest;
+            tempFile.write(header);
+            tempFile.write(testData.content);
+            tempFile.close();
+
+            QProcess process;
+            process.start(command, QStringList(tempFile.fileName()));
+            if (!process.waitForFinished(-1) || process.error() == QProcess::FailedToStart) {
+                qWarning() << "Could not execute" << command;
+                *result = TestCase::Result(TestCase::Crashes);
             }
-        }
-
-        if (!vm.hasException) {
-            const auto rootModule = vm.loadModule(rootModuleUrl);
-            if (rootModule.compiled && rootModule.compiled->instantiate(&vm))
-                rootModule.compiled->evaluate();
-        }
-    } else {
-        QV4::ScopedContext ctx(scope, vm.rootContext());
-
-        QV4::Script script(ctx, QV4::Compiler::ContextType::Global, testData);
-        script.parse();
-
-        if (!vm.hasException)
-            script.run();
-    }
-
-    if (vm.hasException) {
-        QV4::Scope scope(&vm);
-        QV4::ScopedValue val(scope, vm.catchException());
-        return TestCase::Result(TestCase::Fails, val->toQString());
-    }
-    return TestCase::Result(TestCase::Passes);
-}
-
-class SingleTest : public QRunnable
-{
-public:
-    SingleTest(Test262Runner *runner, const TestData &data)
-        : runner(runner), data(data)
-    {
-        command = runner->command;
-    }
-    void run() override;
-
-    void runExternalTest();
-
-    QString command;
-    Test262Runner *runner;
-    TestData data;
-};
-
-void SingleTest::run()
-{
-    if (!command.isEmpty()) {
-        runExternalTest();
-        return;
-    }
-
-    if (data.runInSloppyMode) {
-        TestCase::Result ok = ::executeTest(data.content);
-        if (data.negative)
-            ok.negateResult();
-
-        data.sloppyResult = ok;
-    } else {
-        data.sloppyResult = TestCase::Result(TestCase::Skipped);
-    }
-    if (data.runInStrictMode) {
-        const QString testCasePath = QFileInfo(runner->testDir + "/test/" + data.test).absoluteFilePath();
-        QByteArray c = "'use strict';\n" + data.content;
-        TestCase::Result ok = ::executeTest(c, data.runAsModuleCode, testCasePath, data.harness);
-        if (data.negative)
-            ok.negateResult();
-
-        data.strictResult = ok;
-    } else {
-        data.strictResult = TestCase::Result(TestCase::Skipped);
-    }
-    runner->addResult(data);
-}
-
-void SingleTest::runExternalTest()
-{
-    auto runTest = [this] (const char *header, TestCase::Result *result) {
-        QTemporaryFile tempFile;
-        tempFile.open();
-        tempFile.write(header);
-        tempFile.write(data.content);
-        tempFile.close();
-
-        QProcess process;
-//        if (flags & Verbose)
-//            process.setReadChannelMode(QProcess::ForwardedChannels);
-
-        process.start(command, QStringList(tempFile.fileName()));
-        if (!process.waitForFinished(-1) || process.error() == QProcess::FailedToStart) {
-            qWarning() << "Could not execute" << command;
-            *result = TestCase::Result(TestCase::Crashes);
-        }
-        if (process.exitStatus() != QProcess::NormalExit) {
-            *result = TestCase::Result(TestCase::Crashes);
-        }
-        bool ok = (process.exitCode() == EXIT_SUCCESS);
-        if (data.negative)
-            ok = !ok;
-        *result = ok ? TestCase::Result(TestCase::Passes)
-                     : TestCase::Result(TestCase::Fails, process.readAllStandardError());
-    };
-
-    if (data.runInSloppyMode)
-        runTest("", &data.sloppyResult);
-    if (data.runInStrictMode)
-        runTest("'use strict';\n", &data.strictResult);
-
-    runner->addResult(data);
-}
-
-int Test262Runner::runSingleTest(TestCase testCase)
-{
-    TestData data = getTestData(testCase);
-//    qDebug() << "starting test" << data.test;
+            if (process.exitStatus() != QProcess::NormalExit) {
+                *result = TestCase::Result(TestCase::Crashes);
+            }
+            bool ok = (process.exitCode() == EXIT_SUCCESS);
+            if (testData.negative)
+                ok = !ok;
+            *result = ok ? TestCase::Result(TestCase::Passes)
+                         : TestCase::Result(TestCase::Fails, process.readAllStandardError());
+        };
 
-    if (data.isExcluded || data.async)
-        return 0;
+        if (testData.runInSloppyMode)
+            runTest("", &testData.sloppyResult);
+        if (testData.runInStrictMode)
+            runTest("'use strict';\n", &testData.strictResult);
 
-    if (threadPool) {
-        SingleTest *test = new SingleTest(this, data);
-        threadPool->start(test);
-        return 0;
+        addResult(testData);
     }
 
-    SingleTest test(this, data);
-    test.run();
-    return 0;
 }
 
 void Test262Runner::addResult(TestCase result)
 {
     {
+#if !QT_CONFIG(process)
         QMutexLocker locker(&mutex);
+#endif
         Q_ASSERT(result.strictExpectation.state == testCases[result.test].strictExpectation.state);
         Q_ASSERT(result.sloppyExpectation.state == testCases[result.test].sloppyExpectation.state);
         testCases[result.test] = result;
@@ -847,3 +1004,5 @@ QByteArray Test262Runner::harness(const QByteArray &name)
         harnessFiles.insert(name, content);
     return content;
 }
+
+QT_END_NAMESPACE
diff --git a/tests/auto/qml/ecmascripttests/qjstest/test262runner.h b/tests/auto/qml/ecmascripttests/test262runner.h
index e2bf26296f..a989ac5188 100644
--- a/tests/auto/qml/ecmascripttests/qjstest/test262runner.h
+++ b/tests/auto/qml/ecmascripttests/test262runner.h
@@ -1,14 +1,24 @@
 // Copyright (C) 2016 The Qt Company Ltd.
-// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
+// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only
+
 #ifndef TEST262RUNNER_H
 #define TEST262RUNNER_H
-#include <qstring.h>
-#include <qstringlist.h>
-#include <qset.h>
+
+#include <qeventloop.h>
 #include <qmap.h>
 #include <qmutex.h>
+#include <qprocess.h>
+#include <qqueue.h>
+#include <qset.h>
 #include <qthreadpool.h>
 
+QT_BEGIN_NAMESPACE
+
+namespace QV4 {
+struct ExecutionEngine;
+void initD262(ExecutionEngine *e);
+}
+
 struct TestCase {
     TestCase() = default;
     TestCase(const QString &test)
@@ -40,16 +50,18 @@ struct TestCase {
         }
     };
 
-    bool skipTestCase = false;
     Result strictExpectation = Result(Passes);
     Result sloppyExpectation = Result(Passes);
     Result strictResult = Result(Skipped);
     Result sloppyResult = Result(Skipped);
+    bool skipTestCase = false;
+    bool stillNeedStrictRun = false;
 
     QString test;
 };
 
 struct TestData : TestCase {
+    TestData() = default;
     TestData(const TestCase &testCase)
         : TestCase(testCase) {}
 
     // flags
@@ -67,8 +79,12 @@ struct TestData : TestCase {
     QByteArray content;
 };
 
-class Test262Runner
+class SingleTest;
+
+class Test262Runner : public QObject
 {
+    Q_OBJECT
+
 public:
     Test262Runner(const QString &command, const QString &testDir, const QString &expectationsFile);
     ~Test262Runner();
@@ -95,6 +111,12 @@ public:
     bool run();
     bool report();
 
+    QString testDirectory() const { return testDir; }
+
+    static void executeTest(QV4::ExecutionEngine &vm, const QString &testData,
+                            const QString &testCasePath = QString(),
+                            const QString &harnessForModules = QString(),
+                            bool runAsModule = false);
 private:
     friend class SingleTest;
 
@@ -102,7 +124,16 @@ private:
     void loadTestExpectations();
     void updateTestExpectations();
    void writeTestExpectations();
-    int runSingleTest(TestCase testCase);
+
+    void runWithThreadPool();
+
+    void runAsExternalTests();
+    void createProcesses();
+    void assignTaskOrTerminate(int processIndex);
+    void assignSloppy(int processIndex);
+    void assignStrict(int processIndex);
+    void sendDone(int processIndex);
+    QString readUntilNull(QProcess &p);
 
     TestData getTestData(const TestCase &testCase);
     void parseYaml(const QByteArray &content, TestData *data);
@@ -116,14 +147,21 @@ private:
     QString expectationsFile;
     int flags = 0;
 
-    QMutex mutex;
     QString filter;
 
     QMap<QString, TestCase> testCases;
     QHash<QByteArray, QByteArray> harnessFiles;
 
     QThreadPool *threadPool = nullptr;
+    QMutex mutex;
+
+    QEventLoop loop;
+    std::vector<std::unique_ptr<QProcess>> processes;
+    int runningCount = 0;
+    QQueue<TestData> tasks;
+    QHash<int, TestData> currentTasks;
 };
 
+QT_END_NAMESPACE
 #endif
diff --git a/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp b/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp
index 03c5b18474..11d724f795 100644
--- a/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp
+++ b/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp
@@ -1,11 +1,22 @@
 // Copyright (C) 2017 The Qt Company Ltd.
-// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only WITH Qt-GPL-exception-1.0
+// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR GPL-3.0-only
 
-#include <QtTest/QtTest>
-#include <QProcess>
+#include <QFileInfo>
+#include <QJSEngine>
+#include <QJsonDocument>
+#include <QJsonObject>
 #include <QLibraryInfo>
-#include <qjstest/test262runner.h>
+#include <QProcess>
 #include <QtQuickTestUtils/private/qmlutils_p.h>
+#include <QtTest/QtTest>
+
+#include "test262runner.h"
+#include "private/qqmlbuiltinfunctions_p.h"
+#include "private/qv4arraybuffer_p.h"
+#include "private/qv4globalobject_p.h"
+#include "private/qv4script_p.h"
+
+#include <stdio.h>
 
 class tst_EcmaScriptTests : public QQmlDataTest
 {
@@ -40,7 +51,7 @@ static inline bool isNoise(QByteArrayView name)
 #ifdef QT_V4_WANT_ES262_WARNINGS
     return false;
 #else
-    const QByteArrayView noisy("qt.qml.compiler");
+    const QByteArrayView noisy("qt.qml.usedbeforedeclared");
     return name.startsWith(noisy) && (name.size() <= noisy.size() || name[noisy.size()] == '.');
 #endif
 }
@@ -59,7 +70,7 @@ void tst_EcmaScriptTests::filterCategories(QLoggingCategory *category)
 void tst_EcmaScriptTests::initTestCase()
 {
     QQmlDataTest::initTestCase();
-    /* Suppress lcQmlCompiler's "qt.qml.compiler" warnings; we aren't in a
+    /* Suppress lcQmlCompiler's "qt.qml.usedbeforedeclared" warnings; we aren't in a
        position to fix test262's many warnings and they flood messages so we
       didn't get to see actual failures unless we passed -maxwarnings with a
       huge value on the command-line (resulting in huge log output).
@@ -94,7 +105,74 @@ void tst_EcmaScriptTests::runJitted()
     QVERIFY(result);
 }
 
-QTEST_GUILESS_MAIN(tst_EcmaScriptTests)
+//// v RUNNER PROCESS MODE v ////
 
-#include "tst_ecmascripttests.moc"
+void readInput(bool &done, QString &mode, QString &testData, QString &testCasePath,
+               QString &harnessForModules, bool &runAsModule)
+{
+    QTextStream in(stdin);
+    QString input;
+    while (input.isEmpty())
+        input = in.readLine();
+
+    QJsonDocument json = QJsonDocument::fromJson(input.toUtf8());
+    done = json["done"].toBool(false);
+    mode = json["mode"].toString();
+    testData = json["testData"].toString();
+    testCasePath = json["testCasePath"].toString();
+    harnessForModules = json["harnessForModules"].toString();
+    runAsModule = json["runAsModule"].toBool(false);
+}
+
+void printResult(QV4::ExecutionEngine &vm, const QString &mode)
+{
+    QJsonObject result;
+    result.insert("mode", mode);
+    if (vm.hasException) {
+        QV4::Scope scope(&vm);
+        QV4::ScopedValue val(scope, vm.catchException());
+
+        result.insert("resultState", int(TestCase::State::Fails));
+        result.insert("resultErrorMessage", val->toQString());
+    } else {
+        result.insert("resultState", int(TestCase::State::Passes));
+    }
+    QTextStream(stdout) << QJsonDocument(result).toJson(QJsonDocument::Compact) << "\r\n";
+}
+
+void doRunnerProcess()
+{
+    bool done = false;
+    QString mode;
+    QString testData;
+    QString testCasePath;
+    QString harnessForModules;
+    bool runAsModule = false;
+
+    while (!done) {
+        QV4::ExecutionEngine vm;
+        readInput(done, mode, testData, testCasePath, harnessForModules, runAsModule);
+        if (done)
+            break;
+        Test262Runner::executeTest(vm, testData, testCasePath, harnessForModules, runAsModule);
+        printResult(vm, mode);
+    }
+}
+
+//// ^ RUNNER PROCESS MODE ^ ////
+
+int main(int argc, char *argv[])
+{
+    QCoreApplication app(argc, argv);
+
+    if (qEnvironmentVariableIntValue("runnerProcess") == 1) {
+        doRunnerProcess();
+    } else {
+        tst_EcmaScriptTests tc;
+        QTEST_SET_MAIN_SOURCE_PATH
+        return QTest::qExec(&tc, argc, argv);
+    }
+}
+
+#include "tst_ecmascripttests.moc"