Diffstat (limited to 'tests/auto/qml/ecmascripttests/test262.py')
-rwxr-xr-x | tests/auto/qml/ecmascripttests/test262.py | 88
1 file changed, 66 insertions(+), 22 deletions(-)
diff --git a/tests/auto/qml/ecmascripttests/test262.py b/tests/auto/qml/ecmascripttests/test262.py
index e055c7a8e7..59e642e575 100755
--- a/tests/auto/qml/ecmascripttests/test262.py
+++ b/tests/auto/qml/ecmascripttests/test262.py
@@ -61,6 +61,32 @@ from parseTestRecord import parseTestRecord, stripHeader

 from packagerConfig import *

+# excluded features that are still experimental and not part of any official standard
+# see also the features.txt file in test262/
+excludedFeatures = [
+    "BigInt",
+    "class-fields-public",
+    "class-fields-private",
+    "Promise.prototype.finally",
+    "async-iteration",
+    "Symbol.asyncIterator",
+    "object-rest",
+    "object-spread",
+    "optional-catch-binding",
+    "regexp-dotall",
+    "regexp-lookbehind",
+    "regexp-named-groups",
+    "regexp-unicode-property-escapes",
+    "Atomics",
+    "SharedArrayBuffer",
+    "Array.prototype.flatten",
+    "Array.prototype.flatMap",
+    "string-trimming",
+    "String.prototype.trimEnd",
+    "String.prototype.trimStart",
+    "numeric-separator-literal"
+]
+
 # ############# Helpers needed for parallel multi-process test execution ############

 def runTest(case, args):
@@ -95,19 +121,19 @@ class TestExpectations:
                 continue
             record = line.split()
             if len(record) == 1:
-                self.testsToSkip.append(record[0])
+                self.failingTests.append(record[0])
             else:
                 test = record[0]
                 expectation = record[1]
-                if expectation == "failing":
-                    self.failingTests.append(test)
+                if expectation == "skip":
+                    self.testsToSkip.append(test)
         f.close()

     def update(self, progress):
-        unexpectedPasses = [c.case.name[-1] for c in progress.failed_tests if c.case.IsNegative()]
+        unexpectedPasses = [c.case.name for c in progress.failed_tests if c.case.IsNegative()]

         # If a test fails that we expected to fail, then it actually passed unexpectedly.
-        failures = [c.case.name[-1] for c in progress.failed_tests if not c.case.IsNegative()]
+        failures = [c.case.name for c in progress.failed_tests if not c.case.IsNegative()]
         for failure in failures:
             if failure in self.failingTests:
                 unexpectedPasses.append(failure)
@@ -116,7 +142,7 @@ class TestExpectations:
            lines = f.read().splitlines()
            oldLen = len(lines)
            for result in unexpectedPasses:
-               expectationLine = result + " failing"
+               expectationLine = result
               try:
                    lines.remove(expectationLine)
                except ValueError:
@@ -277,14 +303,17 @@ class TestCase(object):
        f.close()
        testRecord = parseTestRecord(self.contents, name)
        self.test = testRecord["test"]
+       if 'features' in testRecord:
+           self.features = testRecord["features"];
+       else:
+           self.features = []
        del testRecord["test"]
        del testRecord["header"]
-       del testRecord["commentary"]
        self.testRecord = testRecord;

    def GetName(self):
-       return path.join(*self.name)
+       return self.name

    def GetMode(self):
        if self.strict_mode:
@@ -310,14 +339,20 @@ class TestCase(object):
    def IsNoStrict(self):
        return 'noStrict' in self.testRecord

+   def IsExperimental(self):
+       for f in self.features:
+           if excludedFeatures.count(f) >= 1:
+               return True;
+       return False
+
    def GetSource(self):
        # "var testDescrip = " + str(self.testRecord) + ';\n\n' + \
-       source = self.suite.GetInclude("cth.js") + \
+       source = self.suite.GetInclude("assert.js") + \
                 self.suite.GetInclude("sta.js") + \
-                self.suite.GetInclude("ed.js") + \
-                self.suite.GetInclude("testBuiltInObject.js") + \
-                self.suite.GetInclude("testIntl.js") + \
                 self.test + '\n'
+       if 'includes' in self.testRecord:
+           for inc in self.testRecord['includes']:
+               source += self.suite.GetInclude(inc);

        if self.strict_mode:
            source = '"use strict";\nvar strict_mode = true;\n' + source
@@ -403,14 +438,23 @@ class TestSuite(object):

    def __init__(self, root, strict_only, non_strict_only, unmarked_default, load_expectations):
        # TODO: derive from packagerConfig.py
-       self.test_root = path.join(root, 'test', 'suite')
-       self.lib_root = path.join(root, 'test', 'harness')
+       self.test_root = path.join(root, 'test')
+       self.lib_root = path.join(root, 'harness')
        self.strict_only = strict_only
        self.non_strict_only = non_strict_only
        self.unmarked_default = unmarked_default
        self.include_cache = { }
        self.expectations = TestExpectations(load_expectations)

+   def IsExcludedTest(self, path):
+       if path.startswith('annexB'):
+           return True;
+       if path.startswith('harness'):
+           return True;
+       if path.startswith('intl402'):
+           return True;
+       return False;
+
    def Validate(self):
        if not path.exists(self.test_root):
            ReportError("No test repository found")
@@ -459,25 +503,25 @@ class TestSuite(object):
            else:
                logging.warning("Unexpected path %s", full_path)
                rel_path = full_path
-           if self.ShouldRun(rel_path, tests) and not rel_path.startswith("intl402" + os.sep):
+           if self.ShouldRun(rel_path, tests) and not self.IsExcludedTest(rel_path):
                basename = path.basename(full_path)[:-3]
-               name = rel_path.split(path.sep)[:-1] + [basename]
-               if EXCLUDE_LIST.count(basename) >= 1 or self.expectations.testsToSkip.count(basename) >= 1:
-                   print 'Excluded: ' + basename
+               name = rel_path.replace('.js', '')
+               if EXCLUDE_LIST.count(basename) >= 1 or self.expectations.testsToSkip.count(name) >= 1:
+                   print 'Excluded: ' + rel_path
                else:
                    if not self.non_strict_only:
                        strict_case = TestCase(self, name, full_path, True)
-                       if self.expectations.failingTests.count(basename) >= 1:
+                       if self.expectations.failingTests.count(name) >= 1:
                            strict_case.NegateResult()
-                       if not strict_case.IsNoStrict():
+                       if not strict_case.IsNoStrict() and not strict_case.IsExperimental():
                            if strict_case.IsOnlyStrict() or \
                                    self.unmarked_default in ['both', 'strict']:
                                cases.append(strict_case)
                    if not self.strict_only:
                        non_strict_case = TestCase(self, name, full_path, False)
-                       if self.expectations.failingTests.count(basename) >= 1:
+                       if self.expectations.failingTests.count(name) >= 1:
                            non_strict_case.NegateResult()
-                       if not non_strict_case.IsOnlyStrict():
+                       if not non_strict_case.IsOnlyStrict() and not non_strict_case.IsExperimental():
                            if non_strict_case.IsNoStrict() or \
                                    self.unmarked_default in ['both', 'non_strict']:
                                cases.append(non_strict_case)
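
Two semantics changes in this patch are easy to miss. First, the TestExpectations format is inverted: a bare test name now marks an expected failure, and an explicit "skip" keyword marks a test that is never run (previously a bare name meant skip and the keyword was "failing"). Second, a test is dropped in both strict and non-strict mode whenever its features front matter names anything in excludedFeatures. A minimal self-contained sketch of both rules, mirroring (not copied from) the logic above; the sample expectation entries and the two-entry feature list are hypothetical stand-ins, not data from the patch:

    # Sketch of the new TestExpectations parsing and the feature filter.
    excludedFeatures = ["BigInt", "regexp-lookbehind"]  # abbreviated stand-in

    sampleExpectations = """
    # expected to fail (bare path, no keyword):
    built-ins/hypothetical/test-a
    # never run (explicit keyword):
    language/hypothetical/test-b skip
    """.splitlines()

    failingTests = []
    testsToSkip = []
    for line in sampleExpectations:
        line = line.strip()
        if len(line) == 0 or line[0] == "#":
            continue                          # ignore blanks and comments
        record = line.split()
        if len(record) == 1:
            failingTests.append(record[0])    # bare name: expected failure
        elif record[1] == "skip":
            testsToSkip.append(record[0])     # "<name> skip": excluded from the run

    def isExperimental(features):
        # same test as TestCase.IsExperimental(): any excluded feature disables the case
        return any(f in excludedFeatures for f in features)

    print(failingTests)                 # ['built-ins/hypothetical/test-a']
    print(testsToSkip)                  # ['language/hypothetical/test-b']
    print(isExperimental(["BigInt"]))   # True

Note that negated (expected-failure) tests still run: that is what lets TestExpectations.update() detect tests that start passing unexpectedly and prune their now-bare entries from the expectations file.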