author     Simon Hausmann <simon.hausmann@qt.io>    2017-03-27 14:53:52 +0200
committer  Simon Hausmann <simon.hausmann@qt.io>    2017-03-27 14:53:52 +0200
commit     e321dc6bee3645e2492914f99fc30f8019e9383b (patch)
tree       4e1c42ea70967e11bf6c4920397b2478bdc55e4c /tests/auto/qml/ecmascripttests
parent     d1c43e44572f5b554467e5f1c2db4914b549b569 (diff)
parent     e2863c80fed06104eedfe83bce2737f2d24908a6 (diff)
Merge remote-tracking branch 'origin/5.9' into dev
Conflicts:
	src/qml/jsruntime/qv4functionobject_p.h

Change-Id: I4bbed45f1fe02cf64df3c8a5f92f811e38e772f3
Diffstat (limited to 'tests/auto/qml/ecmascripttests')
-rw-r--r--  tests/auto/qml/ecmascripttests/TestExpectations        | 178
-rw-r--r--  tests/auto/qml/ecmascripttests/ecmascripttests.pro     |  20
m---------  tests/auto/qml/ecmascripttests/test262                 |   0
-rwxr-xr-x  tests/auto/qml/ecmascripttests/test262.py              | 600
-rw-r--r--  tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp |  77
5 files changed, 875 insertions, 0 deletions
diff --git a/tests/auto/qml/ecmascripttests/TestExpectations b/tests/auto/qml/ecmascripttests/TestExpectations
new file mode 100644
index 0000000000..27498de473
--- /dev/null
+++ b/tests/auto/qml/ecmascripttests/TestExpectations
@@ -0,0 +1,178 @@
+# wrong tests
+# uses octal number
+15.2.3.6-2-17-1 failing
+
+# these fail after the update to Unicode 6.3.
+# the reason is that u+180e changed type from whitespace to control
+S9.3.1_A2
+S9.3.1_A3_T1
+S9.3.1_A3_T2
+S15.1.2.2_A2_T10
+S15.1.2.3_A2_T10
+15.5.4.20-3-2
+15.5.4.20-3-3
+15.5.4.20-3-4
+15.5.4.20-3-5
+15.5.4.20-3-6
+
+10.4.3-1-106 failing
+11.2.3-3_3 failing
+S13_A15_T4 failing
+S15.4.4.3_A1_T1 failing
+S15.4.4.3_A3_T1 failing
+S15.5.4.11_A5_T1 failing
+S15.2.4.4_A14 failing
+
+# Function declarations in conditionals. We allow them, because the real
+# world requires them.
+Sbp_12.5_A9_T3 failing
+Sbp_12.6.1_A13_T3 failing
+Sbp_12.6.2_A13_T3 failing
+Sbp_12.6.4_A13_T3 failing
+
+# es6: function length attributes are configurable; they weren't in es5
+S15.1.2.2_A9.2 failing
+S15.1.3.1_A5.2 failing
+S15.1.3.2_A5.2 failing
+S15.1.3.3_A5.2 failing
+S15.1.2.3_A7.2 failing
+S15.1.2.4_A2.2 failing
+S15.1.2.5_A2.2 failing
+S15.1.3.4_A5.2 failing
+15.2.3.3-4-186 failing
+S15.2.4.2_A9 failing
+S15.2.4.3_A9 failing
+S15.2.4.4_A9 failing
+S15.2.4.5_A9 failing
+S15.2.4.6_A9 failing
+S15.2.4.7_A9 failing
+15.3.3.2-1 failing
+15.4.4.2_A4.2
+S15.3.4.2_A9 failing
+S15.3.4.3_A9 failing
+S15.3.4.4_A9 failing
+15.3.4.5-15-2 failing
+S15.4.4.2_A4.2 failing
+S15.4.4.3_A4.2 failing
+S15.4.4.4_A4.2 failing
+S15.4.4.5_A6.2 failing
+S15.4.4.6_A5.2 failing
+S15.4.4.7_A6.2 failing
+S15.4.4.8_A5.2 failing
+S15.4.4.9_A5.2 failing
+S15.4.4.10_A5.2 failing
+S15.4.4.11_A7.2 failing
+S15.4.4.12_A5.2 failing
+S15.4.4.13_A5.2 failing
+S15.5.4.10_A9 failing
+S15.5.4.11_A9 failing
+S15.5.4.12_A9 failing
+S15.5.4.13_A9 failing
+S15.5.4.14_A9 failing
+S15.5.4.15_A9 failing
+S15.5.4.16_A9 failing
+S15.5.4.17_A9 failing
+S15.5.4.18_A9 failing
+S15.5.4.19_A9 failing
+S15.5.4.4_A9 failing
+S15.5.4.5_A9 failing
+S15.5.4.6_A9 failing
+S15.5.4.7_A9 failing
+S15.5.4.8_A9 failing
+S15.5.4.9_A9 failing
+S15.9.4.2_A3_T2 failing
+S15.9.4.3_A3_T2 failing
+S15.9.5.2_A3_T2 failing
+S15.9.5.3_A3_T2 failing
+S15.9.5.4_A3_T2 failing
+S15.9.5.5_A3_T2 failing
+S15.9.5.1_A3_T2 failing
+S15.9.5.10_A3_T2 failing
+S15.9.5.11_A3_T2 failing
+S15.9.5.12_A3_T2 failing
+S15.9.5.13_A3_T2 failing
+S15.9.5.14_A3_T2 failing
+S15.9.5.15_A3_T2 failing
+S15.9.5.16_A3_T2 failing
+S15.9.5.17_A3_T2 failing
+S15.9.5.18_A3_T2 failing
+S15.9.5.19_A3_T2 failing
+S15.9.5.20_A3_T2 failing
+S15.9.5.21_A3_T2 failing
+S15.9.5.22_A3_T2 failing
+S15.9.5.23_A3_T2 failing
+S15.9.5.24_A3_T2 failing
+S15.9.5.25_A3_T2 failing
+S15.9.5.26_A3_T2 failing
+S15.9.5.27_A3_T2 failing
+S15.9.5.28_A3_T2 failing
+S15.9.5.29_A3_T2 failing
+S15.9.5.30_A3_T2 failing
+S15.9.5.31_A3_T2 failing
+S15.9.5.32_A3_T2 failing
+S15.9.5.33_A3_T2 failing
+S15.9.5.34_A3_T2 failing
+S15.9.5.35_A3_T2 failing
+S15.9.5.36_A3_T2 failing
+S15.9.5.37_A3_T2 failing
+S15.9.5.38_A3_T2 failing
+S15.9.5.39_A3_T2 failing
+S15.9.5.40_A3_T2 failing
+S15.9.5.41_A3_T2 failing
+S15.9.5.42_A3_T2 failing
+S15.9.5.6_A3_T2 failing
+S15.9.5.7_A3_T2 failing
+S15.9.5.8_A3_T2 failing
+S15.9.5.9_A3_T2 failing
+S15.10.6.2_A9 failing
+S15.10.6.3_A9 failing
+S15.10.6.4_A9 failing
+
+# es6: Object.freeze(v) on a non-object returns v, no longer TypeError
+15.2.3.9-1 failing
+15.2.3.9-1-1 failing
+15.2.3.9-1-2 failing
+15.2.3.9-1-3 failing
+15.2.3.9-1-4 failing
+# es6: Object.preventExtensions(O) on a non-object, no longer TypeError
+15.2.3.10-1 failing
+15.2.3.10-1-3 failing
+15.2.3.10-1-4 failing
+# es6: Object.isSealed(O) on a non-object, no longer TypeError
+15.2.3.11-1
+# es6: Object.isFrozen(O) on a non-object, no longer TypeError
+15.2.3.12-1
+15.2.3.12-1-3
+15.2.3.12-1-4
+# es6: Object.isExtensible(O) on a non-object, no longer TypeError
+15.2.3.13-1
+15.2.3.13-1-3
+15.2.3.13-1-4
+# es6: Object.keys(O) on a non-object, no longer TypeError
+15.2.3.14-1-1
+15.2.3.14-1-2
+15.2.3.14-1-3
+15.2.3.14-1
+15.2.3.14-2
+15.2.3.14-3
+# es6: Object.getOwnPropertyDescriptor(O) on a non-object, no longer TypeError
+15.2.3.3-1
+15.2.3.3-1-3
+15.2.3.3-1-4
+# es6: Object.getPrototypeOf(O) on a non-object, no longer TypeError
+15.2.3.2-1
+15.2.3.2-1-3
+15.2.3.2-1-4
+# es6: Object.getOwnPropertyNames(O) on a non-object, no longer TypeError
+15.2.3.4-1
+15.2.3.4-1-4
+15.2.3.4-1-5
+# es6: Object.seal(O) on a non-object, no longer TypeError
+15.2.3.8-1
+15.2.3.8-1-1
+15.2.3.8-1-2
+15.2.3.8-1-3
+15.2.3.8-1-4
+
+# es6: Date.prototype is no longer a DateObject
+15.9.5.40_1 failing
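
The file above is consumed by the TestExpectations class in test262.py below: blank lines and '#' comments are ignored, a line consisting of just a test name marks that test as skipped, and a name followed by the keyword "failing" marks a test that is expected to fail (the runner negates its result). A minimal stand-alone sketch of that parsing logic, assuming the same file format:

    # Sketch of the TestExpectations format parser; mirrors the logic
    # of the TestExpectations class in test262.py below.
    def parse_expectations(path):
        tests_to_skip = []
        failing_tests = []
        with open(path) as f:
            for line in f.read().splitlines():
                line = line.strip()
                if not line or line.startswith("#"):
                    continue  # blank line or comment
                record = line.split()
                if len(record) == 1:
                    tests_to_skip.append(record[0])   # bare name: skip
                elif record[1] == "failing":
                    failing_tests.append(record[0])   # expected to fail
        return tests_to_skip, failing_tests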
diff --git a/tests/auto/qml/ecmascripttests/ecmascripttests.pro b/tests/auto/qml/ecmascripttests/ecmascripttests.pro
new file mode 100644
index 0000000000..6d3ee12307
--- /dev/null
+++ b/tests/auto/qml/ecmascripttests/ecmascripttests.pro
@@ -0,0 +1,20 @@
+CONFIG += testcase
+TARGET = tst_ecmascripttests
+QT += testlib
+macos:CONFIG -= app_bundle
+SOURCES += tst_ecmascripttests.cpp
+DEFINES += SRCDIR=\\\"$$PWD\\\"
+
+TESTSCRIPT=$$PWD/test262.py
+isEmpty(V4CMD): V4CMD = qmljs
+
+checkjittarget.target = check-jit
+checkjittarget.commands = python $$TESTSCRIPT --command=$$V4CMD --parallel --with-test-expectations --update-expectations
+checkjittarget.depends = all
+QMAKE_EXTRA_TARGETS += checkjittarget
+
+checkmothtarget.target = check-interpreter
+checkmothtarget.commands = python $$TESTSCRIPT --command=\"$$V4CMD --interpret\" --parallel --with-test-expectations
+checkmothtarget.depends = all
+QMAKE_EXTRA_TARGETS += checkmothtarget
+
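The two extra targets are invoked from the build directory as "make check-jit" and "make check-interpreter". Expanded with the default V4CMD=qmljs (and TESTSCRIPT resolving to the absolute path of test262.py), the commands they run are:

    python test262.py --command=qmljs --parallel --with-test-expectations --update-expectations
    python test262.py --command="qmljs --interpret" --parallel --with-test-expectations

Note that only the JIT target passes --update-expectations, so only a check-jit run rewrites TestExpectations when an expected failure starts passing.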
diff --git a/tests/auto/qml/ecmascripttests/test262 b/tests/auto/qml/ecmascripttests/test262
new file mode 160000
+Subproject commit d60c4ed97e69639bc5bc1db43a98828debf80c8
diff --git a/tests/auto/qml/ecmascripttests/test262.py b/tests/auto/qml/ecmascripttests/test262.py
new file mode 100755
index 0000000000..99f029cffd
--- /dev/null
+++ b/tests/auto/qml/ecmascripttests/test262.py
@@ -0,0 +1,600 @@
+#!/usr/bin/env python
+#############################################################################
+##
+## Copyright (C) 2015 The Qt Company Ltd.
+## Contact: http://www.qt.io/licensing/
+##
+## This file is part of the test suite module of the Qt Toolkit.
+##
+## $QT_BEGIN_LICENSE:BSD$
+## You may use this file under the terms of the BSD license as follows:
+##
+## "Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions are
+## met:
+## * Redistributions of source code must retain the above copyright
+## notice, this list of conditions and the following disclaimer.
+## * Redistributions in binary form must reproduce the above copyright
+## notice, this list of conditions and the following disclaimer in
+## the documentation and/or other materials provided with the
+## distribution.
+## * Neither the name of The Qt Company Ltd nor the names of its
+## contributors may be used to endorse or promote products derived
+## from this software without specific prior written permission.
+##
+##
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
+##
+## $QT_END_LICENSE$
+##
+#############################################################################
+
+# Copyright 2009 the Sputnik authors. All rights reserved.
+# This code is governed by the BSD license found in the LICENSE file.
+
+# This is derived from sputnik.py, the Sputnik console test runner,
+# with elements from packager.py, which is separately
+# copyrighted. TODO: Refactor so there is less duplication between
+# test262.py and packager.py.
+
+import sys
+from os import path
+rootDir = path.dirname(path.realpath(__file__))
+sys.path.insert(0, path.abspath(rootDir + "/test262/tools/packaging"))
+
+import logging
+import optparse
+import os
+import platform
+import re
+import subprocess
+import tempfile
+import time
+import xml.dom.minidom
+import datetime
+import shutil
+import json
+import stat
+import multiprocessing
+import signal
+
+
+from parseTestRecord import parseTestRecord, stripHeader
+
+from packagerConfig import *
+
+# ############# Helpers needed for parallel multi-process test execution ############
+
+def runTest(case, args):
+ return case.Run(args)
+
+def runTestVarArgs(args):
+ return runTest(*args)
+
+def initWorkerProcess():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+# #############
+
+class Test262Error(Exception):
+ def __init__(self, message):
+ self.message = message
+
+def ReportError(s):
+ raise Test262Error(s)
+
+
+class TestExpectations:
+ def __init__(self, enabled):
+ self.testsToSkip = []
+ self.failingTests = []
+        if not enabled:
+            return
+        f = open(rootDir + "/TestExpectations")
+ for line in f.read().splitlines():
+ line = line.strip()
+ if len(line) == 0 or line[0] == "#":
+ continue
+ record = line.split()
+ if len(record) == 1:
+ self.testsToSkip.append(record[0])
+ else:
+ test = record[0]
+ expectation = record[1]
+ if expectation == "failing":
+ self.failingTests.append(test)
+ f.close()
+
+ def update(self, progress):
+ unexpectedPasses = [c.case.name[-1] for c in progress.failed_tests if c.case.IsNegative()]
+
+ # If a test fails that we expected to fail, then it actually passed unexpectedly.
+ failures = [c.case.name[-1] for c in progress.failed_tests if not c.case.IsNegative()]
+ for failure in failures:
+ if failure in self.failingTests:
+ unexpectedPasses.append(failure)
+
+ f = open(rootDir + "/TestExpectations")
+ lines = f.read().splitlines()
+ oldLen = len(lines)
+ for result in unexpectedPasses:
+ expectationLine = result + " failing"
+ try:
+ lines.remove(expectationLine)
+ except ValueError:
+ pass
+
+ f.close()
+ if len(lines) != oldLen:
+ f = open(rootDir + "/TestExpectations", "w")
+ f.write("\n".join(lines))
+ f.close()
+ print "Changes to TestExpectations written!"
+
+
+if not os.path.exists(EXCLUDED_FILENAME):
+ print "Cannot generate (JSON) test262 tests without a file," + \
+ " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
+ sys.exit(1)
+EXCLUDE_LIST = xml.dom.minidom.parse(EXCLUDED_FILENAME)
+EXCLUDE_LIST = EXCLUDE_LIST.getElementsByTagName("test")
+EXCLUDE_LIST = [x.getAttribute("id") for x in EXCLUDE_LIST]
+
+
+def BuildOptions():
+ result = optparse.OptionParser()
+ result.add_option("--command", default="qmljs", help="The command-line to run")
+ result.add_option("--tests", default=path.abspath(rootDir + '/test262'),
+ help="Path to the tests")
+ result.add_option("--cat", default=False, action="store_true",
+ help="Print packaged test code that would be run")
+ result.add_option("--summary", default=True, action="store_true",
+ help="Print summary after running tests")
+ result.add_option("--full-summary", default=False, action="store_true",
+ help="Print summary and test output after running tests")
+ result.add_option("--strict_only", default=False, action="store_true",
+ help="Test only strict mode")
+ result.add_option("--non_strict_only", default=False, action="store_true",
+ help="Test only non-strict mode")
+ result.add_option("--parallel", default=False, action="store_true",
+ help="Run tests in parallel")
+ result.add_option("--with-test-expectations", default=False, action="store_true",
+ help="Parse TestExpectations to deal with tests known to fail")
+ result.add_option("--update-expectations", default=False, action="store_true",
+                      help="Update the test expectations file when a test passes that was expected to fail")
+ # TODO: Once enough tests are made strict compat, change the default
+ # to "both"
+ result.add_option("--unmarked_default", default="non_strict",
+ help="default mode for tests of unspecified strictness")
+ return result
+
+
+def ValidateOptions(options):
+ if not options.command:
+ ReportError("A --command must be specified.")
+ if not path.exists(options.tests):
+ ReportError("Couldn't find test path '%s'" % options.tests)
+
+
+placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")
+
+
+def IsWindows():
+ p = platform.system()
+ return (p == 'Windows') or (p == 'Microsoft')
+
+
+class TempFile(object):
+
+ def __init__(self, suffix="", prefix="tmp", text=False):
+ self.suffix = suffix
+ self.prefix = prefix
+ self.text = text
+ self.fd = None
+ self.name = None
+ self.is_closed = False
+ self.Open()
+
+ def Open(self):
+ (self.fd, self.name) = tempfile.mkstemp(
+ suffix = self.suffix,
+ prefix = self.prefix,
+ text = self.text)
+
+ def Write(self, str):
+ os.write(self.fd, str)
+
+ def Read(self):
+ f = file(self.name)
+ result = f.read()
+ f.close()
+ return result
+
+ def Close(self):
+ if not self.is_closed:
+ self.is_closed = True
+ os.close(self.fd)
+
+ def Dispose(self):
+ try:
+ self.Close()
+ os.unlink(self.name)
+ except OSError, e:
+ logging.error("Error disposing temp file: %s", str(e))
+
+
+class TestResult(object):
+
+ def __init__(self, exit_code, stdout, stderr, case):
+ self.exit_code = exit_code
+ self.stdout = stdout
+ self.stderr = stderr
+ self.case = case
+
+ def ReportOutcome(self, long_format):
+ name = self.case.GetName()
+ mode = self.case.GetMode()
+ if self.HasUnexpectedOutcome():
+ if self.case.IsNegative():
+ print "=== %s was expected to fail in %s, but didn't ===" % (name, mode)
+ else:
+ if long_format:
+ print "=== %s failed in %s ===" % (name, mode)
+ else:
+ print "%s in %s: " % (name, mode)
+ out = self.stdout.strip()
+ if len(out) > 0:
+ print "--- output ---"
+ print out
+ err = self.stderr.strip()
+ if len(err) > 0:
+ print "--- errors ---"
+ print err
+ if long_format:
+ print "==="
+ elif self.case.IsNegative():
+ print "%s failed in %s as expected" % (name, mode)
+ else:
+ print "%s passed in %s" % (name, mode)
+
+ def HasFailed(self):
+ return self.exit_code != 0
+
+ def HasUnexpectedOutcome(self):
+ if self.case.IsNegative():
+ return not self.HasFailed()
+ else:
+ return self.HasFailed()
+
+
+class TestCase(object):
+
+ def __init__(self, suite, name, full_path, strict_mode):
+ self.suite = suite
+ self.name = name
+ self.full_path = full_path
+ self.strict_mode = strict_mode
+ f = open(self.full_path)
+ self.contents = f.read()
+ f.close()
+ testRecord = parseTestRecord(self.contents, name)
+ self.test = testRecord["test"]
+ del testRecord["test"]
+ del testRecord["header"]
+ del testRecord["commentary"]
+        self.testRecord = testRecord
+
+
+ def GetName(self):
+ return path.join(*self.name)
+
+ def GetMode(self):
+ if self.strict_mode:
+ return "strict mode"
+ else:
+ return "non-strict mode"
+
+ def GetPath(self):
+ return self.name
+
+ def NegateResult(self):
+ if self.IsNegative():
+ del self.testRecord['negative']
+ else:
+            self.testRecord['negative'] = "Some failure"
+
+ def IsNegative(self):
+ return 'negative' in self.testRecord
+
+ def IsOnlyStrict(self):
+ return 'onlyStrict' in self.testRecord
+
+ def IsNoStrict(self):
+ return 'noStrict' in self.testRecord
+
+ def GetSource(self):
+ # "var testDescrip = " + str(self.testRecord) + ';\n\n' + \
+ source = self.suite.GetInclude("cth.js") + \
+ self.suite.GetInclude("sta.js") + \
+ self.suite.GetInclude("ed.js") + \
+ self.suite.GetInclude("testBuiltInObject.js") + \
+ self.suite.GetInclude("testIntl.js") + \
+ self.test + '\n'
+
+ if self.strict_mode:
+ source = '"use strict";\nvar strict_mode = true;\n' + source
+ else:
+ source = "var strict_mode = false; \n" + source
+ return source
+
+ def InstantiateTemplate(self, template, params):
+ def GetParameter(match):
+ key = match.group(1)
+ return params.get(key, match.group(0))
+ return placeHolderPattern.sub(GetParameter, template)
+
+ def Execute(self, command):
+ if IsWindows():
+ args = '%s' % command
+ else:
+ args = command.split(" ")
+ stdout = TempFile(prefix="test262-out-")
+ stderr = TempFile(prefix="test262-err-")
+ try:
+ logging.info("exec: %s", str(args))
+ process = subprocess.Popen(
+ args,
+ shell = IsWindows(),
+ stdout = stdout.fd,
+ stderr = stderr.fd
+ )
+ code = process.wait()
+ out = stdout.Read()
+ err = stderr.Read()
+ finally:
+ stdout.Dispose()
+ stderr.Dispose()
+ return (code, out, err)
+
+ def RunTestIn(self, command_template, tmp):
+ tmp.Write(self.GetSource())
+ tmp.Close()
+ command = self.InstantiateTemplate(command_template, {
+ 'path': tmp.name
+ })
+ (code, out, err) = self.Execute(command)
+ return TestResult(code, out, err, self)
+
+ def Run(self, command_template):
+ tmp = TempFile(suffix=".js", prefix="test262-", text=True)
+ try:
+ result = self.RunTestIn(command_template, tmp)
+ finally:
+ tmp.Dispose()
+ return result
+
+ def Print(self):
+ print self.GetSource()
+
+
+class ProgressIndicator(object):
+
+ def __init__(self, count):
+ self.count = count
+ self.succeeded = 0
+ self.failed = 0
+ self.failed_tests = []
+
+ def HasRun(self, result):
+ result.ReportOutcome(True)
+ if result.HasUnexpectedOutcome():
+ self.failed += 1
+ self.failed_tests.append(result)
+ else:
+ self.succeeded += 1
+
+
+def MakePlural(n):
+ if (n == 1):
+ return (n, "")
+ else:
+ return (n, "s")
+
+
+class TestSuite(object):
+
+ def __init__(self, root, strict_only, non_strict_only, unmarked_default, load_expectations):
+ # TODO: derive from packagerConfig.py
+ self.test_root = path.join(root, 'test', 'suite')
+ self.lib_root = path.join(root, 'test', 'harness')
+ self.strict_only = strict_only
+ self.non_strict_only = non_strict_only
+ self.unmarked_default = unmarked_default
+ self.include_cache = { }
+ self.expectations = TestExpectations(load_expectations)
+
+ def Validate(self):
+ if not path.exists(self.test_root):
+ ReportError("No test repository found")
+ if not path.exists(self.lib_root):
+ ReportError("No test library found")
+
+ def IsHidden(self, path):
+ return path.startswith('.') or path == 'CVS'
+
+ def IsTestCase(self, path):
+ return path.endswith('.js')
+
+ def ShouldRun(self, rel_path, tests):
+ if len(tests) == 0:
+ return True
+ for test in tests:
+ if test in rel_path:
+ return True
+ return False
+
+ def GetInclude(self, name):
+        if name not in self.include_cache:
+ static = path.join(self.lib_root, name)
+ if path.exists(static):
+ f = open(static)
+ contents = stripHeader(f.read())
+ contents = re.sub(r'\r\n', '\n', contents)
+ self.include_cache[name] = contents + "\n"
+ f.close()
+ else:
+ ReportError("Can't find: " + static)
+ return self.include_cache[name]
+
+ def EnumerateTests(self, tests):
+ logging.info("Listing tests in %s", self.test_root)
+ cases = []
+ for root, dirs, files in os.walk(self.test_root):
+ for f in [x for x in dirs if self.IsHidden(x)]:
+ dirs.remove(f)
+ dirs.sort()
+ for f in sorted(files):
+ if self.IsTestCase(f):
+ full_path = path.join(root, f)
+ if full_path.startswith(self.test_root):
+ rel_path = full_path[len(self.test_root)+1:]
+ else:
+ logging.warning("Unexpected path %s", full_path)
+ rel_path = full_path
+ if self.ShouldRun(rel_path, tests) and not rel_path.startswith("intl402" + os.sep):
+ basename = path.basename(full_path)[:-3]
+ name = rel_path.split(path.sep)[:-1] + [basename]
+ if EXCLUDE_LIST.count(basename) >= 1 or self.expectations.testsToSkip.count(basename) >= 1:
+ print 'Excluded: ' + basename
+ else:
+ if not self.non_strict_only:
+ strict_case = TestCase(self, name, full_path, True)
+ if self.expectations.failingTests.count(basename) >= 1:
+ strict_case.NegateResult()
+ if not strict_case.IsNoStrict():
+ if strict_case.IsOnlyStrict() or \
+ self.unmarked_default in ['both', 'strict']:
+ cases.append(strict_case)
+ if not self.strict_only:
+ non_strict_case = TestCase(self, name, full_path, False)
+ if self.expectations.failingTests.count(basename) >= 1:
+ non_strict_case.NegateResult()
+ if not non_strict_case.IsOnlyStrict():
+ if non_strict_case.IsNoStrict() or \
+ self.unmarked_default in ['both', 'non_strict']:
+ cases.append(non_strict_case)
+ logging.info("Done listing tests")
+ return cases
+
+ def PrintSummary(self, progress):
+ print
+ print "=== Summary ==="
+ count = progress.count
+ succeeded = progress.succeeded
+ failed = progress.failed
+ print " - Ran %i test%s" % MakePlural(count)
+ if progress.failed == 0:
+ print " - All tests succeeded"
+ else:
+ percent = ((100.0 * succeeded) / count,)
+ print " - Passed %i test%s (%.1f%%)" % (MakePlural(succeeded) + percent)
+ percent = ((100.0 * failed) / count,)
+ print " - Failed %i test%s (%.1f%%)" % (MakePlural(failed) + percent)
+ positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
+ negative = [c for c in progress.failed_tests if c.case.IsNegative()]
+ if len(positive) > 0:
+ print
+ print "Failed tests"
+ for result in positive:
+ print " %s in %s" % (result.case.GetName(), result.case.GetMode())
+ if len(negative) > 0:
+ print
+ print "Expected to fail but passed ---"
+ for result in negative:
+ print " %s in %s" % (result.case.GetName(), result.case.GetMode())
+
+ def PrintFailureOutput(self, progress):
+ for result in progress.failed_tests:
+ print
+ result.ReportOutcome(False)
+
+ def Run(self, command_template, tests, print_summary, full_summary, parallel, update_expectations):
+ if not "{{path}}" in command_template:
+ command_template += " {{path}}"
+ cases = self.EnumerateTests(tests)
+ if len(cases) == 0:
+ ReportError("No tests to run")
+ progress = ProgressIndicator(len(cases))
+
+ if parallel:
+ pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(), initializer=initWorkerProcess)
+ results = pool.imap_unordered(func=runTestVarArgs, iterable=[(case, command_template) for case in cases], chunksize=multiprocessing.cpu_count() * 8)
+ for result in results:
+ progress.HasRun(result)
+ else:
+ for case in cases:
+ result = case.Run(command_template)
+ progress.HasRun(result)
+ if print_summary:
+ self.PrintSummary(progress)
+ if full_summary:
+ self.PrintFailureOutput(progress)
+ else:
+ print
+ print "Use --full-summary to see output from failed tests"
+ print
+ if update_expectations:
+ self.expectations.update(progress)
+ return progress.failed == 0
+
+ def Print(self, tests):
+ cases = self.EnumerateTests(tests)
+ if len(cases) > 0:
+ cases[0].Print()
+
+
+def Main():
+    # Uncomment the next line for more logging info.
+    #logging.basicConfig(level=logging.DEBUG)
+    # Some date tests rely on being run in pacific time.
+ os.environ["TZ"] = "PST8PDT"
+ os.environ["LANG"] = "en_US.UTF-8"
+ parser = BuildOptions()
+ (options, args) = parser.parse_args()
+ ValidateOptions(options)
+ test_suite = TestSuite(options.tests,
+ options.strict_only,
+ options.non_strict_only,
+ options.unmarked_default,
+ options.with_test_expectations)
+ test_suite.Validate()
+ if options.cat:
+ test_suite.Print(args)
+ return 0
+ else:
+ if test_suite.Run(options.command, args,
+ options.summary or options.full_summary,
+ options.full_summary,
+ options.parallel,
+ options.update_expectations):
+ return 0
+ else:
+ return 1
+
+
+if __name__ == '__main__':
+ try:
+ sys.exit(Main())
+ except Test262Error, e:
+ print "Error: %s" % e.message
+ sys.exit(1)
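
Each test case is written to a temporary .js file (prefixed "test262-") and the runner substitutes the file's path into the --command template: TestSuite.Run appends " {{path}}" if the template lacks it, and placeHolderPattern fills it in per test. A self-contained sketch of that substitution:

    import re

    placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")

    def instantiate(template, params):
        # Append the placeholder if the command template lacks one,
        # exactly as TestSuite.Run does before dispatching cases.
        if "{{path}}" not in template:
            template += " {{path}}"
        # Unknown placeholders are left untouched, as in
        # TestCase.InstantiateTemplate.
        return placeHolderPattern.sub(
            lambda m: params.get(m.group(1), m.group(0)), template)

    print(instantiate("qmljs --interpret", {"path": "/tmp/test262-xyz.js"}))
    # -> qmljs --interpret /tmp/test262-xyz.js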
diff --git a/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp b/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp
new file mode 100644
index 0000000000..2ce0cfe210
--- /dev/null
+++ b/tests/auto/qml/ecmascripttests/tst_ecmascripttests.cpp
@@ -0,0 +1,77 @@
+/****************************************************************************
+**
+** Copyright (C) 2017 The Qt Company Ltd.
+** Contact: https://www.qt.io/licensing/
+**
+** This file is part of the test suite of the Qt Toolkit.
+**
+** $QT_BEGIN_LICENSE:GPL-EXCEPT$
+** Commercial License Usage
+** Licensees holding valid commercial Qt licenses may use this file in
+** accordance with the commercial license agreement provided with the
+** Software or, alternatively, in accordance with the terms contained in
+** a written agreement between you and The Qt Company. For licensing terms
+** and conditions see https://www.qt.io/terms-conditions. For further
+** information use the contact form at https://www.qt.io/contact-us.
+**
+** GNU General Public License Usage
+** Alternatively, this file may be used under the terms of the GNU
+** General Public License version 3 as published by the Free Software
+** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
+** included in the packaging of this file. Please review the following
+** information to ensure the GNU General Public License requirements will
+** be met: https://www.gnu.org/licenses/gpl-3.0.html.
+**
+** $QT_END_LICENSE$
+**
+****************************************************************************/
+
+
+#include <QtTest/QtTest>
+#include <QProcess>
+#include <QLibraryInfo>
+
+class tst_EcmaScriptTests : public QObject
+{
+ Q_OBJECT
+private slots:
+ void runTests_data();
+ void runTests();
+};
+
+void tst_EcmaScriptTests::runTests_data()
+{
+ QTest::addColumn<QString>("qmljsParameter");
+
+ QTest::newRow("jit") << QStringLiteral("--jit");
+ // Not passing yet: QTest::newRow("interpreter") << QStringLiteral("--interpret");
+}
+
+void tst_EcmaScriptTests::runTests()
+{
+#if defined(Q_OS_LINUX) && defined(Q_PROCESSOR_X86_64)
+ QFETCH(QString, qmljsParameter);
+
+ QProcess process;
+ process.setProcessChannelMode(QProcess::ForwardedChannels);
+ process.setWorkingDirectory(QLatin1String(SRCDIR));
+ process.setProgram("python");
+    process.setArguments(QStringList()
+                         << "test262.py"
+                         << "--command=" + QLibraryInfo::location(QLibraryInfo::BinariesPath)
+                            + "/qmljs " + qmljsParameter
+                         << "--parallel" << "--with-test-expectations");
+
+ qDebug() << "Going to run" << process.program() << process.arguments() << "in" << process.workingDirectory();
+
+ process.start();
+ QVERIFY(process.waitForStarted());
+ const int timeoutInMSecs = 20 * 60 * 1000;
+ QVERIFY2(process.waitForFinished(timeoutInMSecs), "Tests did not terminate in time -- see output above for details");
+ QVERIFY2(process.exitStatus() == QProcess::NormalExit, "Running the test harness failed -- see output above for details");
+ QVERIFY2(process.exitCode() == 0, "Tests failed -- see output above for details");
+#else
+ QSKIP("Currently the ecmascript tests are only run on Linux/x86-64");
+#endif
+}
+
+QTEST_MAIN(tst_EcmaScriptTests)
+
+#include "tst_ecmascripttests.moc"
+
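The C++ autotest is a thin wrapper: it locates qmljs next to the other Qt binaries, shells out to test262.py from the test's source directory, and fails if the harness exits abnormally, with a non-zero code, or after more than 20 minutes. A rough Python equivalent of the invocation it assembles (QT_BIN below is a hypothetical stand-in for QLibraryInfo::location(QLibraryInfo::BinariesPath)):

    import os
    import subprocess

    QT_BIN = os.environ.get("QT_BIN", "/usr/lib/qt5/bin")  # hypothetical path
    cmd = ["python", "test262.py",
           "--command=" + os.path.join(QT_BIN, "qmljs") + " --jit",
           "--parallel", "--with-test-expectations"]
    # The C++ driver runs this from the test's source directory (SRCDIR).
    rc = subprocess.call(cmd, cwd="tests/auto/qml/ecmascripttests")
    assert rc == 0, "Tests failed -- see output above for details"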