author    Simon Hausmann <simon.hausmann@qt.io>    2017-02-03 10:28:28 +0100
committer Simon Hausmann <simon.hausmann@qt.io>    2017-03-24 20:21:36 +0000
commit    8bed5de48c3ec24f9ad335e5398d92d0a60c448c (patch)
tree      5e40be1f1665be96ca15fcba13a7b76f33255c76 /tests/manual
parent    c32468189941c61be6266da3913de5c1c469bd95 (diff)
Run ES5 tests on Linux/x86-64
This requires including the test suite as a submodule by default and bumping the module to a new sha1 that removes some particularly long paths.

Also force the test suite to run under a US locale, as tests like ch15/15.5/15.5.4/15.5.4.7/S15.5.4.7_A1_T11 cannot deal with locale-dependent output for date formatting.

The test harness now returns a non-zero exit code when a test fails; the QTestLib wrapper uses this to determine success or failure.

The tests with the JIT are run, but the tests with the interpreter are omitted for the moment, until the last remaining failure is fixed.

The tests add about 10-15 minutes to the total time it takes to run tests on Linux in the CI.

Change-Id: Id01fd3b41350f9c9a6ce9e43236f51f7f0fb71c8
Reviewed-by: Robin Burchell <robin.burchell@crimson.no>
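As a rough illustration of the exit-code contract described above: the wrapper only needs to inspect the harness's return value. This is a minimal sketch, not the actual QTestLib wrapper (which is C++ and not part of this diff); the invocation path and flags here are assumptions for illustration.

    import subprocess
    import sys

    # Hypothetical driver: run the test262 harness and forward its verdict.
    # test262.py exits non-zero when any test fails, so the exit code alone
    # is enough to decide overall success or failure.
    code = subprocess.call(["python", "test262.py",
                            "--command=qmljs", "--parallel",
                            "--with-test-expectations"])
    sys.exit(0 if code == 0 else 1)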
Diffstat (limited to 'tests/manual')
-rw-r--r--  tests/manual/v4/TestExpectations    31
m---------  tests/manual/v4/test262              0
-rwxr-xr-x  tests/manual/v4/test262.py          595
-rw-r--r--  tests/manual/v4/tests.pro            15
4 files changed, 0 insertions(+), 641 deletions(-)
diff --git a/tests/manual/v4/TestExpectations b/tests/manual/v4/TestExpectations
deleted file mode 100644
index 49f107452a..0000000000
--- a/tests/manual/v4/TestExpectations
+++ /dev/null
@@ -1,31 +0,0 @@
-# wrong tests
-# uses octal number
-15.2.3.6-2-17-1 failing
-
-# these fail after the update to Unicode 6.3.
-# the reason is that u+180e changed type from whitespace to control
-S9.3.1_A2
-S9.3.1_A3_T1
-S9.3.1_A3_T2
-S15.1.2.2_A2_T10
-S15.1.2.3_A2_T10
-15.5.4.20-3-2
-15.5.4.20-3-3
-15.5.4.20-3-4
-15.5.4.20-3-5
-15.5.4.20-3-6
-
-10.4.3-1-106 failing
-11.2.3-3_3 failing
-S13_A15_T4 failing
-S15.4.4.3_A1_T1 failing
-S15.4.4.3_A3_T1 failing
-S15.5.4.11_A5_T1 failing
-S15.2.4.4_A14 failing
-
-# Function declarations in conditionals. We allow them, because the real
-# world requires them.
-Sbp_12.5_A9_T3 failing
-Sbp_12.6.1_A13_T3 failing
-Sbp_12.6.2_A13_T3 failing
-Sbp_12.6.4_A13_T3 failing
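The expectations format above is two-column: a line containing only a test id skips the test entirely, while "id failing" runs the test but expects it to fail; blank lines and lines starting with '#' are ignored. The TestExpectations class in test262.py below implements exactly this rule; a minimal standalone sketch of the same classification:

    def classify(line):
        # Returns (test_id, expectation) or None for blank/comment lines.
        line = line.strip()
        if not line or line.startswith("#"):
            return None
        record = line.split()
        if len(record) == 1:
            return (record[0], "skip")     # bare id: skip the test
        if record[1] == "failing":
            return (record[0], "failing")  # run, but expect failure
        return None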
diff --git a/tests/manual/v4/test262 b/tests/manual/v4/test262
deleted file mode 160000
-Subproject commit 9741ac4655808ac46c127e3d1d8ba3d27ada618
diff --git a/tests/manual/v4/test262.py b/tests/manual/v4/test262.py
deleted file mode 100755
index 3b5bfa119a..0000000000
--- a/tests/manual/v4/test262.py
+++ /dev/null
@@ -1,595 +0,0 @@
-#!/usr/bin/env python
-#############################################################################
-##
-## Copyright (C) 2015 The Qt Company Ltd.
-## Contact: http://www.qt.io/licensing/
-##
-## This file is part of the test suite module of the Qt Toolkit.
-##
-## $QT_BEGIN_LICENSE:BSD$
-## You may use this file under the terms of the BSD license as follows:
-##
-## "Redistribution and use in source and binary forms, with or without
-## modification, are permitted provided that the following conditions are
-## met:
-## * Redistributions of source code must retain the above copyright
-## notice, this list of conditions and the following disclaimer.
-## * Redistributions in binary form must reproduce the above copyright
-## notice, this list of conditions and the following disclaimer in
-## the documentation and/or other materials provided with the
-## distribution.
-## * Neither the name of The Qt Company Ltd nor the names of its
-## contributors may be used to endorse or promote products derived
-## from this software without specific prior written permission.
-##
-##
-## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
-##
-## $QT_END_LICENSE$
-##
-#############################################################################
-
-# Copyright 2009 the Sputnik authors. All rights reserved.
-# This code is governed by the BSD license found in the LICENSE file.
-
-# This is derived from sputnik.py, the Sputnik console test runner,
-# with elements from packager.py, which is separately
-# copyrighted. TODO: Refactor so there is less duplication between
-# test262.py and packager.py.
-
-import sys
-from os import path
-rootDir = path.dirname(path.realpath(__file__))
-sys.path.insert(0, path.abspath(rootDir + "/test262/tools/packaging"))
-
-import logging
-import optparse
-import os
-import platform
-import re
-import subprocess
-import tempfile
-import time
-import xml.dom.minidom
-import datetime
-import shutil
-import json
-import stat
-import multiprocessing
-import signal
-
-
-from parseTestRecord import parseTestRecord, stripHeader
-
-from packagerConfig import *
-
-# ############# Helpers needed for parallel multi-process test execution ############
-
-def runTest(case, args):
- return case.Run(args)
-
-def runTestVarArgs(args):
- return runTest(*args)
-
-def initWorkerProcess():
- signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-# #############
-
-class Test262Error(Exception):
- def __init__(self, message):
- self.message = message
-
-def ReportError(s):
- raise Test262Error(s)
-
-
-class TestExpectations:
- def __init__(self, enabled):
- self.testsToSkip = []
- self.failingTests = []
- f = open(rootDir + "/TestExpectations")
- if not enabled:
- return
- for line in f.read().splitlines():
- line = line.strip()
- if len(line) == 0 or line[0] == "#":
- continue
- record = line.split()
- if len(record) == 1:
- self.testsToSkip.append(record[0])
- else:
- test = record[0]
- expectation = record[1]
- if expectation == "failing":
- self.failingTests.append(test)
- f.close()
-
- def update(self, progress):
- unexpectedPasses = [c.case.name[-1] for c in progress.failed_tests if c.case.IsNegative()]
-
- # If a test fails that we expected to fail, then it actually passed unexpectedly.
- failures = [c.case.name[-1] for c in progress.failed_tests if not c.case.IsNegative()]
- for failure in failures:
- if failure in self.failingTests:
- unexpectedPasses.append(failure)
-
- f = open(rootDir + "/TestExpectations")
- lines = f.read().splitlines()
- oldLen = len(lines)
- for result in unexpectedPasses:
- expectationLine = result + " failing"
- try:
- lines.remove(expectationLine)
- except ValueError:
- pass
-
- f.close()
- if len(lines) != oldLen:
- f = open(rootDir + "/TestExpectations", "w")
- f.write("\n".join(lines))
- f.close()
- print "Changes to TestExpectations written!"
-
-
-if not os.path.exists(EXCLUDED_FILENAME):
- print "Cannot generate (JSON) test262 tests without a file," + \
- " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
- sys.exit(1)
-EXCLUDE_LIST = xml.dom.minidom.parse(EXCLUDED_FILENAME)
-EXCLUDE_LIST = EXCLUDE_LIST.getElementsByTagName("test")
-EXCLUDE_LIST = [x.getAttribute("id") for x in EXCLUDE_LIST]
-
-
-def BuildOptions():
- result = optparse.OptionParser()
- result.add_option("--command", default="qmljs", help="The command-line to run")
- result.add_option("--tests", default=path.abspath(rootDir + '/test262'),
- help="Path to the tests")
- result.add_option("--cat", default=False, action="store_true",
- help="Print packaged test code that would be run")
- result.add_option("--summary", default=True, action="store_true",
- help="Print summary after running tests")
- result.add_option("--full-summary", default=False, action="store_true",
- help="Print summary and test output after running tests")
- result.add_option("--strict_only", default=False, action="store_true",
- help="Test only strict mode")
- result.add_option("--non_strict_only", default=False, action="store_true",
- help="Test only non-strict mode")
- result.add_option("--parallel", default=False, action="store_true",
- help="Run tests in parallel")
- result.add_option("--with-test-expectations", default=False, action="store_true",
- help="Parse TestExpectations to deal with tests known to fail")
- result.add_option("--update-expectations", default=False, action="store_true",
- help="Update test expectations fail when a test passes that was expected to fail")
- # TODO: Once enough tests are made strict compat, change the default
- # to "both"
- result.add_option("--unmarked_default", default="non_strict",
- help="default mode for tests of unspecified strictness")
- return result
-
-
-def ValidateOptions(options):
- if not options.command:
- ReportError("A --command must be specified.")
- if not path.exists(options.tests):
- ReportError("Couldn't find test path '%s'" % options.tests)
-
-
-placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")
-
-
-def IsWindows():
- p = platform.system()
- return (p == 'Windows') or (p == 'Microsoft')
-
-
-class TempFile(object):
-
- def __init__(self, suffix="", prefix="tmp", text=False):
- self.suffix = suffix
- self.prefix = prefix
- self.text = text
- self.fd = None
- self.name = None
- self.is_closed = False
- self.Open()
-
- def Open(self):
- (self.fd, self.name) = tempfile.mkstemp(
- suffix = self.suffix,
- prefix = self.prefix,
- text = self.text)
-
- def Write(self, str):
- os.write(self.fd, str)
-
- def Read(self):
- f = file(self.name)
- result = f.read()
- f.close()
- return result
-
- def Close(self):
- if not self.is_closed:
- self.is_closed = True
- os.close(self.fd)
-
- def Dispose(self):
- try:
- self.Close()
- os.unlink(self.name)
- except OSError, e:
- logging.error("Error disposing temp file: %s", str(e))
-
-
-class TestResult(object):
-
- def __init__(self, exit_code, stdout, stderr, case):
- self.exit_code = exit_code
- self.stdout = stdout
- self.stderr = stderr
- self.case = case
-
- def ReportOutcome(self, long_format):
- name = self.case.GetName()
- mode = self.case.GetMode()
- if self.HasUnexpectedOutcome():
- if self.case.IsNegative():
- print "=== %s was expected to fail in %s, but didn't ===" % (name, mode)
- else:
- if long_format:
- print "=== %s failed in %s ===" % (name, mode)
- else:
- print "%s in %s: " % (name, mode)
- out = self.stdout.strip()
- if len(out) > 0:
- print "--- output ---"
- print out
- err = self.stderr.strip()
- if len(err) > 0:
- print "--- errors ---"
- print err
- if long_format:
- print "==="
- elif self.case.IsNegative():
- print "%s failed in %s as expected" % (name, mode)
- else:
- print "%s passed in %s" % (name, mode)
-
- def HasFailed(self):
- return self.exit_code != 0
-
- def HasUnexpectedOutcome(self):
- if self.case.IsNegative():
- return not self.HasFailed()
- else:
- return self.HasFailed()
-
-
-class TestCase(object):
-
- def __init__(self, suite, name, full_path, strict_mode):
- self.suite = suite
- self.name = name
- self.full_path = full_path
- self.strict_mode = strict_mode
- f = open(self.full_path)
- self.contents = f.read()
- f.close()
- testRecord = parseTestRecord(self.contents, name)
- self.test = testRecord["test"]
- del testRecord["test"]
- del testRecord["header"]
- del testRecord["commentary"]
- self.testRecord = testRecord;
-
-
- def GetName(self):
- return path.join(*self.name)
-
- def GetMode(self):
- if self.strict_mode:
- return "strict mode"
- else:
- return "non-strict mode"
-
- def GetPath(self):
- return self.name
-
- def NegateResult(self):
- if self.IsNegative():
- del self.testRecord['negative']
- else:
- self.testRecord['negative'] = "Some failure";
-
- def IsNegative(self):
- return 'negative' in self.testRecord
-
- def IsOnlyStrict(self):
- return 'onlyStrict' in self.testRecord
-
- def IsNoStrict(self):
- return 'noStrict' in self.testRecord
-
- def GetSource(self):
- # "var testDescrip = " + str(self.testRecord) + ';\n\n' + \
- source = self.suite.GetInclude("cth.js") + \
- self.suite.GetInclude("sta.js") + \
- self.suite.GetInclude("ed.js") + \
- self.suite.GetInclude("testBuiltInObject.js") + \
- self.suite.GetInclude("testIntl.js") + \
- self.test + '\n'
-
- if self.strict_mode:
- source = '"use strict";\nvar strict_mode = true;\n' + source
- else:
- source = "var strict_mode = false; \n" + source
- return source
-
- def InstantiateTemplate(self, template, params):
- def GetParameter(match):
- key = match.group(1)
- return params.get(key, match.group(0))
- return placeHolderPattern.sub(GetParameter, template)
-
- def Execute(self, command):
- if IsWindows():
- args = '%s' % command
- else:
- args = command.split(" ")
- stdout = TempFile(prefix="test262-out-")
- stderr = TempFile(prefix="test262-err-")
- try:
- logging.info("exec: %s", str(args))
- process = subprocess.Popen(
- args,
- shell = IsWindows(),
- stdout = stdout.fd,
- stderr = stderr.fd
- )
- code = process.wait()
- out = stdout.Read()
- err = stderr.Read()
- finally:
- stdout.Dispose()
- stderr.Dispose()
- return (code, out, err)
-
- def RunTestIn(self, command_template, tmp):
- tmp.Write(self.GetSource())
- tmp.Close()
- command = self.InstantiateTemplate(command_template, {
- 'path': tmp.name
- })
- (code, out, err) = self.Execute(command)
- return TestResult(code, out, err, self)
-
- def Run(self, command_template):
- tmp = TempFile(suffix=".js", prefix="test262-", text=True)
- try:
- result = self.RunTestIn(command_template, tmp)
- finally:
- tmp.Dispose()
- return result
-
- def Print(self):
- print self.GetSource()
-
-
-class ProgressIndicator(object):
-
- def __init__(self, count):
- self.count = count
- self.succeeded = 0
- self.failed = 0
- self.failed_tests = []
-
- def HasRun(self, result):
- result.ReportOutcome(True)
- if result.HasUnexpectedOutcome():
- self.failed += 1
- self.failed_tests.append(result)
- else:
- self.succeeded += 1
-
-
-def MakePlural(n):
- if (n == 1):
- return (n, "")
- else:
- return (n, "s")
-
-
-class TestSuite(object):
-
- def __init__(self, root, strict_only, non_strict_only, unmarked_default, load_expectations):
- # TODO: derive from packagerConfig.py
- self.test_root = path.join(root, 'test', 'suite')
- self.lib_root = path.join(root, 'test', 'harness')
- self.strict_only = strict_only
- self.non_strict_only = non_strict_only
- self.unmarked_default = unmarked_default
- self.include_cache = { }
- self.expectations = TestExpectations(load_expectations)
-
- def Validate(self):
- if not path.exists(self.test_root):
- ReportError("No test repository found")
- if not path.exists(self.lib_root):
- ReportError("No test library found")
-
- def IsHidden(self, path):
- return path.startswith('.') or path == 'CVS'
-
- def IsTestCase(self, path):
- return path.endswith('.js')
-
- def ShouldRun(self, rel_path, tests):
- if len(tests) == 0:
- return True
- for test in tests:
- if test in rel_path:
- return True
- return False
-
- def GetInclude(self, name):
- if not name in self.include_cache:
- static = path.join(self.lib_root, name)
- if path.exists(static):
- f = open(static)
- contents = stripHeader(f.read())
- contents = re.sub(r'\r\n', '\n', contents)
- self.include_cache[name] = contents + "\n"
- f.close()
- else:
- ReportError("Can't find: " + static)
- return self.include_cache[name]
-
- def EnumerateTests(self, tests):
- logging.info("Listing tests in %s", self.test_root)
- cases = []
- for root, dirs, files in os.walk(self.test_root):
- for f in [x for x in dirs if self.IsHidden(x)]:
- dirs.remove(f)
- dirs.sort()
- for f in sorted(files):
- if self.IsTestCase(f):
- full_path = path.join(root, f)
- if full_path.startswith(self.test_root):
- rel_path = full_path[len(self.test_root)+1:]
- else:
- logging.warning("Unexpected path %s", full_path)
- rel_path = full_path
- if self.ShouldRun(rel_path, tests) and not rel_path.startswith("intl402" + os.sep):
- basename = path.basename(full_path)[:-3]
- name = rel_path.split(path.sep)[:-1] + [basename]
- if EXCLUDE_LIST.count(basename) >= 1 or self.expectations.testsToSkip.count(basename) >= 1:
- print 'Excluded: ' + basename
- else:
- if not self.non_strict_only:
- strict_case = TestCase(self, name, full_path, True)
- if self.expectations.failingTests.count(basename) >= 1:
- strict_case.NegateResult()
- if not strict_case.IsNoStrict():
- if strict_case.IsOnlyStrict() or \
- self.unmarked_default in ['both', 'strict']:
- cases.append(strict_case)
- if not self.strict_only:
- non_strict_case = TestCase(self, name, full_path, False)
- if self.expectations.failingTests.count(basename) >= 1:
- non_strict_case.NegateResult()
- if not non_strict_case.IsOnlyStrict():
- if non_strict_case.IsNoStrict() or \
- self.unmarked_default in ['both', 'non_strict']:
- cases.append(non_strict_case)
- logging.info("Done listing tests")
- return cases
-
- def PrintSummary(self, progress):
- print
- print "=== Summary ==="
- count = progress.count
- succeeded = progress.succeeded
- failed = progress.failed
- print " - Ran %i test%s" % MakePlural(count)
- if progress.failed == 0:
- print " - All tests succeeded"
- else:
- percent = ((100.0 * succeeded) / count,)
- print " - Passed %i test%s (%.1f%%)" % (MakePlural(succeeded) + percent)
- percent = ((100.0 * failed) / count,)
- print " - Failed %i test%s (%.1f%%)" % (MakePlural(failed) + percent)
- positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
- negative = [c for c in progress.failed_tests if c.case.IsNegative()]
- if len(positive) > 0:
- print
- print "Failed tests"
- for result in positive:
- print " %s in %s" % (result.case.GetName(), result.case.GetMode())
- if len(negative) > 0:
- print
- print "Expected to fail but passed ---"
- for result in negative:
- print " %s in %s" % (result.case.GetName(), result.case.GetMode())
-
- def PrintFailureOutput(self, progress):
- for result in progress.failed_tests:
- print
- result.ReportOutcome(False)
-
- def Run(self, command_template, tests, print_summary, full_summary, parallel, update_expectations):
- if not "{{path}}" in command_template:
- command_template += " {{path}}"
- cases = self.EnumerateTests(tests)
- if len(cases) == 0:
- ReportError("No tests to run")
- progress = ProgressIndicator(len(cases))
-
- if parallel:
- pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(), initializer=initWorkerProcess)
- results = pool.imap_unordered(func=runTestVarArgs, iterable=[(case, command_template) for case in cases], chunksize=multiprocessing.cpu_count() * 8)
- for result in results:
- progress.HasRun(result)
- else:
- for case in cases:
- result = case.Run(command_template)
- progress.HasRun(result)
- if print_summary:
- self.PrintSummary(progress)
- if full_summary:
- self.PrintFailureOutput(progress)
- else:
- print
- print "Use --full-summary to see output from failed tests"
- print
- if update_expectations:
- self.expectations.update(progress)
-
- def Print(self, tests):
- cases = self.EnumerateTests(tests)
- if len(cases) > 0:
- cases[0].Print()
-
-
-def Main():
- # Some date tests rely on being run in pacific time.
- # Uncomment the next line for more logging info.
- #logging.basicConfig(level=logging.DEBUG)
- os.environ["TZ"] = "PST8PDT"
- parser = BuildOptions()
- (options, args) = parser.parse_args()
- ValidateOptions(options)
- test_suite = TestSuite(options.tests,
- options.strict_only,
- options.non_strict_only,
- options.unmarked_default,
- options.with_test_expectations)
- test_suite.Validate()
- if options.cat:
- test_suite.Print(args)
- else:
- test_suite.Run(options.command, args,
- options.summary or options.full_summary,
- options.full_summary,
- options.parallel,
- options.update_expectations)
-
-
-if __name__ == '__main__':
- try:
- Main()
- sys.exit(0)
- except Test262Error, e:
- print "Error: %s" % e.message
- sys.exit(1)
diff --git a/tests/manual/v4/tests.pro b/tests/manual/v4/tests.pro
deleted file mode 100644
index ce4a34f7a0..0000000000
--- a/tests/manual/v4/tests.pro
+++ /dev/null
@@ -1,15 +0,0 @@
-TEMPLATE = aux
-
-TESTSCRIPT=$$PWD/test262.py
-isEmpty(V4CMD): V4CMD = qmljs
-
-checktarget.target = check
-checktarget.commands = python $$TESTSCRIPT --command=$$V4CMD --parallel --with-test-expectations --update-expectations
-checktarget.depends = all
-QMAKE_EXTRA_TARGETS += checktarget
-
-checkmothtarget.target = check-interpreter
-checkmothtarget.commands = python $$TESTSCRIPT --command=\"$$V4CMD --interpret\" --parallel --with-test-expectations
-checkmothtarget.depends = all
-QMAKE_EXTRA_TARGETS += checkmothtarget
-
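For reference, the two qmake targets above boil down to these direct invocations of the runner (with the default V4CMD of qmljs):

    python test262.py --command=qmljs --parallel --with-test-expectations --update-expectations
    python test262.py --command="qmljs --interpret" --parallel --with-test-expectations

The second, interpreter-based run corresponds to the check-interpreter target, which is the run the commit message notes is omitted from CI for now.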