From 63ffae3fa3a2627401878e7b948cb730118dc226 Mon Sep 17 00:00:00 2001 From: Dimitrios Apostolou Date: Fri, 26 Nov 2021 23:27:23 +0100 Subject: Introduce qt-testrunner.py Script that wraps Qt test execution in order to iron-out flakiness. Task-number: QTBUG-96353 Change-Id: Ie8ee780e3f4f8d086c080a7784f9f68fd1867be2 Reviewed-by: Daniel Smith --- cmake/QtBaseGlobalTargets.cmake | 2 + cmake/QtWrapperScriptHelpers.cmake | 1 + .../cmake_run_ctest_enforce_exit_code.yaml | 17 +- .../cmake_setup_running_tests_env_vars.yaml | 32 -- util/testrunner/qt-testrunner.py | 348 +++++++++++++++++++++ util/testrunner/tests/qt_mock_test-log.xml | 32 ++ util/testrunner/tests/qt_mock_test.py | 203 ++++++++++++ util/testrunner/tests/tst_testrunner.py | 295 +++++++++++++++++ 8 files changed, 887 insertions(+), 43 deletions(-) create mode 100755 util/testrunner/qt-testrunner.py create mode 100644 util/testrunner/tests/qt_mock_test-log.xml create mode 100755 util/testrunner/tests/qt_mock_test.py create mode 100755 util/testrunner/tests/tst_testrunner.py diff --git a/cmake/QtBaseGlobalTargets.cmake b/cmake/QtBaseGlobalTargets.cmake index bddc597bdb..c8499e3921 100644 --- a/cmake/QtBaseGlobalTargets.cmake +++ b/cmake/QtBaseGlobalTargets.cmake @@ -351,3 +351,5 @@ endif() qt_path_join(__qt_libexec_install_dir "${QT_INSTALL_DIR}" "${INSTALL_LIBEXECDIR}") qt_copy_or_install(FILES coin/instructions/qmake/ensure_pro_file.cmake DESTINATION "${__qt_libexec_install_dir}") +qt_copy_or_install(PROGRAMS "util/testrunner/qt-testrunner.py" + DESTINATION "${__qt_libexec_install_dir}") diff --git a/cmake/QtWrapperScriptHelpers.cmake b/cmake/QtWrapperScriptHelpers.cmake index cee9bfd1ed..e7a3b3c4bd 100644 --- a/cmake/QtWrapperScriptHelpers.cmake +++ b/cmake/QtWrapperScriptHelpers.cmake @@ -3,6 +3,7 @@ # qt-cmake-private # qt-configure-module # qt-cmake-private-install +# And other helper scripts. function(qt_internal_create_wrapper_scripts) # Provide a convenience cmake wrapper. 
if(CMAKE_HOST_UNIX) diff --git a/coin/instructions/cmake_run_ctest_enforce_exit_code.yaml b/coin/instructions/cmake_run_ctest_enforce_exit_code.yaml index fecf283905..c2560be37e 100644 --- a/coin/instructions/cmake_run_ctest_enforce_exit_code.yaml +++ b/coin/instructions/cmake_run_ctest_enforce_exit_code.yaml @@ -10,19 +10,14 @@ instructions: equals_value: Windows - type: EnvironmentVariable variableName: TESTRUNNER - variableValue: "python3 {{.SourceDir}}/coin_ctest_runner.py" + variableValue: "{{.InstallDir}}/libexec/qt-testrunner.py --" disable_if: - condition: or - conditions: - - condition: property - property: host.os - equals_value: Windows - - condition: property - property: features - contains_value: AndroidTestRun + condition: property + property: host.os + equals_value: Windows - type: EnvironmentVariable variableName: TESTRUNNER - variableValue: "{{.SourceDir}}\\coin_ctest_runner.py" + variableValue: "{{.InstallDir}}\\bin\\qt-testrunner.py --" enable_if: condition: property property: host.os @@ -43,7 +38,7 @@ instructions: equals_value: Windows - type: EnvironmentVariable variableName: CTEST_ARGS - variableValue: "-V --rerun-failed --force-new-ctest-process --repeat until-pass:5" + variableValue: "-V" - type: AppendToEnvironmentVariable variableName: CTEST_ARGS variableValue: " --stop-on-failure" diff --git a/coin/instructions/cmake_setup_running_tests_env_vars.yaml b/coin/instructions/cmake_setup_running_tests_env_vars.yaml index c1a83eae07..b9387d74c6 100644 --- a/coin/instructions/cmake_setup_running_tests_env_vars.yaml +++ b/coin/instructions/cmake_setup_running_tests_env_vars.yaml @@ -5,38 +5,6 @@ instructions: - type: EnvironmentVariable variableName: QTEST_ENVIRONMENT variableValue: "ci" - - type: WriteFile - fileContents: | - #!/usr/bin/python3 - import subprocess - import calendar - import datetime - import time - import sys - import os - import re - from os.path import expanduser - home = expanduser("~") - - file=os.path.basename(sys.argv[1]) - timestamp = str(round(time.time() * 1000)) - results_file = home + "/work/testresults/" + file +"-" + timestamp + ".xml,xml" - testargs = ["-o", results_file, "-o", "-,txt"] - if re.search("testlib.selftests.tst_selftests", sys.argv[1]): - testargs = [] - exit(subprocess.call([sys.argv[1]] + testargs)) - filename: "{{.SourceDir}}/coin_ctest_runner.py" - fileMode: 755 - - type: ExecuteCommand - command: "chmod 755 {{.SourceDir}}/coin_ctest_runner.py" - maxTimeInSeconds: 10 - maxTimeBetweenOutput: 10 - userMessageOnFailure: > - Failed to change file permission. - disable_if: - condition: property - property: host.os - equals_value: Windows - type: Group instructions: - type: EnvironmentVariable diff --git a/util/testrunner/qt-testrunner.py b/util/testrunner/qt-testrunner.py new file mode 100755 index 0000000000..3f29e9d1d9 --- /dev/null +++ b/util/testrunner/qt-testrunner.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python3 + + +############################################################################# +## +## Copyright (C) 2021 The Qt Company Ltd. +## Contact: https://www.qt.io/licensing/ +## +## This file is part of the release tools of the Qt Toolkit. +## +## $QT_BEGIN_LICENSE:GPL-EXCEPT$ +## Commercial License Usage +## Licensees holding valid commercial Qt licenses may use this file in +## accordance with the commercial license agreement provided with the +## Software or, alternatively, in accordance with the terms contained in +## a written agreement between you and The Qt Company. 
For licensing terms
+## and conditions see https://www.qt.io/terms-conditions. For further
+## information use the contact form at https://www.qt.io/contact-us.
+##
+## GNU General Public License Usage
+## Alternatively, this file may be used under the terms of the GNU
+## General Public License version 3 as published by the Free Software
+## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
+## included in the packaging of this file. Please review the following
+## information to ensure the GNU General Public License requirements will
+## be met: https://www.gnu.org/licenses/gpl-3.0.html.
+##
+## $QT_END_LICENSE$
+##
+#############################################################################
+
+
+# !!!IMPORTANT!!! If you change anything in this script, run the testsuite
+# manually and make sure it still passes, as it doesn't run automatically.
+# Just execute the command line as such:
+#
+#     ./util/testrunner/tests/tst_testrunner.py -v [--debug]
+#
+# This script wraps the execution of a Qt test executable, for example
+# tst_whatever, and tries to iron out unpredictable test failures.
+# In particular:
+#
+#   + Appends an output argument to it: "-o tst_whatever.xml,xml"
+#   + Checks the exit code. If it is zero, the script exits with zero,
+#     otherwise it proceeds.
+#   + Reads the XML test log and understands exactly which functions
+#     of the test failed.
+#   + If no XML file is found or it is invalid, the test executable
+#     probably CRASHed, so we *re-run the full test once again*.
+#   + If some testcases failed, it executes only those individually
+#     until they pass, or until the max-repeats limit is reached.
+#
+# The regular way to use it is to set the environment variable TESTRUNNER to
+# point to this script before invoking ctest.
+#
+# NOTE: this script is crafted specifically for use with Qt tests and for
+# use in Qt's CI. For example, it detects and acts specially if the test
+# executable is "tst_selftests" or "androidtestrunner". It also detects the
+# env var "COIN_CTEST_RESULTSDIR" and uses it as the log dir.
+#
+# TODO implement --dry-run.
+
+# Exit codes of this script:
+#   0: PASS. Either no test failed, or some failed initially but passed
+#      in the re-runs (FLAKY PASS).
+#   1: Some unexpected error of this script.
+#   2: FAIL! for at least one test, even after the re-runs.
+#   3: CRASH! for the test executable, even after re-running it once.
+
+
+
+import sys
+if sys.version_info < (3, 6):
+    sys.stderr.write(
+        "Error: this test wrapper script requires Python version 3.6 at least\n")
+    sys.exit(1)
+
+import argparse
+import subprocess
+import os
+import traceback
+import timeit
+import xml.etree.ElementTree as ET
+import logging as L
+
+from pprint import pprint
+from typing import NamedTuple, Tuple, List, Optional
+
+# Define a custom type for returning a fail incident
+class WhatFailed(NamedTuple):
+    func: str
+    tag: Optional[str] = None
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
+                                     description="""
+Wrap Qt test execution. This is intended to be invoked via the TESTRUNNER
+environment variable before running ctest in the CI environment. The purpose
+of the script is to repeat failed tests in order to iron out transient errors
+caused by unpredictable factors. Individual test functions that failed are
+retried up to max-repeats times until the test passes.
+ """, + epilog=""" +Default flags: --max-repeats 5 --passes-needed 1 + """ + ) + parser.add_argument("testargs", metavar="TESTARGS", nargs="+", + help="Test executable and arguments") + parser.add_argument("--log-dir", metavar="DIR", + help="Where to write the XML log files with the test results of the primary test run;" + " by default write to CWD") + parser.add_argument("--max-repeats", type=int, default=5, metavar='N', + help="In case the test FAILs, repeat the failed cases this many times") + parser.add_argument("--passes-needed", type=int, default=1, metavar='M', + help="Number of repeats that need to succeed in order to return an overall PASS") + parser.add_argument("--parse-xml-testlog", metavar="file.xml", + help="Do not run the full test the first time, but parse this XML test log;" + " if the test log contains failures, then re-run the failed cases normally," + " as indicated by the other flags") + parser.add_argument("--dry-run", action="store_true", + help="(TODO - not implemented yet) Do not run anything, just describe what would happen") + parser.add_argument("--timeout", metavar="T", + help="Timeout for each test execution in seconds") + parser.add_argument("--no-extra-args", action="store_true", + help="Do not append any extra arguments to the test command line, like" + " -o log_file.xml -v2 -vs. This will disable some functionality like the" + " failed test repetition and the verbose output on failure. This is" + " activated by default when TESTARGS is tst_selftests.") + args = parser.parse_args() + args.self_name = os.path.basename(sys.argv[0]) + args.specific_extra_args = [] + + logging_format = args.self_name + " %(levelname)8s: %(message)s" + L.basicConfig(format=logging_format, level=L.DEBUG) + + if args.log_dir is None: + if "COIN_CTEST_RESULTSDIR" in os.environ: + args.log_dir = os.environ["COIN_CTEST_RESULTSDIR"] + L.info("Will write XML test logs to directory" + " COIN_CTEST_RESULTSDIR=%s", args.log_dir) + else: + args.log_dir = "." + + args.test_basename = os.path.basename(args.testargs[0]) + if args.test_basename.endswith(".exe"): + args.test_basename = args.test_basename[:-4] + + # On Android emulated platforms, "androidtestrunner" is invoked by CMake + # to wrap the tests. We have to append the test arguments to it after + # "--". Besides that we have to detect the basename to avoid saving the + # XML log as "androidtestrunner.xml" for all tests. + if args.test_basename == "androidtestrunner": + args.specific_extra_args = [ "--" ] + apk_arg = False + for a in args.testargs[1:]: + if a == "--apk": + apk_arg = True + elif apk_arg: + apk_arg = False + if a.endswith(".apk"): + args.test_basename = os.path.basename(a)[:-4] + break + L.info("Detected androidtestrunner, test will be handled specially. Detected test basename: %s", + args.test_basename) + + # The qtestlib selftests are implemented using an external test library + # (Catch), and they don't support the same command-line options. + if args.test_basename == "tst_selftests": + L.info("Detected special test not able to generate XML log! Will not repeat individual testcases.") + args.no_extra_args = True + args.max_repeats = 0 + + return args + + +def parse_log(results_file) -> List[WhatFailed]: + """Parse the XML test log file. Return the failed testcases, if any. + + Failures are considered the "fail" and "xpass" incidents. 
+    A testcase is a function with an optional data tag."""
+    start_timer = timeit.default_timer()
+
+    try:
+        tree = ET.parse(results_file)
+    except FileNotFoundError:
+        L.error("XML log file not found: %s", results_file)
+        raise
+    except ET.ParseError:
+        L.error("Failed to parse the XML log file: %s", results_file)
+        with open(results_file, "rb") as f:
+            L.error("File Contents:\n%s\n\n", f.read().decode("utf-8", "ignore"))
+        raise
+
+    root = tree.getroot()
+    if root.tag != "TestCase":
+        raise AssertionError(
+            f"The XML test log must have <TestCase> as root tag, but has: <{root.tag}>")
+
+    failures = []
+    n_passes = 0
+    for e1 in root:
+        if e1.tag == "TestFunction":
+            for e2 in e1:       # every <TestFunction> can have many <Incident>
+                if e2.tag == "Incident":
+                    if e2.attrib["type"] in ("fail", "xpass"):
+                        func = e1.attrib["name"]
+                        e3 = e2.find("DataTag")   # every <Incident> might have a <DataTag>
+                        if e3 is not None:
+                            failures.append(WhatFailed(func, tag=e3.text))
+                        else:
+                            failures.append(WhatFailed(func))
+                    else:
+                        n_passes += 1
+
+    end_timer = timeit.default_timer()
+    t = end_timer - start_timer
+    L.info(f"Parsed XML file {results_file} in {t:.3f} seconds")
+    L.info(f"Found {n_passes} passes and {len(failures)} failures")
+
+    return failures
+
+
+def run_test(arg_list: List[str], timeout=None):
+    L.debug("Running test command line: %s", arg_list)
+    proc = subprocess.run(arg_list, timeout=timeout)
+    L.info("Test process exited with code: %d", proc.returncode)
+
+    return proc
+
+# Returns tuple: (exit_code, xml_logfile)
+def run_full_test(test_basename, testargs: List[str], output_dir: str,
+                  no_extra_args=False, dryrun=False,
+                  timeout=None, specific_extra_args=[]) \
+                  -> Tuple[int, Optional[str]]:
+
+    results_files = []
+    output_testargs = []
+
+    # Append arguments to write the log to a qtestlib XML file,
+    # to a JUnit XML file, and as text to stdout.
+    if not no_extra_args:
+        results_files.append(os.path.join(output_dir, test_basename + ".xml"))
+        results_files.append(os.path.join(output_dir, test_basename + "-junitxml.xml"))
+        output_testargs.extend(["-o", results_files[0] + ",xml"])
+        output_testargs.extend(["-o", results_files[1] + ",junitxml"])
+        output_testargs.extend(["-o", "-,txt"])
+
+    proc = run_test(testargs + specific_extra_args + output_testargs, timeout)
+
+    return (proc.returncode, results_files[0] if results_files else None)
+
+
+# TODO alter environment for logging:
+# QT_LOGGING_RULES="*=true"
+# QT_MESSAGE_PATTERN="[%{time process} %{if-debug}D%{endif}%{if-warning}W%{endif}%{if-critical}C%{endif}%{if-fatal}F%{endif}] %{category} %{file}:%{line} %{function}() - %{message}"
+# add arg: -maxwarnings 0   (maybe -v2 -vs?)
+def rerun_failed_testcase(testargs: List[str], what_failed: WhatFailed,
+                          max_repeats, passes_needed,
+                          dryrun=False, timeout=None) -> bool:
+    """Run a specific function:tag of a test, until it passes enough times, or
+    until max_repeats is reached.
+
+    Return True if it passes eventually, False if it fails.
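+
+    For example (names are illustrative), a failure recorded as
+    WhatFailed("myFunction", tag="myData") is re-run by appending the single
+    argument "myFunction:myData" to the test command line.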
+ """ + assert passes_needed <= max_repeats + failed_arg = what_failed.func + if what_failed.tag: + failed_arg += ":" + what_failed.tag + + n_passes = 0 + for i in range(max_repeats): + L.info("Re-running testcase: %s", failed_arg) + proc = run_test(testargs + [failed_arg], timeout) + if proc.returncode == 0: + n_passes += 1 + if n_passes == passes_needed: + L.info("Test has PASSed as FLAKY after re-runs:%d, passes:%d, failures:%d", + i+1, n_passes, i+1-n_passes) + return True + + assert n_passes < passes_needed + assert n_passes <= max_repeats + n_failures = max_repeats - n_passes + L.info("Test has FAILed despite all repetitions! re-runs:%d failures:%d", + max_repeats, n_failures) + return False + + +def main(): + args = parse_args() + n_full_runs = 1 if args.parse_xml_testlog else 2 + + for i in range(n_full_runs): + try: + if i != 0: + L.info("Re-running the full test!") + if args.parse_xml_testlog: + retcode = 1 # pretend the test returned error + results_file = args.parse_xml_testlog + else: + (retcode, results_file) = \ + run_full_test(args.test_basename, args.testargs, args.log_dir, + args.no_extra_args, args.dry_run, args.timeout, + args.specific_extra_args) + if retcode == 0: + sys.exit(0) # PASS + + failed_functions = parse_log(results_file) + + if not args.parse_xml_testlog: + assert len(failed_functions) > 0, \ + "The XML test log should contain at least one failure!" + + break # go to re-running individual failed testcases + + except Exception as e: + L.exception("Uncontrolled test CRASH! Details:", exc_info=e) + if i < n_full_runs - 1: + L.info("Will re-run the full test executable again!") + else: # Failed on the final run + L.error("Full test run failed repeatedly, aborting!") + sys.exit(3) + + if args.max_repeats == 0: + sys.exit(2) # Some tests failed but no re-runs were asked + + L.info("Some tests failed, will re-run at most %d times.\n", + args.max_repeats) + + for what_failed in failed_functions: + try: + ret = rerun_failed_testcase(args.testargs, what_failed, + args.max_repeats, args.passes_needed, + dryrun=args.dry_run, timeout=args.timeout) + except Exception as e: + L.exception("Uncontrolled test CRASH! Details:", exc_info=e) + L.error("Test re-run exited unxpectedly, aborting!") + sys.exit(3) # Test re-run CRASH + + if not ret: + sys.exit(2) # Test re-run FAIL + + sys.exit(0) # All testcase re-runs PASSed + + +if __name__ == "__main__": + main() diff --git a/util/testrunner/tests/qt_mock_test-log.xml b/util/testrunner/tests/qt_mock_test-log.xml new file mode 100644 index 0000000000..0c316d71c3 --- /dev/null +++ b/util/testrunner/tests/qt_mock_test-log.xml @@ -0,0 +1,32 @@ + + + + MOCK + MOCK + 6.3.0 + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/util/testrunner/tests/qt_mock_test.py b/util/testrunner/tests/qt_mock_test.py new file mode 100755 index 0000000000..e170f3a6c0 --- /dev/null +++ b/util/testrunner/tests/qt_mock_test.py @@ -0,0 +1,203 @@ +#!/usr/bin/env python3 + + +############################################################################# +## +## Copyright (C) 2021 The Qt Company Ltd. +## Contact: https://www.qt.io/licensing/ +## +## This file is part of the release tools of the Qt Toolkit. +## +## $QT_BEGIN_LICENSE:GPL-EXCEPT$ +## Commercial License Usage +## Licensees holding valid commercial Qt licenses may use this file in +## accordance with the commercial license agreement provided with the +## Software or, alternatively, in accordance with the terms contained in +## a written agreement between you and The Qt Company. 
For licensing terms
+## and conditions see https://www.qt.io/terms-conditions. For further
+## information use the contact form at https://www.qt.io/contact-us.
+##
+## GNU General Public License Usage
+## Alternatively, this file may be used under the terms of the GNU
+## General Public License version 3 as published by the Free Software
+## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
+## included in the packaging of this file. Please review the following
+## information to ensure the GNU General Public License requirements will
+## be met: https://www.gnu.org/licenses/gpl-3.0.html.
+##
+## $QT_END_LICENSE$
+##
+#############################################################################
+
+
+# This is an artificial test, mimicking the Qt tests, for example tst_whatever.
+# Its purpose is to assist in testing qt-testrunner.py.
+#
+# Mode A:
+#
+# If invoked with a test function argument, it runs that test function.
+#
+# Usage:
+#
+#     $0 always_pass
+#     $0 always_fail
+#     $0 always_crash
+#     $0 fail_then_pass:N    # where N is the number of failing runs before passing
+#
+# Needs environment variable:
+#   + QT_MOCK_TEST_STATE_FILE :: points to a unique filename, to be written
+#     for keeping the state of the fail_then_pass:N tests.
+#
+# Mode B:
+#
+# If invoked without any argument, it runs the tests listed in the
+# variable QT_MOCK_TEST_RUN_LIST. If that variable is not set, it just runs
+# the always_pass test. It also understands qtestlib's `-o outfile.xml,xml`
+# option for writing a mock testlog to a file. Requires environment variables:
+#   + QT_MOCK_TEST_STATE_FILE :: see above
+#   + QT_MOCK_TEST_XML_TEMPLATE_FILE :: may point to the template XML file
+#     located in the same source directory. Without this variable, the
+#     option `-o outfile.xml,xml` will be ignored.
+#   + QT_MOCK_TEST_RUN_LIST :: may contain a comma-separated list of tests
+#     that should run.
+
+
+import sys
+import os
+import traceback
+from tst_testrunner import write_xml_log
+
+
+MY_NAME = os.path.basename(sys.argv[0])
+STATE_FILE = None
+XML_TEMPLATE = None
+XML_OUTPUT_FILE = None
+
+
+def put_failure(test_name):
+    with open(STATE_FILE, "a") as f:
+        f.write(test_name + "\n")
+def get_failures(test_name):
+    n = 0
+    try:
+        with open(STATE_FILE) as f:
+            for line in f:
+                if line.strip() == test_name:
+                    n += 1
+    except FileNotFoundError:
+        return 0
+    return n
+
+# Only care about the XML log output file.
+def parse_output_argument(a):
+    global XML_OUTPUT_FILE
+    if a.endswith(",xml"):
+        XML_OUTPUT_FILE = a[:-4]
+
+# Strip qtestlib specific arguments.
+# Only care about the "-o ...,xml" argument.
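+# For example (purely illustrative values), an argv of
+#     qt_mock_test.py -o log.xml,xml -o -,txt -maxwarnings 0 -v2 always_pass
+# is reduced to ["always_pass"], and XML_OUTPUT_FILE is set to "log.xml".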
+def clean_cmdline(): + args = [] + prev_arg = None + skip_next_arg = True # Skip argv[0] + for a in sys.argv: + if skip_next_arg: + if prev_arg == "-o": + parse_output_argument(a) + prev_arg = None + skip_next_arg = False + continue + if a in ("-o", "-maxwarnings"): + skip_next_arg = True + prev_arg = a + continue + if a in ("-v1", "-v2", "-vs"): + continue + args.append(a) + return args + + +def log_test(testcase, result, + testsuite=MY_NAME.rpartition(".")[0]): + print("%-7s: %s::%s()" % (result, testsuite, testcase)) + +# Return the exit code +def run_test(testname): + if testname == "always_pass": + exit_code = 0 + elif testname == "always_fail": + exit_code = 1 + elif testname == "always_crash": + exit_code = 130 + elif testname.startswith("fail_then_pass"): + wanted_fails = int(testname.partition(":")[2]) + previous_fails = get_failures(testname) + if previous_fails < wanted_fails: + put_failure(testname) + exit_code = 1 + else: + exit_code = 0 + else: + assert False, "Unknown argument: %s" % testname + + if exit_code == 0: + log_test(testname, "PASS") + elif exit_code == 1: + log_test(testname, "FAIL!") + else: + log_test(testname, "CRASH!") + + return exit_code + +def no_args_run(): + try: + run_list = os.environ["QT_MOCK_TEST_RUN_LIST"].split(",") + except KeyError: + run_list = ["always_pass"] + + total_result = True + fail_list = [] + for test in run_list: + test_exit_code = run_test(test) + if test_exit_code not in (0, 1): + sys.exit(130) # CRASH! + if test_exit_code != 0: + fail_list.append(test) + total_result = total_result and (test_exit_code == 0) + + if XML_TEMPLATE and XML_OUTPUT_FILE: + write_xml_log(XML_OUTPUT_FILE, failure=fail_list) + + if total_result: + sys.exit(0) + else: + sys.exit(1) + + +def main(): + global STATE_FILE + # Will fail if env var is not set. + STATE_FILE = os.environ["QT_MOCK_TEST_STATE_FILE"] + + global XML_TEMPLATE + if "QT_MOCK_TEST_XML_TEMPLATE_FILE" in os.environ: + with open(os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"]) as f: + XML_TEMPLATE = f.read() + + args = clean_cmdline() + + if len(args) == 0: + no_args_run() + assert False, "Unreachable!" + else: + sys.exit(run_test(args[0])) + + +# TODO write XPASS test that does exit(1) + +if __name__ == "__main__": + try: + main() + except Exception as e: + traceback.print_exc() + exit(128) # Something went wrong with this script diff --git a/util/testrunner/tests/tst_testrunner.py b/util/testrunner/tests/tst_testrunner.py new file mode 100755 index 0000000000..02a839409f --- /dev/null +++ b/util/testrunner/tests/tst_testrunner.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python3 + + +############################################################################# +## +## Copyright (C) 2021 The Qt Company Ltd. +## Contact: https://www.qt.io/licensing/ +## +## This file is part of the release tools of the Qt Toolkit. +## +## $QT_BEGIN_LICENSE:GPL-EXCEPT$ +## Commercial License Usage +## Licensees holding valid commercial Qt licenses may use this file in +## accordance with the commercial license agreement provided with the +## Software or, alternatively, in accordance with the terms contained in +## a written agreement between you and The Qt Company. For licensing terms +## and conditions see https://www.qt.io/terms-conditions. For further +## information use the contact form at https://www.qt.io/contact-us. 
+## +## GNU General Public License Usage +## Alternatively, this file may be used under the terms of the GNU +## General Public License version 3 as published by the Free Software +## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT +## included in the packaging of this file. Please review the following +## information to ensure the GNU General Public License requirements will +## be met: https://www.gnu.org/licenses/gpl-3.0.html. +## +## $QT_END_LICENSE$ +## +############################################################################# + + +import sys +import os +import re +import subprocess + +from subprocess import STDOUT, PIPE +from tempfile import TemporaryDirectory, mkstemp + +MY_NAME = os.path.basename(__file__) +my_dir = os.path.dirname(__file__) +testrunner = os.path.join(my_dir, "..", "qt-testrunner.py") +mock_test = os.path.join(my_dir, "qt_mock_test.py") +xml_log_template = os.path.join(my_dir, "qt_mock_test-log.xml") + +with open(xml_log_template) as f: + XML_TEMPLATE = f.read() + + +import unittest + +def setUpModule(): + global TEMPDIR + TEMPDIR = TemporaryDirectory(prefix="tst_testrunner-") + + filename = os.path.join(TEMPDIR.name, "file_1") + print("setUpModule(): setting up temporary directory and env var" + " QT_MOCK_TEST_STATE_FILE=" + filename + " and" + " QT_MOCK_TEST_XML_TEMPLATE_FILE=" + xml_log_template) + + os.environ["QT_MOCK_TEST_STATE_FILE"] = filename + os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = xml_log_template + +def tearDownModule(): + print("\ntearDownModule(): Cleaning up temporary directory:", + TEMPDIR.name) + del os.environ["QT_MOCK_TEST_STATE_FILE"] + TEMPDIR.cleanup() + + +# Helper to run a command and always capture output +def run(*args, **kwargs): + if DEBUG: + print("Running: ", args, flush=True) + proc = subprocess.run(*args, stdout=PIPE, stderr=STDOUT, **kwargs) + if DEBUG and proc.stdout: + print(proc.stdout.decode(), flush=True) + return proc + +# Helper to run qt-testrunner.py with proper testing arguments. +def run_testrunner(xml_filename=None, extra_args=None, env=None): + + args = [ testrunner, mock_test ] + if xml_filename: + args += [ "--parse-xml-testlog", xml_filename ] + if extra_args: + args += extra_args + + return run(args, env=env) + +# Write the XML_TEMPLATE to filename, replacing the templated results. +def write_xml_log(filename, failure=None): + data = XML_TEMPLATE + # Replace what was asked to fail with "fail" + if type(failure) in (list, tuple): + for template in failure: + data = data.replace("{{"+template+"_result}}", "fail") + elif type(failure) is str: + data = data.replace("{{"+failure+"_result}}", "fail") + # Replace the rest with "pass" + data = re.sub(r"{{[^}]+}}", "pass", data) + with open(filename, "w") as f: + f.write(data) + + +# Test that qt_mock_test.py behaves well. This is necessary to properly +# test qt-testrunner. 
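+# Note that each test below starts from a clean state: setUp() removes
+# QT_MOCK_TEST_STATE_FILE, which resets the fail_then_pass:N counters that
+# qt_mock_test.py keeps in that file.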
+class Test_qt_mock_test(unittest.TestCase): + def setUp(self): + state_file = os.environ["QT_MOCK_TEST_STATE_FILE"] + if os.path.exists(state_file): + os.remove(state_file) + def test_always_pass(self): + proc = run([mock_test, "always_pass"]) + self.assertEqual(proc.returncode, 0) + def test_always_fail(self): + proc = run([mock_test, "always_fail"]) + self.assertEqual(proc.returncode, 1) + def test_fail_then_pass_2(self): + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 0) + def test_fail_then_pass_1(self): + proc = run([mock_test, "fail_then_pass:1"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:1"]) + self.assertEqual(proc.returncode, 0) + def test_fail_then_pass_many_tests(self): + proc = run([mock_test, "fail_then_pass:1"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:1"]) + self.assertEqual(proc.returncode, 0) + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 1) + proc = run([mock_test, "fail_then_pass:2"]) + self.assertEqual(proc.returncode, 0) + def test_xml_file_is_written(self): + filename = os.path.join(TEMPDIR.name, "testlog.xml") + proc = run([mock_test, "-o", filename+",xml"]) + self.assertEqual(proc.returncode, 0) + self.assertTrue(os.path.exists(filename)) + self.assertGreater(os.path.getsize(filename), 0) + os.remove(filename) + +# Test regular invocations of qt-testrunner. +class Test_testrunner(unittest.TestCase): + def setUp(self): + state_file = os.environ["QT_MOCK_TEST_STATE_FILE"] + if os.path.exists(state_file): + os.remove(state_file) + old_logfile = os.path.join(TEMPDIR.name, os.path.basename(mock_test) + ".xml") + if os.path.exists(old_logfile): + os.remove(old_logfile) + self.env = dict() + self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] = os.environ["QT_MOCK_TEST_XML_TEMPLATE_FILE"] + self.env["QT_MOCK_TEST_STATE_FILE"] = state_file + self.extra_args = [ "--log-dir", TEMPDIR.name ] + def prepare_env(self, run_list=None): + if run_list is not None: + self.env['QT_MOCK_TEST_RUN_LIST'] = ",".join(run_list) + def run2(self): + return run_testrunner(extra_args=self.extra_args, env=self.env) + def test_simple_invocation(self): + # All tests pass. + proc = self.run2() + self.assertEqual(proc.returncode, 0) + def test_always_pass(self): + self.prepare_env(run_list=["always_pass"]) + proc = self.run2() + self.assertEqual(proc.returncode, 0) + def test_always_fail(self): + self.prepare_env(run_list=["always_fail"]) + proc = self.run2() + # TODO verify that re-runs==max_repeats + self.assertEqual(proc.returncode, 2) + def test_flaky_pass_1(self): + self.prepare_env(run_list=["always_pass,fail_then_pass:1"]) + proc = self.run2() + self.assertEqual(proc.returncode, 0) + def test_flaky_pass_5(self): + self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:5"]) + proc = self.run2() + self.assertEqual(proc.returncode, 0) + def test_flaky_fail(self): + self.prepare_env(run_list=["always_pass,fail_then_pass:6"]) + proc = self.run2() + self.assertEqual(proc.returncode, 2) + def test_flaky_pass_fail(self): + self.prepare_env(run_list=["always_pass,fail_then_pass:1,fail_then_pass:6"]) + proc = self.run2() + # TODO verify that one func was re-run and passed but the other failed. 
+ self.assertEqual(proc.returncode, 2) + + # If no XML file is found by qt-testrunner, it is usually considered a + # CRASH and the whole test is re-run. But when the return code is zero, it + # doesn't care about XML file and passes anyway. + def test_no_xml_log_written_pass(self): + del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] + self.prepare_env(run_list=["always_pass"]) + proc = self.run2() + self.assertEqual(proc.returncode, 0) + # On the 2nd iteration of the full test, both of the tests pass. + def test_no_xml_log_written_fail_then_pass(self): + del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] + self.prepare_env(run_list=["always_pass,fail_then_pass:1"]) + proc = self.run2() + # TODO verify that the whole test has run twice. + self.assertEqual(proc.returncode, 0) + # Even after 2 iterations of the full test we still get failures but no XML file, + # and this is considered a CRASH. + def test_no_xml_log_written_crash(self): + del self.env["QT_MOCK_TEST_XML_TEMPLATE_FILE"] + self.prepare_env(run_list=["fail_then_pass:2"]) + proc = self.run2() + self.assertEqual(proc.returncode, 3) + +# Test qt-testrunner script with an existing XML log file: +# qt-testrunner.py qt_mock_test.py --parse-xml-testlog file.xml +# qt-testrunner should repeat the testcases that are logged as +# failures and fail or pass depending on how the testcases behave. +# Different XML files are generated for the following test cases. +# + No failure logged. qt-testrunner should exit(0) +# + The "always_pass" test has failed. qt-testrunner should exit(0). +# + The "always_fail" test has failed. qt-testrunner should exit(2). +# + The "always_crash" test has failed. qt-testrunner should exit(2). +# + The "fail_then_pass:2" test failed. qt-testrunner should exit(0). +# + The "fail_then_pass:5" test failed. qt-testrunner should exit(2). +class Test_testrunner_with_xml_logfile(unittest.TestCase): + # Runs before every single test function, creating a unique temp file. 
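+    # Each test then fills that file through write_xml_log(); for example a
+    # call with failure="always_fail" turns the {{always_fail_result}}
+    # placeholder into "fail" and every other {{..._result}} placeholder
+    # into "pass" before qt-testrunner parses the log.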
+ def setUp(self): + (_handle, self.xml_file) = mkstemp( + suffix=".xml", prefix="qt_mock_test-log-", + dir=TEMPDIR.name) + if os.path.exists(os.environ["QT_MOCK_TEST_STATE_FILE"]): + os.remove(os.environ["QT_MOCK_TEST_STATE_FILE"]) + def tearDown(self): + os.remove(self.xml_file) + del self.xml_file + + def test_no_failure(self): + write_xml_log(self.xml_file, failure=None) + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 0) + def test_always_pass_failed(self): + write_xml_log(self.xml_file, failure="always_pass") + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 0) + def test_always_pass_failed_max_repeats_0(self): + write_xml_log(self.xml_file, failure="always_pass") + proc = run_testrunner(self.xml_file, + extra_args=["--max-repeats", "0"]) + self.assertEqual(proc.returncode, 2) + def test_always_fail_failed(self): + write_xml_log(self.xml_file, failure="always_fail") + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 2) + def test_always_crash_failed(self): + write_xml_log(self.xml_file, failure="always_crash") + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 2) + def test_fail_then_pass_2_failed(self): + write_xml_log(self.xml_file, failure="fail_then_pass:2") + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 0) + def test_fail_then_pass_5_failed(self): + write_xml_log(self.xml_file, failure="fail_then_pass:5") + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 2) + def test_with_two_failures(self): + write_xml_log(self.xml_file, + failure=["always_pass", "fail_then_pass:2"]) + proc = run_testrunner(self.xml_file) + self.assertEqual(proc.returncode, 0) + # Check that test output is properly interleaved with qt-testrunner's logging. + matches = re.findall(r"(PASS|FAIL!).*\n.*Test process exited with code", + proc.stdout.decode()) + self.assertEqual(len(matches), 4) + + +if __name__ == "__main__": + + DEBUG = False + if "--debug" in sys.argv: + sys.argv.remove("--debug") + DEBUG = True + + # We set failfast=True as we do not want the test suite to continue if the + # tests of qt_mock_test failed. The next ones depend on it. + unittest.main(failfast=True) -- cgit v1.2.3