Diffstat (limited to 'chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py')
-rw-r--r--  chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py  74
1 file changed, 50 insertions(+), 24 deletions(-)
diff --git a/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
index 95146472146..cd0ed51f5ba 100644
--- a/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
+++ b/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -45,6 +45,10 @@ INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# POSIX limits status codes to 0-255. Normally run-webkit-tests returns the number
# of tests that failed. These indicate exceptional conditions triggered by the
# script itself, so we count backwards from 255 (aka -1) to enumerate them.
+#
+# FIXME: crbug.com/357866. We really shouldn't return the number of failures
+# in the exit code at all.
+EARLY_EXIT_STATUS = 251
SYS_DEPS_EXIT_STATUS = 252
NO_TESTS_EXIT_STATUS = 253
NO_DEVICES_EXIT_STATUS = 254
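For context, INTERRUPTED_EXIT_STATUS above follows the usual shell convention that a process terminated by a signal exits with 128 plus the signal number; with SIGINT = 2 that works out to 130, well clear of the reserved codes enumerated here. A quick standalone check (standard library only):

import signal

# 128 + SIGINT (2) = 130, the conventional "interrupted by Ctrl-C" exit status.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
assert INTERRUPTED_EXIT_STATUS == 130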
@@ -52,12 +56,16 @@ UNEXPECTED_ERROR_EXIT_STATUS = 255
ERROR_CODES = (
    INTERRUPTED_EXIT_STATUS,
+    EARLY_EXIT_STATUS,
    SYS_DEPS_EXIT_STATUS,
    NO_TESTS_EXIT_STATUS,
    NO_DEVICES_EXIT_STATUS,
    UNEXPECTED_ERROR_EXIT_STATUS,
)
+# In order to avoid colliding with the above codes, we put a ceiling on
+# the value returned by num_regressions
+MAX_FAILURES_EXIT_STATUS = 101


class TestRunException(Exception):
    def __init__(self, code, msg):
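The new ceiling exists because run-webkit-tests reports the number of failing tests through its exit code, and POSIX only allows 0-255; without a cap, 251 or more real failures would collide with the reserved error codes above. A minimal sketch of how a caller might clamp the count; the helper name is hypothetical, only the constant's value comes from this patch:

MAX_FAILURES_EXIT_STATUS = 101  # same ceiling as in the hunk above

def exit_code_from_failure_count(num_regressions):
    # Hypothetical helper: report the failure count as the exit code, but
    # never let it reach the reserved 251-255 error-code range.
    return min(num_regressions, MAX_FAILURES_EXIT_STATUS)

assert exit_code_from_failure_count(0) == 0
assert exit_code_from_failure_count(7) == 7
assert exit_code_from_failure_count(5000) == MAX_FAILURES_EXIT_STATUS  # capped at 101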
@@ -149,6 +157,9 @@ def _interpret_test_failures(failures):
    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True

+    if test_failures.FailureTestHarnessAssertion in failure_types:
+        test_dict['is_testharness_test'] = True
+
    return test_dict
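_interpret_test_failures turns the failure classes attached to a result into boolean flags on the per-test dict, and this hunk adds one more flag for testharness.js assertion failures. A self-contained illustration of the same pattern, using stand-in failure classes rather than the real ones from webkitpy.layout_tests.models.test_failures:

class FailureMissingImage(object):
    pass

class FailureTestHarnessAssertion(object):
    pass

def interpret_failures(failures):
    # One boolean flag per interesting failure type, mirroring the hunk above.
    failure_types = [type(failure) for failure in failures]
    test_dict = {}
    if FailureMissingImage in failure_types:
        test_dict['is_missing_image'] = True
    if FailureTestHarnessAssertion in failure_types:
        test_dict['is_testharness_test'] = True
    return test_dict

print(interpret_failures([FailureTestHarnessAssertion()]))  # {'is_testharness_test': True}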
@@ -202,15 +213,25 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
            if not result.has_stderr and only_include_failing:
                continue
        elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name:
-            if retry_results and test_name not in retry_results.unexpected_results_by_name:
-                actual.extend(expectations.get_expectations_string(test_name).split(" "))
-                num_flaky += 1
-            elif retry_results:
-                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
-                num_regressions += 1
-                if not keywords[retry_result_type] in actual:
-                    actual.append(keywords[retry_result_type])
+            if retry_results:
+                if test_name not in retry_results.unexpected_results_by_name:
+                    # The test failed unexpectedly at first, but ran as expected the second time -> flaky.
+                    actual.extend(expectations.get_expectations_string(test_name).split(" "))
+                    num_flaky += 1
+                else:
+                    retry_result_type = retry_results.unexpected_results_by_name[test_name].type
+                    if retry_result_type == test_expectations.PASS:
+                        # The test failed unexpectedly at first, then passed unexpectedly -> unexpected pass.
+                        num_passes += 1
+                        if not result.has_stderr and only_include_failing:
+                            continue
+                    else:
+                        # The test failed unexpectedly both times -> regression.
+                        num_regressions += 1
+                        if not keywords[retry_result_type] in actual:
+                            actual.append(keywords[retry_result_type])
            else:
+                # The test failed unexpectedly, but we didn't do any retries -> regression.
                num_regressions += 1

        test_dict = {}
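The retry branch above boils down to a three-way classification, spelled out by the new comments: flaky, unexpected pass, or regression. A standalone sketch of that decision, using plain strings instead of the test_expectations keywords (the function and its argument names are illustrative, not this module's API):

def classify_unexpected_result(retried, retry_outcome):
    # retried: whether the test was re-run at all.
    # retry_outcome: 'expected', 'pass', or 'fail' describing the second run.
    if not retried:
        return 'regression'        # failed unexpectedly, no retry to soften it
    if retry_outcome == 'expected':
        return 'flaky'             # failed first, then behaved as expected
    if retry_outcome == 'pass':
        return 'unexpected_pass'   # failed first, then passed unexpectedly
    return 'regression'            # failed unexpectedly both times

assert classify_unexpected_result(False, None) == 'regression'
assert classify_unexpected_result(True, 'expected') == 'flaky'
assert classify_unexpected_result(True, 'pass') == 'unexpected_pass'
assert classify_unexpected_result(True, 'fail') == 'regression'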
@@ -233,7 +254,9 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
        test_dict['actual'] = " ".join(actual)

        def is_expected(actual_result):
-            return expectations.matches_an_expected_result(test_name, result_type, port_obj.get_option('pixel_tests') or result.reftest_type)
+            return expectations.matches_an_expected_result(test_name, result_type,
+                                                           port_obj.get_option('pixel_tests') or result.reftest_type,
+                                                           port_obj.get_option('enable_sanitizer'))

        # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
        if not all(is_expected(actual_result) for actual_result in actual):
@@ -246,6 +269,9 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

+        if (result.has_repaint_overlay):
+            test_dict['has_repaint_overlay'] = True
+
        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
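The "store test hierarchically by directory" comment describes folding each test_dict into a nested dict keyed by path components, so the results JSON mirrors the directory tree. A small sketch of that insertion; the helper name is illustrative, not the code this file actually uses:

def add_result_to_trie(test_name, test_dict, tests):
    # 'foo/bar/baz.html' ends up as tests['foo']['bar']['baz.html'] = test_dict.
    parts = test_name.split('/')
    node = tests
    for part in parts[:-1]:
        node = node.setdefault(part, {})
    node[parts[-1]] = test_dict

tests = {}
add_result_to_trie('foo/bar/baz.html', {'expected': 'PASS', 'actual': 'TEXT'}, tests)
add_result_to_trie('foo/bar/baz1.html', {'expected': 'PASS', 'actual': 'PASS'}, tests)
# tests == {'foo': {'bar': {'baz.html': {...}, 'baz1.html': {...}}}}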
@@ -282,20 +308,20 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
    results['build_number'] = port_obj.get_option('build_number')
    results['builder_name'] = port_obj.get_option('builder_name')

-    try:
-        # Don't do this by default since it takes >100ms.
-        # It's only used for uploading data to the flakiness dashboard.
-        if port_obj.get_option("builder_name"):
-            port_obj.host.initialize_scm()
-            for (name, path) in port_obj.repository_paths():
-                results[name.lower() + '_revision'] = port_obj.host.scm().svn_revision(path)
-    except Exception, e:
-        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
-        # Handle cases where we're running outside of version control.
-        import traceback
-        _log.debug('Failed to learn head svn revision:')
-        _log.debug(traceback.format_exc())
-        results['chromium_revision'] = ""
-        results['blink_revision'] = ""
+    # Don't do this by default since it takes >100ms.
+    # It's only used for uploading data to the flakiness dashboard.
+    results['chromium_revision'] = ''
+    results['blink_revision'] = ''
+    if port_obj.get_option('builder_name'):
+        for (name, path) in port_obj.repository_paths():
+            scm = port_obj.host.scm_for_path(path)
+            if scm:
+                rev = scm.svn_revision(path)
+            if rev:
+                results[name.lower() + '_revision'] = rev
+            else:
+                _log.warn('Failed to determine svn revision for %s, '
+                          'leaving "%s_revision" key blank in full_results.json.'
+                          % (path, name))

    return results
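The rewritten tail of summarize_results drops the broad try/except in favour of an explicit per-repository lookup: the *_revision keys are pre-seeded with empty strings, and each path only gets a value when an SCM object can be obtained for it, so running outside version control no longer relies on an exception path. A sketch of the same shape with a stand-in scm_for_path callable (the real one hangs off port_obj.host and may return None outside a checkout):

def fill_revisions(results, repository_paths, scm_for_path, warn):
    # Seed the keys so full_results.json always contains them, even when blank.
    results['chromium_revision'] = ''
    results['blink_revision'] = ''
    for name, path in repository_paths:
        scm = scm_for_path(path)                      # None when path is not a checkout
        rev = scm.svn_revision(path) if scm else None
        if rev:
            results[name.lower() + '_revision'] = rev
        else:
            warn('Failed to determine svn revision for %s' % path)
    return results

print(fill_revisions({}, [('chromium', '/no/such/checkout')],
                     lambda path: None, lambda msg: None))
# -> {'chromium_revision': '', 'blink_revision': ''}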