#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2017 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite module of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################

# Copyright 2009 the Sputnik authors.  All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.

# This is derived from sputnik.py, the Sputnik console test runner,
# with elements from packager.py, which is separately
# copyrighted. TODO: Refactor so there is less duplication between
# test262.py and packager.py.

import sys
from os import path
rootDir = path.dirname(path.realpath(__file__))
sys.path.insert(0, path.join(rootDir, "test262", "tools", "packaging"))

import logging
import optparse
import os
import platform
import re
import subprocess
import tempfile
import xml.dom.minidom
import multiprocessing
import signal


from parseTestRecord import parseTestRecord, stripHeader

from packagerConfig import *

# ############# Helpers needed for parallel multi-process test execution ############
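# multiprocessing.Pool can only dispatch picklable module-level functions,
# so these wrappers stay at the top level and unpack the (case, args)
# tuples that imap_unordered hands to each worker.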

def runTest(case, args):
    return case.Run(args)

def runTestVarArgs(args):
    return runTest(*args)

def initWorkerProcess():
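    # Workers ignore SIGINT so that Ctrl+C is handled once, in the parent
    # process, which can then shut the pool down cleanly.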
    signal.signal(signal.SIGINT, signal.SIG_IGN)

# #############

class Test262Error(Exception):
  def __init__(self, message):
    Exception.__init__(self, message)
    self.message = message

def ReportError(s):
  raise Test262Error(s)


class TestExpectations:
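    # Each non-comment line of the TestExpectations file is either a bare
    # test name (skip that test entirely) or "<name> failing" (the test is
    # known to fail, so its expected outcome is negated).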
    def __init__(self, enabled):
        self.testsToSkip = []
        self.failingTests = []
        if not enabled:
            return
        f = open(rootDir + "/TestExpectations")
        for line in f.read().splitlines():
            line = line.strip()
            if len(line) == 0 or line[0] == "#":
                continue
            record = line.split()
            if len(record) == 1:
                self.testsToSkip.append(record[0])
            else:
                test = record[0]
                expectation = record[1]
                if expectation == "failing":
                    self.failingTests.append(test)
        f.close()

    def update(self, progress):
        unexpectedPasses = [c.case.name[-1] for c in progress.failed_tests if c.case.IsNegative()]

        # Tests listed as "failing" have their expected outcome negated in
        # EnumerateTests, so one that shows up here as a plain failure has
        # actually passed unexpectedly.
        failures = [c.case.name[-1] for c in progress.failed_tests if not c.case.IsNegative()]
        for failure in failures:
            if failure in self.failingTests:
                unexpectedPasses.append(failure)

        f = open(rootDir + "/TestExpectations")
        lines = f.read().splitlines()
        oldLen = len(lines)
        for result in unexpectedPasses:
            expectationLine = result + " failing"
            try:
                lines.remove(expectationLine)
            except ValueError:
                pass

        f.close()
        if len(lines) != oldLen:
            f = open(rootDir + "/TestExpectations", "w")
            f.write("\n".join(lines))
            f.close()
            print "Changes to TestExpectations written!"


if not os.path.exists(EXCLUDED_FILENAME):
    print "Cannot generate (JSON) test262 tests without a file," + \
        " %s, showing which tests have been disabled!" % EXCLUDED_FILENAME
    sys.exit(1)
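# Each <test id="..."> element in the XML exclude list names one test, by
# basename, that the harness must skip.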
EXCLUDE_LIST = [x.getAttribute("id")
                for x in xml.dom.minidom.parse(EXCLUDED_FILENAME).getElementsByTagName("test")]


def BuildOptions():
  result = optparse.OptionParser()
  result.add_option("--command", default="qmljs", help="The command-line to run")
  result.add_option("--tests", default=path.abspath(rootDir + '/test262'),
                    help="Path to the tests")
  result.add_option("--cat", default=False, action="store_true",
                    help="Print packaged test code that would be run")
  result.add_option("--summary", default=True, action="store_true",
                    help="Print summary after running tests")
  result.add_option("--full-summary", default=False, action="store_true",
                    help="Print summary and test output after running tests")
  result.add_option("--strict_only", default=False, action="store_true",
                    help="Test only strict mode")
  result.add_option("--non_strict_only", default=False, action="store_true",
                    help="Test only non-strict mode")
  result.add_option("--parallel", default=False, action="store_true",
                    help="Run tests in parallel")
  result.add_option("--with-test-expectations", default=False, action="store_true",
                    help="Parse TestExpectations to deal with tests known to fail")
  result.add_option("--update-expectations", default=False, action="store_true",
                    help="Update test expectations fail when a test passes that was expected to fail")
  # TODO: Once enough tests are made strict compat, change the default
  # to "both"
  result.add_option("--unmarked_default", default="non_strict",
                    help="default mode for tests of unspecified strictness")
  return result
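
# Typical invocation (a sketch; the default --command is "qmljs", and any
# trailing arguments act as substring filters on the test paths):
#   python test262.py --command qmljs --parallel [test-name-substring ...]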


def ValidateOptions(options):
  if not options.command:
    ReportError("A --command must be specified.")
  if not path.exists(options.tests):
    ReportError("Couldn't find test path '%s'" % options.tests)


placeHolderPattern = re.compile(r"\{\{(\w+)\}\}")


def IsWindows():
  p = platform.system()
  return (p == 'Windows') or (p == 'Microsoft')


class TempFile(object):
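  # Thin wrapper around tempfile.mkstemp() that keeps the file descriptor
  # open so a child process can write to it; Dispose() closes the
  # descriptor and unlinks the file.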

  def __init__(self, suffix="", prefix="tmp", text=False):
    self.suffix = suffix
    self.prefix = prefix
    self.text = text
    self.fd = None
    self.name = None
    self.is_closed = False
    self.Open()

  def Open(self):
    (self.fd, self.name) = tempfile.mkstemp(
        suffix = self.suffix,
        prefix = self.prefix,
        text = self.text)

  def Write(self, text):
    os.write(self.fd, text)

  def Read(self):
    f = open(self.name)
    result = f.read()
    f.close()
    return result

  def Close(self):
    if not self.is_closed:
      self.is_closed = True
      os.close(self.fd)

  def Dispose(self):
    try:
      self.Close()
      os.unlink(self.name)
    except OSError as e:
      logging.error("Error disposing temp file: %s", str(e))


class TestResult(object):
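  # Captures the exit status and output of one test run; a "negative" test
  # is expected to exit non-zero, any other test to exit zero.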

  def __init__(self, exit_code, stdout, stderr, case):
    self.exit_code = exit_code
    self.stdout = stdout
    self.stderr = stderr
    self.case = case

  def ReportOutcome(self, long_format):
    name = self.case.GetName()
    mode = self.case.GetMode()
    if self.HasUnexpectedOutcome():
      if self.case.IsNegative():
        print "=== %s was expected to fail in %s, but didn't ===" % (name, mode)
      else:
        if long_format:
          print "=== %s failed in %s ===" % (name, mode)
        else:
          print "%s in %s: " % (name, mode)
        out = self.stdout.strip()
        if len(out) > 0:
          print "--- output ---"
          print out
        err = self.stderr.strip()
        if len(err) > 0:
          print "--- errors ---"
          print err
        if long_format:
          print "==="
    elif self.case.IsNegative():
      print "%s failed in %s as expected" % (name, mode)
    else:
      print "%s passed in %s" % (name, mode)

  def HasFailed(self):
    return self.exit_code != 0

  def HasUnexpectedOutcome(self):
    if self.case.IsNegative():
      return not self.HasFailed()
    else:
      return self.HasFailed()


class TestCase(object):

  def __init__(self, suite, name, full_path, strict_mode):
    self.suite = suite
    self.name = name
    self.full_path = full_path
    self.strict_mode = strict_mode
    f = open(self.full_path)
    self.contents = f.read()
    f.close()
    testRecord = parseTestRecord(self.contents, name)
    self.test = testRecord["test"]
    del testRecord["test"]
    del testRecord["header"]
    del testRecord["commentary"]
    self.testRecord = testRecord

  def GetName(self):
    return path.join(*self.name)

  def GetMode(self):
    if self.strict_mode:
      return "strict mode"
    else:
      return "non-strict mode"

  def GetPath(self):
    return self.name

  def NegateResult(self):
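    # Toggle the expected outcome; applied to tests listed as "failing"
    # in TestExpectations.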
    if self.IsNegative():
      del self.testRecord['negative']
    else:
      self.testRecord['negative'] = "Some failure"

  def IsNegative(self):
    return 'negative' in self.testRecord

  def IsOnlyStrict(self):
    return 'onlyStrict' in self.testRecord

  def IsNoStrict(self):
    return 'noStrict' in self.testRecord

  def GetSource(self):
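    # Assemble the full script: the harness includes first, then the test
    # body; strict-mode runs additionally get a '"use strict";' prologue.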
    # "var testDescrip = " + str(self.testRecord) + ';\n\n' + \
    source = self.suite.GetInclude("cth.js") + \
        self.suite.GetInclude("sta.js") + \
        self.suite.GetInclude("ed.js") + \
        self.suite.GetInclude("testBuiltInObject.js") + \
        self.suite.GetInclude("testIntl.js") + \
        self.test + '\n'

    if self.strict_mode:
      source = '"use strict";\nvar strict_mode = true;\n' + source
    else:
      source =  "var strict_mode = false; \n" + source
    return source

  def InstantiateTemplate(self, template, params):
    def GetParameter(match):
      key = match.group(1)
      return params.get(key, match.group(0))
    return placeHolderPattern.sub(GetParameter, template)

  def Execute(self, command):
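    # On Windows the command runs through the shell as a single string;
    # elsewhere it is split naively on spaces, so the --command template
    # cannot contain paths with embedded spaces.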
    if IsWindows():
      args = '%s' % command
    else:
      args = command.split(" ")
    stdout = TempFile(prefix="test262-out-")
    stderr = TempFile(prefix="test262-err-")
    try:
      logging.info("exec: %s", str(args))
      process = subprocess.Popen(
        args,
        shell = IsWindows(),
        stdout = stdout.fd,
        stderr = stderr.fd
      )
      code = process.wait()
      out = stdout.Read()
      err = stderr.Read()
    finally:
      stdout.Dispose()
      stderr.Dispose()
    return (code, out, err)

  def RunTestIn(self, command_template, tmp):
    tmp.Write(self.GetSource())
    tmp.Close()
    command = self.InstantiateTemplate(command_template, {
      'path': tmp.name
    })
    (code, out, err) = self.Execute(command)
    return TestResult(code, out, err, self)

  def Run(self, command_template):
    tmp = TempFile(suffix=".js", prefix="test262-", text=True)
    try:
      result = self.RunTestIn(command_template, tmp)
    finally:
      tmp.Dispose()
    return result

  def Print(self):
    print self.GetSource()


class ProgressIndicator(object):

  def __init__(self, count):
    self.count = count
    self.succeeded = 0
    self.failed = 0
    self.failed_tests = []

  def HasRun(self, result):
    result.ReportOutcome(True)
    if result.HasUnexpectedOutcome():
      self.failed += 1
      self.failed_tests.append(result)
    else:
      self.succeeded += 1


def MakePlural(n):
  if n == 1:
    return (n, "")
  else:
    return (n, "s")


class TestSuite(object):

  def __init__(self, root, strict_only, non_strict_only, unmarked_default, load_expectations):
    # TODO: derive from packagerConfig.py
    self.test_root = path.join(root, 'test', 'suite')
    self.lib_root = path.join(root, 'test', 'harness')
    self.strict_only = strict_only
    self.non_strict_only = non_strict_only
    self.unmarked_default = unmarked_default
    self.include_cache = { }
    self.expectations = TestExpectations(load_expectations)

  def Validate(self):
    if not path.exists(self.test_root):
      ReportError("No test repository found")
    if not path.exists(self.lib_root):
      ReportError("No test library found")

  def IsHidden(self, name):
    return name.startswith('.') or name == 'CVS'

  def IsTestCase(self, name):
    return name.endswith('.js')

  def ShouldRun(self, rel_path, tests):
    if len(tests) == 0:
      return True
    for test in tests:
      if test in rel_path:
        return True
    return False

  def GetInclude(self, name):
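    # Harness files are read once and cached, with line endings normalised,
    # so every generated test script embeds identical copies.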
    if name not in self.include_cache:
      static = path.join(self.lib_root, name)
      if path.exists(static):
        f = open(static)
        contents = stripHeader(f.read())
        contents = re.sub(r'\r\n', '\n', contents)
        self.include_cache[name] = contents + "\n"
        f.close()
      else:
        ReportError("Can't find: " + static)
    return self.include_cache[name]

  def EnumerateTests(self, tests):
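    # Walk the suite and build up to two TestCase objects per file (strict
    # and non-strict), honouring the onlyStrict/noStrict markers and the
    # --unmarked_default policy.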
    logging.info("Listing tests in %s", self.test_root)
    cases = []
    for root, dirs, files in os.walk(self.test_root):
      for f in [x for x in dirs if self.IsHidden(x)]:
        dirs.remove(f)
      dirs.sort()
      for f in sorted(files):
        if self.IsTestCase(f):
          full_path = path.join(root, f)
          if full_path.startswith(self.test_root):
            rel_path = full_path[len(self.test_root)+1:]
          else:
            logging.warning("Unexpected path %s", full_path)
            rel_path = full_path
          if self.ShouldRun(rel_path, tests) and not rel_path.startswith("intl402" + os.sep):
            basename = path.basename(full_path)[:-3]
            name = rel_path.split(path.sep)[:-1] + [basename]
            if basename in EXCLUDE_LIST or basename in self.expectations.testsToSkip:
              print 'Excluded: ' + basename
            else:
              if not self.non_strict_only:
                strict_case = TestCase(self, name, full_path, True)
                if basename in self.expectations.failingTests:
                  strict_case.NegateResult()
                if not strict_case.IsNoStrict():
                  if strict_case.IsOnlyStrict() or \
                        self.unmarked_default in ['both', 'strict']:
                    cases.append(strict_case)
              if not self.strict_only:
                non_strict_case = TestCase(self, name, full_path, False)
                if basename in self.expectations.failingTests:
                  non_strict_case.NegateResult()
                if not non_strict_case.IsOnlyStrict():
                  if non_strict_case.IsNoStrict() or \
                        self.unmarked_default in ['both', 'non_strict']:
                    cases.append(non_strict_case)
    logging.info("Done listing tests")
    return cases

  def PrintSummary(self, progress):
    print
    print "=== Summary ==="
    count = progress.count
    succeeded = progress.succeeded
    failed = progress.failed
    print " - Ran %i test%s" % MakePlural(count)
    if progress.failed == 0:
      print " - All tests succeeded"
    else:
      percent = ((100.0 * succeeded) / count,)
      print " - Passed %i test%s (%.1f%%)" % (MakePlural(succeeded) + percent)
      percent = ((100.0 * failed) / count,)
      print " - Failed %i test%s (%.1f%%)" % (MakePlural(failed) + percent)
      positive = [c for c in progress.failed_tests if not c.case.IsNegative()]
      negative = [c for c in progress.failed_tests if c.case.IsNegative()]
      if len(positive) > 0:
        print
        print "Failed tests"
        for result in positive:
          print "  %s in %s" % (result.case.GetName(), result.case.GetMode())
      if len(negative) > 0:
        print
        print "Expected to fail but passed ---"
        for result in negative:
          print " %s in %s" % (result.case.GetName(), result.case.GetMode())

  def PrintFailureOutput(self, progress):
    for result in progress.failed_tests:
      print
      result.ReportOutcome(False)

  def Run(self, command_template, tests, print_summary, full_summary, parallel, update_expectations):
    if not "{{path}}" in command_template:
      command_template += " {{path}}"
    cases = self.EnumerateTests(tests)
    if len(cases) == 0:
      ReportError("No tests to run")
    progress = ProgressIndicator(len(cases))

    if parallel:
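      # One worker per CPU; workers ignore SIGINT (see initWorkerProcess)
      # and a large chunksize keeps dispatch overhead low, since each test
      # spawns its own interpreter subprocess anyway.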
      pool = multiprocessing.Pool(processes=multiprocessing.cpu_count(),
                                  initializer=initWorkerProcess)
      results = pool.imap_unordered(
          func=runTestVarArgs,
          iterable=[(case, command_template) for case in cases],
          chunksize=multiprocessing.cpu_count() * 8)
      for result in results:
        progress.HasRun(result)
    else:
      for case in cases:
        result = case.Run(command_template)
        progress.HasRun(result)
    if print_summary:
      self.PrintSummary(progress)
      if full_summary:
        self.PrintFailureOutput(progress)
      else:
        print
        print "Use --full-summary to see output from failed tests"
    print
    if update_expectations:
        self.expectations.update(progress)
    return progress.failed == 0

  def Print(self, tests):
    cases = self.EnumerateTests(tests)
    if len(cases) > 0:
      cases[0].Print()


def Main():
  # Uncomment the next line for more logging info.
  #logging.basicConfig(level=logging.DEBUG)
  # Some date tests rely on being run in Pacific time.
  os.environ["TZ"] = "PST8PDT"
  os.environ["LANG"] = "en_US.UTF-8"
  os.environ["LC_TIME"] = "en_US.UTF-8"
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  ValidateOptions(options)
  test_suite = TestSuite(options.tests,
                         options.strict_only,
                         options.non_strict_only,
                         options.unmarked_default,
                         options.with_test_expectations)
  test_suite.Validate()
  if options.cat:
    test_suite.Print(args)
    return 0
  else:
    if test_suite.Run(options.command, args,
                      options.summary or options.full_summary,
                      options.full_summary,
                      options.parallel,
                      options.update_expectations):
      return 0
    else:
      return 1


if __name__ == '__main__':
  try:
    sys.exit(Main())
  except Test262Error as e:
    print "Error: %s" % e.message
    sys.exit(1)