Diffstat (limited to 'chromium/third_party/catapult/tracing/tracing_build')
17 files changed, 0 insertions, 1559 deletions
diff --git a/chromium/third_party/catapult/tracing/tracing_build/__init__.py b/chromium/third_party/catapult/tracing/tracing_build/__init__.py deleted file mode 100644 index 22060c5dca2..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. -# - -import tracing_project -tracing_project.UpdateSysPathIfNeeded() diff --git a/chromium/third_party/catapult/tracing/tracing_build/check_common.py b/chromium/third_party/catapult/tracing/tracing_build/check_common.py deleted file mode 100644 index b49513f6d61..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/check_common.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2013 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import os - -import tracing_project - - -FILE_GROUPS = ["tracing_css_files", - "tracing_js_html_files", - "tracing_img_files"] - - -def GetFileGroupFromFileName(filename): - extension = os.path.splitext(filename)[1] - return { - '.css': 'tracing_css_files', - '.html': 'tracing_js_html_files', - '.js': 'tracing_js_html_files', - '.png': 'tracing_img_files' - }[extension] - - -def CheckListedFilesSorted(src_file, group_name, listed_files): - sorted_files = sorted(listed_files) - if sorted_files != listed_files: - mismatch = '' - for i in range(len(listed_files)): - if listed_files[i] != sorted_files[i]: - mismatch = listed_files[i] - break - what_is = ' ' + '\n '.join(listed_files) - what_should_be = ' ' + '\n '.join(sorted_files) - return '''In group {0} from file {1}, filenames aren't sorted. - -First mismatch: - {2} - -Current listing: -{3} - -Correct listing: -{4}\n\n'''.format(group_name, src_file, mismatch, what_is, what_should_be) - else: - return '' - - -def GetKnownFiles(): - project = tracing_project.TracingProject() - - vulcanizer = project.CreateVulcanizer() - m = vulcanizer.loader.LoadModule( - module_name='tracing.ui.extras.about_tracing.about_tracing') - absolute_filenames = m.GetAllDependentFilenamesRecursive( - include_raw_scripts=False) - - return list(set([os.path.relpath(f, project.tracing_root_path) - for f in absolute_filenames])) - - -def CheckCommon(file_name, listed_files): - known_files = GetKnownFiles() - u = set(listed_files).union(set(known_files)) - i = set(listed_files).intersection(set(known_files)) - diff = list(u - i) - - if len(diff) == 0: - return '' - - error = 'Entries in ' + file_name + ' do not match files on disk:\n' - in_file_only = list(set(listed_files) - set(known_files)) - in_known_only = list(set(known_files) - set(listed_files)) - - if len(in_file_only) > 0: - error += ' In file only:\n ' + '\n '.join(sorted(in_file_only)) - if len(in_known_only) > 0: - if len(in_file_only) > 0: - error += '\n\n' - error += ' On disk only:\n ' + '\n '.join(sorted(in_known_only)) - - if in_file_only: - error += ( - '\n\n' - ' Note: only files actually used in about:tracing should\n' - ' be listed in the build files. 
Try running \n' - ' tracing/bin/update_gyp_and_gn\n' - ' to update the files automatically.') - - return error diff --git a/chromium/third_party/catapult/tracing/tracing_build/check_common_unittest.py b/chromium/third_party/catapult/tracing/tracing_build/check_common_unittest.py deleted file mode 100644 index b3af7b6fb68..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/check_common_unittest.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import unittest - -from tracing_build import check_common - - -class CheckCommonUnitTest(unittest.TestCase): - - def testFilesSorted(self): - error = check_common.CheckListedFilesSorted('foo.gyp', 'tracing_pdf_files', - ['/dir/file.pdf', - '/dir/another_file.pdf']) - expected_error = '''In group tracing_pdf_files from file foo.gyp,\ - filenames aren't sorted. - -First mismatch: - /dir/file.pdf - -Current listing: - /dir/file.pdf - /dir/another_file.pdf - -Correct listing: - /dir/another_file.pdf - /dir/file.pdf\n\n''' - assert error == expected_error diff --git a/chromium/third_party/catapult/tracing/tracing_build/check_gypi.py b/chromium/third_party/catapult/tracing/tracing_build/check_gypi.py deleted file mode 100644 index fb59626816b..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/check_gypi.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2013 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import os - -from tracing_build import check_common - -GYPI_FILE = os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', 'trace_viewer.gypi')) - - -def GypiCheck(): - f = open(GYPI_FILE, 'r') - gyp = f.read() - f.close() - - data = eval(gyp) # pylint: disable=eval-used - listed_files = [] - error = '' - for group in check_common.FILE_GROUPS: - filenames = map(os.path.normpath, data['variables'][group]) - error += check_common.CheckListedFilesSorted(GYPI_FILE, group, filenames) - listed_files.extend(filenames) - - return error + check_common.CheckCommon(GYPI_FILE, listed_files) - - -if __name__ == '__main__': - print GypiCheck() diff --git a/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents.py b/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents.py deleted file mode 100644 index 333fca19021..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import codecs -import argparse -import os -import sys - -import py_vulcanize - -import tracing_project - - -def Main(args): - parser = argparse.ArgumentParser(usage='%(prog)s --outdir=<directory>') - parser.add_argument('--outdir', dest='out_dir', - help='Where to place generated content') - parser.add_argument('--no-min', default=False, action='store_true', - help='Skip minification') - args = parser.parse_args(args) - - if not args.out_dir: - sys.stderr.write('ERROR: Must specify --outdir=<directory>') - parser.print_help() - return 1 - - names = ['tracing.ui.extras.about_tracing.about_tracing'] - project = tracing_project.TracingProject() - - vulcanizer = project.CreateVulcanizer() - load_sequence = vulcanizer.CalcLoadSequenceForModuleNames(names) - - olddir = os.getcwd() - try: - if not os.path.exists(args.out_dir): - os.makedirs(args.out_dir) - o = codecs.open(os.path.join(args.out_dir, 'about_tracing.html'), 'w', - encoding='utf-8') - try: - py_vulcanize.GenerateStandaloneHTMLToFile( - o, - load_sequence, - title='chrome://tracing', - flattened_js_url='tracing.js', - minify=not args.no_min) - except py_vulcanize.module.DepsException, ex: - sys.stderr.write('Error: %s\n\n' % str(ex)) - return 255 - o.close() - - o = codecs.open(os.path.join(args.out_dir, 'about_tracing.js'), 'w', - encoding='utf-8') - assert o.encoding == 'utf-8' - py_vulcanize.GenerateJSToFile( - o, - load_sequence, - use_include_tags_for_scripts=False, - dir_for_include_tag_root=args.out_dir, - minify=not args.no_min) - o.close() - - finally: - os.chdir(olddir) - - return 0 diff --git a/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py b/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py deleted file mode 100644 index dd31b7ec344..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/generate_about_tracing_contents_unittest.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import unittest -import tempfile -import shutil - -from tracing_build import generate_about_tracing_contents - - -class GenerateAboutTracingContentsUnittTest(unittest.TestCase): - - def testSmoke(self): - try: - tmpdir = tempfile.mkdtemp() - res = generate_about_tracing_contents.Main(['--outdir', tmpdir]) - assert res == 0 - finally: - shutil.rmtree(tmpdir) diff --git a/chromium/third_party/catapult/tracing/tracing_build/html2trace.py b/chromium/third_party/catapult/tracing/tracing_build/html2trace.py deleted file mode 100644 index 1acab58d5d6..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/html2trace.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import base64 -import codecs -import gzip -import json -import re -import StringIO - - -GZIP_HEADER_BYTES = b'\x1f\x8b' - - -# Regular expressions for matching the beginning and end of trace data in HTML -# traces. See tracing/extras/importer/trace2html_importer.html. 
-TRACE_DATA_START_LINE_RE = re.compile( - r'^<\s*script id="viewer-data" type="(application\/json|text\/plain)">$') -TRACE_DATA_END_LINE_RE = re.compile(r'^<\/\s*script>$') - - -def CopyTraceDataFromHTMLFilePath(html_path, trace_path, gzipped_output=False): - """Copies trace data from an existing HTML file into new trace file(s). - - If |html_path| doesn't contain any trace data blocks, this function throws an - exception. If |html_path| contains more than one trace data block, the first - block will be extracted into |trace_path| and the rest will be extracted - into separate files |trace_path|.1, |trace_path|.2, etc. - - The contents of each trace data block is decoded and, if |gzipped_output| is - false, inflated before it's stored in a trace file. - - This function returns a list of paths of the saved trace files ([|trace_path|, - |trace_path|.1, |trace_path|.2, ...]). - """ - trace_data_list = _ExtractTraceDataFromHTMLFile(html_path, - unzip_data=not gzipped_output) - saved_paths = [] - for i, trace_data in enumerate(trace_data_list): - saved_path = trace_path if i == 0 else '%s.%d' % (trace_path, i) - saved_paths.append(saved_path) - with open(saved_path, 'wb' if gzipped_output else 'w') as trace_file: - trace_file.write(trace_data.read()) - return saved_paths - - -def ReadTracesFromHTMLFilePath(html_path): - """Returns a list of inflated JSON traces extracted from an HTML file.""" - return map(json.load, _ExtractTraceDataFromHTMLFile(html_path)) - - -def _ExtractTraceDataFromHTMLFile(html_path, unzip_data=True): - with codecs.open(html_path, mode='r', encoding='utf-8') as html_file: - lines = html_file.readlines() - - start_indices = [i for i in xrange(len(lines)) - if TRACE_DATA_START_LINE_RE.match(lines[i])] - if not start_indices: - raise Exception('File %r does not contain trace data') - - decoded_data_list = [] - for start_index in start_indices: - end_index = next(i for i in xrange(start_index + 1, len(lines)) - if TRACE_DATA_END_LINE_RE.match(lines[i])) - encoded_data = '\n'.join(lines[start_index + 1:end_index]).strip() - decoded_data_list.append(StringIO.StringIO(base64.b64decode(encoded_data))) - - if unzip_data: - return map(_UnzipFileIfNecessary, decoded_data_list) - else: - return map(_ZipFileIfNecessary, decoded_data_list) - - -def _UnzipFileIfNecessary(original_file): - if _IsFileZipped(original_file): - return gzip.GzipFile(fileobj=original_file) - else: - return original_file - - -def _ZipFileIfNecessary(original_file): - if _IsFileZipped(original_file): - return original_file - else: - zipped_file = StringIO.StringIO() - with gzip.GzipFile(fileobj=zipped_file, mode='wb') as gzip_wrapper: - gzip_wrapper.write(original_file.read()) - zipped_file.seek(0) - return zipped_file - - -def _IsFileZipped(f): - is_gzipped = f.read(len(GZIP_HEADER_BYTES)) == GZIP_HEADER_BYTES - f.seek(0) - return is_gzipped diff --git a/chromium/third_party/catapult/tracing/tracing_build/merge_traces.py b/chromium/third_party/catapult/tracing/tracing_build/merge_traces.py deleted file mode 100644 index b107253dcf8..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/merge_traces.py +++ /dev/null @@ -1,632 +0,0 @@ -# Copyright 2016 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import argparse -import codecs -import collections -import gzip -import itertools -import json -import logging -import os -import sys - -# Add tracing/ to the path. 
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -from tracing_build import html2trace, trace2html - - -GZIP_FILENAME_SUFFIX = '.gz' -HTML_FILENAME_SUFFIX = '.html' - - -# Relevant trace event phases. See -# https://code.google.com/p/chromium/codesearch#chromium/src/base/trace_event/common/trace_event_common.h. -METADATA_PHASE = 'M' -MEMORY_DUMP_PHASE = 'v' -BEGIN_PHASE = 'B' -END_PHASE = 'E' - - -# Minimum gap between two consecutive merged traces in microseconds. -MIN_TRACE_GAP_IN_US = 1000000 - - -# Rule for matching IDs in an IdMap. For a given level, |match| should be a -# named tuple class where its fields determine the importance of |entry._items| -# for the purposes of matching pairs of IdMap entries. -IdMapLevel = collections.namedtuple('IdMapLevel', ['name', 'match']) - - -class IdMap(object): - """Abstract class for merging and mapping IDs from multiple sources.""" - - # Sub-classes must provide a tuple of |IdMapLevel| objects. - LEVELS = NotImplemented - - def __init__(self, depth=0): - assert 0 <= depth <= len(self.LEVELS) - self._depth = depth - - if depth > 0: - # Non-root node. - self._canonical_id = None - self._items = collections.defaultdict(set) - self._sources = set() - - if depth < len(self.LEVELS): - # Non-leaf node. - self._entry_map = {} # (Source, Entry ID) -> Entry. - - @property - def max_mapped_id(self): - """The maximum mapped ID of this map's entries.""" - if not self._entry_map: - return 0 - return max(e._canonical_id for e in self._entry_map.itervalues()) - - def AddEntry(self, source, path, **items): - """Add a source-specific entry path to the map. - - Args: - source: The source of the entry (e.g. trace filename). - path: A path of source-specific entry IDs in the map (e.g. [PID, TID]). - **items: Dictionary of items (or sets of items) to be appended to the - target entry's |_items|. - """ - if path: - return self._GetSubMapEntry(source, path[0]).AddEntry(source, path[1:], - **items) - assert 'id' not in items # ID is set according to the path. - for key, value in items.iteritems(): - value_set = self._items[key] - if (isinstance(value, collections.Iterable) and - not isinstance(value, basestring)): - value_set.update(value) - else: - value_set.add(value) - - def MapEntry(self, source, path): - """Map an source-specific entry ID path to a canonical entry ID path. - - Args: - source: The source of the entry (e.g. trace filename). - path: A path of source-specific entry IDs in the map (e.g. [PID, TID]). - - Returns: - A path of canonical entry IDs in the map to which the provided path of - source-specific entry IDs is mapped. - """ - if not path: - return () - entry = self._entry_map[(source, path[0])] - return (entry._canonical_id,) + entry.MapEntry(source, path[1:]) - - def MergeEntries(self): - """Recursively merge the entries in this map. 
- - Example: Suppose that the following entries were added to the map: - - map.AddEntry(source='trace_A.json', path=[10], name='Browser') - map.AddEntry(source='trace_A.json', path=[20], name='Renderer') - map.AddEntry(source='trace_B.json', path=[30], name='Browser') - - Before merging, |map._entry_map| will contain three separate items: - - ('trace_A.json', 10) -> IdMap(_items={id: {10}, name: {'Browser'}}, - _sources={'trace_A.json'}) - ('trace_A.json', 20) -> IdMap(_items={id: {20}, name: {'Renderer'}}, - _sources={'trace_A.json'}) - ('trace_B.json', 30) -> IdMap(_items={id: {30}, name: {'Browser'}}, - _sources={'trace_B.json'}) - - Since the first two entries come from the same source, they cannot be - merged. On the other hand, the third entry could be merged with either of - the first two. Since it has a common name with the first entry, it will be - merged with it in this method: - - ('trace_A.json', 10) -> IdMap(_items={id: {10, 30}, name: {'Browser'}}, - _sources={'trace_A.json', 'trace_B.json'}) - ('trace_A.json', 20) -> IdMap(_items={id: {20}, name: {Renderer}}, - _sources={'trace_A.json'}) - ('trace_B.json', 30) -> <same IdMap as ('trace_A.json', 10)> - - Pairs of entries will be merged in a descending order of sizes of - pair-wise intersections of |entry._items| until there are no two entries - such that (1) they have at least one value in |entry._items| in common and - (2) they are mergeable (i.e. have no common source). Afterwards, unique IDs - are assigned to the resulting "canonical" entries and their sub-entries are - merged recursively. - """ - if self._depth == len(self.LEVELS): - return - - logging.debug('Merging %r entries in %s...', self.LEVELS[self._depth].name, - self) - - canonical_entries = self._CanonicalizeEntries() - self._AssignIdsToCanonicalEntries(canonical_entries) - - for entry in canonical_entries: - entry.MergeEntries() - - def _GetSubMapEntry(self, source, entry_id): - full_id = (source, entry_id) - entry = self._entry_map.get(full_id) - if entry is None: - entry = type(self)(self._depth + 1) - entry._sources.add(source) - entry._items['id'].add(entry_id) - self._entry_map[full_id] = entry - return entry - - def _CalculateUnmergeableMapFromEntrySources(self): - entry_ids_by_source = collections.defaultdict(set) - for entry_id, entry in self._entry_map.iteritems(): - for source in entry._sources: - entry_ids_by_source[source].add(entry_id) - - unmergeable_map = collections.defaultdict(set) - for unmergeable_set in entry_ids_by_source.itervalues(): - for entry_id in unmergeable_set: - unmergeable_map[entry_id].update(unmergeable_set - {entry_id}) - - return unmergeable_map - - def _IsMergeableWith(self, other): - return self._sources.isdisjoint(other._sources) - - def _GetMatch(self, other): - match_cls = self.LEVELS[self._depth - 1].match - return match_cls(*(self._items[f] & other._items[f] - for f in match_cls._fields)) - - def _MergeFrom(self, source): - if self._depth > 0: - # This is NOT a ROOT node, so we need to merge fields and sources from - # the source node. - for key, values in source._items.iteritems(): - self._items[key].update(values) - self._sources.update(source._sources) - - if self._depth < len(self.LEVELS): - # This is NOT a LEAF node, so we need to copy over entries from the - # source node's entry map. 
- assert not (set(self._entry_map.iterkeys()) & - set(source._entry_map.iterkeys())) - self._entry_map.update(source._entry_map) - - def _CanonicalizeEntries(self): - canonical_entries = self._entry_map.copy() - - # {ID1, ID2} -> Match between the two entries. - matches = {frozenset([full_id1, full_id2]): entry1._GetMatch(entry2) - for full_id1, entry1 in canonical_entries.iteritems() - for full_id2, entry2 in canonical_entries.iteritems() - if entry1._IsMergeableWith(entry2)} - - while matches: - # Pop the maximum match from the dictionary. - max_match_set, max_match = max(matches.iteritems(), - key=lambda (_, v): map(len, v)) - del matches[max_match_set] - canonical_full_id, merged_full_id = max_match_set - - # Skip pairs of entries that have nothing in common. - if not any(max_match): - continue - - # Merge the entries and update the map to reflect this. - canonical_entry = canonical_entries[canonical_full_id] - merged_entry = canonical_entries.pop(merged_full_id) - logging.debug('Merging %s into %s [match=%s]...', merged_entry, - canonical_entry, max_match) - canonical_entry._MergeFrom(merged_entry) - del merged_entry - self._entry_map[merged_full_id] = canonical_entry - - for match_set in matches.keys(): - if merged_full_id in match_set: - # Remove other matches with the merged entry. - del matches[match_set] - elif canonical_full_id in match_set: - [other_full_id] = match_set - {canonical_full_id} - other_entry = canonical_entries[other_full_id] - if canonical_entry._IsMergeableWith(other_entry): - # Update other matches with the canonical entry which are still - # mergeable. - matches[match_set] = canonical_entry._GetMatch(other_entry) - else: - # Remove other matches with the canonical entry which have become - # unmergeable. - del matches[match_set] - - return canonical_entries.values() - - def _AssignIdsToCanonicalEntries(self, canonical_entries): - assigned_ids = set() - canonical_entries_without_assigned_ids = set() - - # Try to assign each canonical entry to one of the IDs from which it was - # merged. - for canonical_entry in canonical_entries: - candidate_ids = canonical_entry._items['id'] - try: - assigned_id = next(candidate_id for candidate_id in candidate_ids - if candidate_id not in assigned_ids) - except StopIteration: - canonical_entries_without_assigned_ids.add(canonical_entry) - continue - assigned_ids.add(assigned_id) - canonical_entry._canonical_id = assigned_id - - # For canonical entries where this cannot be done (highly unlikely), scan - # from the minimal merged ID upwards for the first unassigned ID. 
- for canonical_entry in canonical_entries_without_assigned_ids: - assigned_id = next(candidate_id for candidate_id in - itertools.count(min(canonical_entry._items['id'])) - if candidate_id not in assigned_ids) - assigned_ids.add(assigned_id) - canonical_entry._canonical_id = assigned_id - - def __repr__(self): - cls_name = type(self).__name__ - if self._depth == 0: - return '%s root' % cls_name - else: - return '%s %s entry(%s)' % (cls_name, self.LEVELS[self._depth - 1].name, - self._items) - - -class ProcessIdMap(IdMap): - """Class for merging and mapping PIDs and TIDs from multiple sources.""" - - LEVELS = ( - IdMapLevel(name='process', - match=collections.namedtuple('ProcessMatch', - ['name', 'id', 'label'])), - IdMapLevel(name='thread', - match=collections.namedtuple('ThreadMatch', ['name', 'id'])) - ) - - -def LoadTrace(filename): - """Load a trace from a (possibly gzipped) file and return its parsed JSON.""" - logging.info('Loading trace %r...', filename) - if filename.endswith(HTML_FILENAME_SUFFIX): - traces = html2trace.ReadTracesFromHTMLFilePath(filename) - if len(traces) > 1: - logging.warning('HTML trace contains multiple trace data blocks. Only ' - 'the first block will be merged.') - return traces[0] - elif filename.endswith(GZIP_FILENAME_SUFFIX): - with gzip.open(filename, 'rb') as f: - return json.load(f) - else: - with open(filename, 'r') as f: - return json.load(f) - - -def SaveTrace(trace, filename): - """Save a JSON trace to a (possibly gzipped) file.""" - if filename is None: - logging.info('Dumping trace to standard output...') - print json.dumps(trace) - else: - logging.info('Saving trace %r...', filename) - if filename.endswith(HTML_FILENAME_SUFFIX): - with codecs.open(filename, mode='w', encoding='utf-8') as f: - trace2html.WriteHTMLForTraceDataToFile([trace], 'Merged trace', f) - elif filename.endswith(GZIP_FILENAME_SUFFIX): - with gzip.open(filename, 'wb') as f: - json.dump(trace, f) - else: - with open(filename, 'w') as f: - json.dump(trace, f) - - -def MergeTraceFiles(input_trace_filenames, output_trace_filename): - """Merge a collection of input trace files into an output trace file.""" - logging.info('Loading %d input traces...', len(input_trace_filenames)) - input_traces = collections.OrderedDict() - for input_trace_filename in input_trace_filenames: - input_traces[input_trace_filename] = LoadTrace(input_trace_filename) - - logging.info('Merging traces...') - output_trace = MergeTraces(input_traces) - - logging.info('Saving output trace...') - SaveTrace(output_trace, output_trace_filename) - - logging.info('Finished.') - - -def MergeTraces(traces): - """Merge a collection of JSON traces into a single JSON trace.""" - trace_components = collections.defaultdict(collections.OrderedDict) - - for filename, trace in traces.iteritems(): - if isinstance(trace, list): - trace = {'traceEvents': trace} - for name, component in trace.iteritems(): - trace_components[name][filename] = component - - merged_trace = {} - for component_name, components_by_filename in trace_components.iteritems(): - logging.info('Merging %d %r components...', len(components_by_filename), - component_name) - merged_trace[component_name] = MergeComponents(component_name, - components_by_filename) - - return merged_trace - - -def MergeComponents(component_name, components_by_filename): - """Merge a component of multiple JSON traces into a single component.""" - if component_name == 'traceEvents': - return MergeTraceEvents(components_by_filename) - else: - return 
MergeGenericTraceComponents(component_name, components_by_filename) - - -def MergeTraceEvents(events_by_filename): - """Merge trace events from multiple traces into a single list of events.""" - timestamp_range_by_filename = _AdjustTimestampRanges(events_by_filename) - process_map = _CreateProcessMapFromTraceEvents(events_by_filename) - merged_events = _CombineTraceEvents(events_by_filename, process_map) - merged_events.extend( - _BuildInjectedTraceMarkerEvents(timestamp_range_by_filename, process_map)) - return merged_events - - -def _AdjustTimestampRanges(events_by_filename): - logging.info('Adjusting timestamp ranges of traces...') - - previous_trace_max_timestamp = 0 - timestamp_range_by_filename = collections.OrderedDict() - - for index, (filename, events) in enumerate(events_by_filename.iteritems(), 1): - # Skip metadata events, the timestamps of which are always zero. - non_metadata_events = [e for e in events if e['ph'] != METADATA_PHASE] - if not non_metadata_events: - logging.warning('Trace %r (%d/%d) only contains metadata events.', - filename, index, len(events_by_filename)) - timestamp_range_by_filename[filename] = None - continue - - min_timestamp = min(e['ts'] for e in non_metadata_events) - max_timestamp = max(e['ts'] for e in non_metadata_events) - - # Determine by how much the timestamps should be shifted. - injected_timestamp_shift = max( - previous_trace_max_timestamp + MIN_TRACE_GAP_IN_US - min_timestamp, 0) - logging.info('Injected timestamp shift in trace %r (%d/%d): %d ms ' - '[min=%d, max=%d, duration=%d].', filename, index, - len(events_by_filename), injected_timestamp_shift, - min_timestamp, max_timestamp, max_timestamp - min_timestamp) - - if injected_timestamp_shift > 0: - # Shift the timestamps. - for event in non_metadata_events: - event['ts'] += injected_timestamp_shift - - # Adjust the range. 
- min_timestamp += injected_timestamp_shift - max_timestamp += injected_timestamp_shift - - previous_trace_max_timestamp = max_timestamp - - timestamp_range_by_filename[filename] = min_timestamp, max_timestamp - - return timestamp_range_by_filename - - -def _CreateProcessMapFromTraceEvents(events_by_filename): - logging.info('Creating process map from trace events...') - - process_map = ProcessIdMap() - for filename, events in events_by_filename.iteritems(): - for event in events: - pid, tid = event['pid'], event['tid'] - process_map.AddEntry(source=filename, path=(pid, tid)) - if event['ph'] == METADATA_PHASE: - if event['name'] == 'process_name': - process_map.AddEntry(source=filename, path=(pid,), - name=event['args']['name']) - elif event['name'] == 'process_labels': - process_map.AddEntry(source=filename, path=(pid,), - label=event['args']['labels'].split(',')) - elif event['name'] == 'thread_name': - process_map.AddEntry(source=filename, path=(pid, tid), - name=event['args']['name']) - - process_map.MergeEntries() - return process_map - - -def _CombineTraceEvents(events_by_filename, process_map): - logging.info('Combining trace events from all traces...') - - type_name_event_by_pid = {} - combined_events = [] - - for index, (filename, events) in enumerate(events_by_filename.iteritems(), 1): - for event in events: - if _UpdateTraceEventForMerge(event, process_map, filename, index, - type_name_event_by_pid): - combined_events.append(event) - - return combined_events - - -def _UpdateTraceEventForMerge(event, process_map, filename, index, - type_name_event_by_pid): - pid, tid = process_map.MapEntry(source=filename, - path=(event['pid'], event['tid'])) - event['pid'], event['tid'] = pid, tid - - if event['ph'] == METADATA_PHASE: - # Update IDs in 'stackFrames' and 'typeNames' metadata events. - if event['name'] == 'stackFrames': - _UpdateDictIds(index, event['args'], 'stackFrames') - for frame in event['args']['stackFrames'].itervalues(): - _UpdateFieldId(index, frame, 'parent') - elif event['name'] == 'typeNames': - _UpdateDictIds(index, event['args'], 'typeNames') - existing_type_name_event = type_name_event_by_pid.get(pid) - if existing_type_name_event is None: - type_name_event_by_pid[pid] = event - else: - existing_type_name_event['args']['typeNames'].update( - event['args']['typeNames']) - # Don't add the event to the merged trace because it has been merged - # into an existing 'typeNames' metadata event for the given process. - return False - - elif event['ph'] == MEMORY_DUMP_PHASE: - # Update stack frame and type name IDs in heap dump entries in process - # memory dumps. - for heap_dump in event['args']['dumps'].get('heaps', {}).itervalues(): - for heap_entry in heap_dump['entries']: - _UpdateFieldId(index, heap_entry, 'bt', ignored_values=['']) - _UpdateFieldId(index, heap_entry, 'type') - - return True # Events should be added to the merged trace by default. 
- - -def _ConvertId(index, original_id): - return '%d#%s' % (index, original_id) - - -def _UpdateDictIds(index, parent_dict, key): - parent_dict[key] = { - _ConvertId(index, original_id): value - for original_id, value in parent_dict[key].iteritems()} - - -def _UpdateFieldId(index, parent_dict, key, ignored_values=()): - original_value = parent_dict.get(key) - if original_value is not None and original_value not in ignored_values: - parent_dict[key] = _ConvertId(index, original_value) - - -def _BuildInjectedTraceMarkerEvents(timestamp_range_by_filename, process_map): - logging.info('Building injected trace marker events...') - - injected_pid = process_map.max_mapped_id + 1 - - # Inject a mock process with a thread. - injected_events = [ - { - 'pid': injected_pid, - 'tid': 0, - 'ph': METADATA_PHASE, - 'ts': 0, - 'name': 'process_sort_index', - 'args': {'sort_index': -1000} # Show the process at the top. - }, - { - 'pid': injected_pid, - 'tid': 0, - 'ph': METADATA_PHASE, - 'ts': 0, - 'name': 'process_name', - 'args': {'name': 'Merged traces'} - }, - { - 'pid': injected_pid, - 'tid': 0, - 'ph': METADATA_PHASE, - 'ts': 0, - 'name': 'thread_name', - 'args': {'name': 'Trace'} - } - ] - - # Inject slices for each sub-trace denoting its beginning and end. - for index, (filename, timestamp_range) in enumerate( - timestamp_range_by_filename.iteritems(), 1): - if timestamp_range is None: - continue - min_timestamp, max_timestamp = timestamp_range - name = 'Trace %r (%d/%d)' % (filename, index, - len(timestamp_range_by_filename)) - slice_id = 'INJECTED_TRACE_MARKER_%d' % index - injected_events.extend([ - { - 'pid': injected_pid, - 'tid': 0, - 'ph': BEGIN_PHASE, - 'cat': 'injected', - 'name': name, - 'id': slice_id, - 'ts': min_timestamp - }, - { - 'pid': injected_pid, - 'tid': 0, - 'ph': END_PHASE, - 'cat': 'injected', - 'name': name, - 'id': slice_id, - 'ts': max_timestamp - } - ]) - - return injected_events - - -def MergeGenericTraceComponents(component_name, components_by_filename): - """Merge a generic component of multiple JSON traces into a single component. - - This function is only used for components that don't have a component-specific - merging function (see MergeTraceEvents). It just returns the component's first - provided value (in some trace). - """ - components = components_by_filename.itervalues() - first_component = next(components) - if not all(c == first_component for c in components): - logging.warning( - 'Values of trace component %r differ across the provided traces. ' - 'The first defined value of the component will be used.', - component_name) - return first_component - - -def Main(argv): - parser = argparse.ArgumentParser(description='Merge multiple traces.', - add_help=False) - parser.add_argument('input_traces', metavar='INPUT_TRACE', nargs='+', - help='Input trace filename.') - parser.add_argument('-h', '--help', action='help', - help='Show this help message and exit.') - parser.add_argument('-o', '--output_trace', help='Output trace filename. If ' - 'not provided, the merged trace will be written to ' - 'the standard output.') - parser.add_argument('-v', '--verbose', action='count', dest='verbosity', - help='Increase verbosity level.') - args = parser.parse_args(argv[1:]) - - # Set verbosity level. 
- if args.verbosity >= 2: - logging_level = logging.DEBUG - elif args.verbosity == 1: - logging_level = logging.INFO - else: - logging_level = logging.WARNING - logging.getLogger().setLevel(logging_level) - - try: - MergeTraceFiles(args.input_traces, args.output_trace) - return 0 - except Exception: # pylint: disable=broad-except - logging.exception('Something went wrong:') - return 1 - finally: - logging.warning('This is an EXPERIMENTAL TOOL! If you encounter any ' - 'issues, please file a Catapult bug ' - '(https://github.com/catapult-project/catapult/issues/new) ' - 'with your current Catapult commit hash, a description of ' - 'the problem and any error messages, attach the input ' - 'traces and notify petrcermak@chromium.org. Thank you!') diff --git a/chromium/third_party/catapult/tracing/tracing_build/run_profile.py b/chromium/third_party/catapult/tracing/tracing_build/run_profile.py deleted file mode 100644 index 11ee52681f8..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/run_profile.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2012 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import argparse -import cProfile -import pstats -import StringIO -import inspect -import sys - - - -class Bench(object): - - def SetUp(self): - pass - - def Run(self): - pass - - def TearDown(self): - pass - - -def Main(args): - parser = argparse.ArgumentParser() - parser.add_argument('--repeat-count', type=int, default=10) - parser.add_argument('bench_name') - args = parser.parse_args(args) - - benches = [g for g in globals().values() - if g != Bench and inspect.isclass(g) and - Bench in inspect.getmro(g)] - - # pylint: disable=undefined-loop-variable - b = [b for b in benches if b.__name__ == args.bench_name] - if len(b) != 1: - sys.stderr.write('Bench %r not found.' % args.bench_name) - return 1 - - bench = b[0]() - bench.SetUp() - try: - pr = cProfile.Profile() - pr.enable(builtins=False) - for _ in range(args.repeat_count): - bench.Run() - pr.disable() - s = StringIO.StringIO() - - sortby = 'cumulative' - ps = pstats.Stats(pr, stream=s).sort_stats(sortby) - ps.print_stats() - print s.getvalue() - return 0 - finally: - bench.TearDown() - - -if __name__ == '__main__': - sys.exit(Main(sys.argv[1:])) diff --git a/chromium/third_party/catapult/tracing/tracing_build/run_vinn_tests.py b/chromium/third_party/catapult/tracing/tracing_build/run_vinn_tests.py deleted file mode 100644 index f8c79e0de35..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/run_vinn_tests.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import argparse -import os -import sys - -from hooks import install -import tracing_project -import vinn - - -def _RelPathToUnixPath(p): - return p.replace(os.sep, '/') - - -def RunTests(): - project = tracing_project.TracingProject() - headless_test_module_filenames = [ - '/' + _RelPathToUnixPath(x) - for x in project.FindAllD8TestModuleRelPaths()] - headless_test_module_filenames.sort() - - cmd = """ - HTMLImportsLoader.loadHTML('/tracing/base/headless_tests.html'); - tr.b.unittest.loadAndRunTests(sys.argv.slice(1)); - """ - res = vinn.RunJsString( - cmd, source_paths=list(project.source_paths), - js_args=headless_test_module_filenames, - stdout=sys.stdout, stdin=sys.stdin) - return res.returncode - - -def Main(argv): - parser = argparse.ArgumentParser( - description='Run d8 tests.') - parser.add_argument( - '--no-install-hooks', dest='install_hooks', action='store_false') - parser.set_defaults(install_hooks=True) - args = parser.parse_args(argv[1:]) - if args.install_hooks: - install.InstallHooks() - - sys.exit(RunTests()) diff --git a/chromium/third_party/catapult/tracing/tracing_build/trace2html.py b/chromium/third_party/catapult/tracing/tracing_build/trace2html.py deleted file mode 100644 index 82682d065a8..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/trace2html.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import argparse -import codecs -import base64 -import gzip -import json -import os -import StringIO - -import tracing_project - -from py_vulcanize import generate - - -def Main(argv): - - parser = argparse.ArgumentParser( - usage='%(prog)s <options> trace_file1 [trace_file2 ...]', - epilog='Takes the provided trace file and produces a standalone HTML\n' - 'file that contains both the trace and the trace viewer.') - - project = tracing_project.TracingProject() - project.AddConfigNameOptionToParser(parser) - - parser.add_argument( - '--output', dest='output', - help='Where to put the generated result. 
If not ' - 'given, the trace filename is used, with an html suffix.') - parser.add_argument( - '--quiet', action='store_true', - help='Dont print the output file name') - parser.add_argument( - '--title', type=str, - help='The title to put in trace viewer top panel.') - parser.add_argument('trace_files', nargs='+') - args = parser.parse_args(argv[1:]) - - if args.output: - output_filename = args.output - elif len(args.trace_files) > 1: - parser.error('Must specify --output if there are multiple trace files.') - else: - name_part = os.path.splitext(args.trace_files[0])[0] - output_filename = name_part + '.html' - - with codecs.open(output_filename, mode='w', encoding='utf-8') as f: - WriteHTMLForTracesToFile(args.trace_files, f, args.title, - config_name=args.config_name) - - if not args.quiet: - print output_filename - return 0 - - -class ViewerDataScript(generate.ExtraScript): - - def __init__(self, trace_data_string, mime_type): - super(ViewerDataScript, self).__init__() - self._trace_data_string = trace_data_string - self._mime_type = mime_type - - def WriteToFile(self, output_file): - output_file.write('<script id="viewer-data" type="%s">\n' % self._mime_type) - compressed_trace = StringIO.StringIO() - with gzip.GzipFile(fileobj=compressed_trace, mode='w') as f: - f.write(self._trace_data_string) - b64_content = base64.b64encode(compressed_trace.getvalue()) - output_file.write(b64_content) - output_file.write('\n</script>\n') - - -def WriteHTMLForTraceDataToFile(trace_data_list, - title, output_file, - config_name=None): - project = tracing_project.TracingProject() - - if config_name is None: - config_name = project.GetDefaultConfigName() - - modules = [ - 'tracing.trace2html', - 'tracing.extras.importer.gzip_importer', # Must have for all configs. - project.GetModuleNameForConfigName(config_name) - ] - - vulcanizer = project.CreateVulcanizer() - load_sequence = vulcanizer.CalcLoadSequenceForModuleNames(modules) - - scripts = [] - for trace_data in trace_data_list: - # If the object was previously decoded from valid JSON data (e.g., in - # WriteHTMLForTracesToFile), it will be a JSON object at this point and we - # should re-serialize it into a string. Other types of data will be already - # be strings. - if not isinstance(trace_data, basestring): - trace_data = json.dumps(trace_data) - mime_type = 'application/json' - else: - mime_type = 'text/plain' - scripts.append(ViewerDataScript(trace_data, mime_type)) - generate.GenerateStandaloneHTMLToFile( - output_file, load_sequence, title, extra_scripts=scripts) - - -def WriteHTMLForTracesToFile(trace_filenames, output_file, title='', - config_name=None): - trace_data_list = [] - for filename in trace_filenames: - with open(filename, 'r') as f: - trace_data = f.read() - trace_data_list.append(trace_data) - if not title: - title = "Trace from %s" % ','.join(trace_filenames) - WriteHTMLForTraceDataToFile(trace_data_list, title, output_file, config_name) diff --git a/chromium/third_party/catapult/tracing/tracing_build/trace2html_unittest.py b/chromium/third_party/catapult/tracing/tracing_build/trace2html_unittest.py deleted file mode 100644 index 4489ebc3e77..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/trace2html_unittest.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import codecs -import os -import tempfile -import unittest - -from tracing_build import trace2html - - -class Trace2HTMLTests(unittest.TestCase): - - def testWriteHTMLForTracesToFile(self): - # Note: We can't use "with" when working with tempfile.NamedTemporaryFile as - # that does not work on Windows. We use the longer, more clunky version - # instead. See https://bugs.python.org/issue14243 for detials. - raw_tmpfile = tempfile.NamedTemporaryFile( - mode='w', suffix='.html', delete=False) - raw_tmpfile.close() - try: - with codecs.open(raw_tmpfile.name, 'w', encoding='utf-8') as tmpfile: - simple_trace_path = os.path.join( - os.path.dirname(__file__), - '..', 'test_data', 'simple_trace.json') - big_trace_path = os.path.join( - os.path.dirname(__file__), - '..', 'test_data', 'big_trace.json') - non_json_trace_path = os.path.join( - os.path.dirname(__file__), - '..', 'test_data', 'android_systrace.txt') - trace2html.WriteHTMLForTracesToFile( - [big_trace_path, simple_trace_path, non_json_trace_path], tmpfile) - finally: - os.remove(raw_tmpfile.name) diff --git a/chromium/third_party/catapult/tracing/tracing_build/tracing_dev_server_config.py b/chromium/third_party/catapult/tracing/tracing_build/tracing_dev_server_config.py deleted file mode 100644 index fcd0a1a9f1f..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/tracing_dev_server_config.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import json -import os - -import tracing_project - - -import webapp2 -from webapp2 import Route - - -def _RelPathToUnixPath(p): - return p.replace(os.sep, '/') - - -class TestListHandler(webapp2.RequestHandler): - - def get(self, *args, **kwargs): # pylint: disable=unused-argument - project = tracing_project.TracingProject() - test_relpaths = ['/' + _RelPathToUnixPath(x) - for x in project.FindAllTestModuleRelPaths()] - - tests = {'test_relpaths': test_relpaths} - tests_as_json = json.dumps(tests) - self.response.content_type = 'application/json' - return self.response.write(tests_as_json) - - -class TracingDevServerConfig(object): - - def __init__(self): - self.project = tracing_project.TracingProject() - - def GetName(self): - return 'tracing' - - def GetRunUnitTestsUrl(self): - return '/tracing/tests.html' - - def AddOptionstToArgParseGroup(self, g): - g.add_argument('-d', '--data-dir', default=self.project.test_data_path) - g.add_argument('-s', '--skp-data-dir', default=self.project.skp_data_path) - - def GetRoutes(self, args): # pylint: disable=unused-argument - return [Route('/tracing/tests', TestListHandler)] - - def GetSourcePaths(self, args): # pylint: disable=unused-argument - return list(self.project.source_paths) - - def GetTestDataPaths(self, args): # pylint: disable=unused-argument - return [ - ('/tracing/test_data/', os.path.expanduser(args.data_dir)), - ('/tracing/skp_data/', os.path.expanduser(args.skp_data_dir)), - ] diff --git a/chromium/third_party/catapult/tracing/tracing_build/update_gypi.py b/chromium/third_party/catapult/tracing/tracing_build/update_gypi.py deleted file mode 100644 index 94ee47e5cd9..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/update_gypi.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) 2015 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. 
- -import collections -import os -import re - -import tracing_project -from tracing_build import check_common - - -class _Token(object): - - def __init__(self, data, token_id=None): - self.data = data - if token_id: - self.token_id = token_id - else: - self.token_id = 'plain' - - -class BuildFile(object): - - def __init__(self, text, file_groups): - self._file_groups = file_groups - self._tokens = [token for token in self._Tokenize(text)] - - def _Tokenize(self, text): - rest = text - token_regex = self._TokenRegex() - while len(rest): - m = token_regex.search(rest) - if not m: - # In `rest', we couldn't find a match. - # So, lump the entire `rest' into a token - # and stop producing any more tokens. - yield _Token(rest) - return - min_index, end_index, matched_token = self._ProcessMatch(m) - - if min_index > 0: - yield _Token(rest[:min_index]) - - yield matched_token - rest = rest[end_index:] - - def Update(self, files_by_group): - for token in self._tokens: - if token.token_id in files_by_group: - token.data = self._GetReplacementListAsString( - token.data, - files_by_group[token.token_id]) - - def Write(self, f): - for token in self._tokens: - f.write(token.data) - - def _ProcessMatch(self, match): - raise NotImplementedError - - def _TokenRegex(self): - raise NotImplementedError - - def _GetReplacementListAsString(self, existing_list_as_string, filelist): - raise NotImplementedError - - -class GypiFile(BuildFile): - - def _ProcessMatch(self, match): - min_index = match.start(2) - end_index = match.end(2) - token = _Token(match.string[min_index:end_index], - token_id=match.groups()[0]) - return min_index, end_index, token - - def _TokenRegex(self): - # regexp to match the following: - # 'file_group_name': [ - # 'path/to/one/file.extension', - # 'another/file.ex', - # ] - # In the match, - # group 1 is : 'file_group_name' - # group 2 is : """ 'path/to/one/file.extension',\n 'another/file.ex',\n""" - regexp_str = r"'(%s)': \[\n(.+?) +\],?\n" % "|".join(self._file_groups) - return re.compile(regexp_str, re.MULTILINE | re.DOTALL) - - def _GetReplacementListAsString(self, existing_list_as_string, filelist): - list_entry = existing_list_as_string.splitlines()[0] - prefix, _, suffix = list_entry.split("'") - return "".join(["'".join([prefix, filename, suffix + '\n']) - for filename in filelist]) - - -def _GroupFiles(file_name_to_group_name_func, filenames): - file_groups = collections.defaultdict(lambda: []) - for filename in filenames: - file_groups[file_name_to_group_name_func(filename)].append(filename) - for group in file_groups: - file_groups[group].sort() - return file_groups - - -def _UpdateBuildFile(filename, build_file_class): - with open(filename, 'r') as f: - build_file = build_file_class(f.read(), check_common.FILE_GROUPS) - files_by_group = _GroupFiles(check_common.GetFileGroupFromFileName, - check_common.GetKnownFiles()) - build_file.Update(files_by_group) - with open(filename, 'w') as f: - build_file.Write(f) - - -def UpdateGypi(): - tvp = tracing_project.TracingProject() - _UpdateBuildFile( - os.path.join(tvp.tracing_root_path, 'trace_viewer.gypi'), GypiFile) - - -def Update(): - UpdateGypi() diff --git a/chromium/third_party/catapult/tracing/tracing_build/update_gypi_unittest.py b/chromium/third_party/catapult/tracing/tracing_build/update_gypi_unittest.py deleted file mode 100644 index 9187145a0f4..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/update_gypi_unittest.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2015 The Chromium Authors. 
All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import unittest - -from tracing_build.update_gypi import GypiFile - - -class UpdateGypiTests(unittest.TestCase): - - def setUp(self): - self.file_groups = ['group1', 'group2'] - - def testGypiTokenizer(self): - content = ("useless data\n'group1': [\n <file list goes here>\n" - " ]\nNote the four spaces before the ] above") - gypi_files = GypiFile(content, self.file_groups) - self.assertEqual(3, len(gypi_files._tokens)) - self.assertEqual('plain', gypi_files._tokens[0].token_id) - self.assertEqual( - "useless data\n'group1': [\n", gypi_files._tokens[0].data) - self.assertEqual('group1', gypi_files._tokens[1].token_id) - self.assertEqual(" <file list goes here>\n", gypi_files._tokens[1].data) - self.assertEqual('plain', gypi_files._tokens[2].token_id) - self.assertEqual( - " ]\nNote the four spaces before the ] above", - gypi_files._tokens[2].data) - - def testGypiFileListBuilder(self): - gypi_file = GypiFile('', self.file_groups) - existing_list = (" '/four/spaces/indent',\n'" - " '/five/spaces/but/only/first/line/matters',\n") - new_list = ['item1', 'item2', 'item3'] - self.assertEqual( - " 'item1',\n 'item2',\n 'item3',\n", - gypi_file._GetReplacementListAsString(existing_list, new_list)) diff --git a/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer.py b/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer.py deleted file mode 100644 index d41f54ed50a..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import codecs -import argparse -import os -import sys - -import tracing_project -from py_vulcanize import generate - - -def Main(argv): - - parser = argparse.ArgumentParser( - usage='%(prog)s <options>', - epilog=('Produces a standalone HTML import that contains the\n' - 'trace viewer.')) - - project = tracing_project.TracingProject() - project.AddConfigNameOptionToParser(parser) - - parser.add_argument('--no-min', default=False, action='store_true', - help='skip minification') - parser.add_argument('--report-sizes', default=False, action='store_true', - help='Explain what makes tracing big.') - parser.add_argument('--report-deps', default=False, action='store_true', - help='Print a dot-formatted deps graph.') - parser.add_argument('--output', - help='Where to put the generated result. 
If not given, ' - '$TRACING/tracing/bin/trace_viewer.html is used.') - - args = parser.parse_args(argv[1:]) - - tracing_dir = os.path.relpath( - os.path.join(os.path.dirname(__file__), '..', '..')) - if args.output: - output_filename = args.output - else: - output_filename = os.path.join( - tracing_dir, 'tracing/bin/trace_viewer_%s.html' % args.config_name) - - with codecs.open(output_filename, 'w', encoding='utf-8') as f: - WriteTraceViewer( - f, - config_name=args.config_name, - minify=not args.no_min, - report_sizes=args.report_sizes, - report_deps=args.report_deps) - - return 0 - - -def WriteTraceViewer(output_file, - config_name=None, - minify=False, - report_sizes=False, - report_deps=False, - output_html_head_and_body=True, - extra_search_paths=None, - extra_module_names_to_load=None): - project = tracing_project.TracingProject() - if extra_search_paths: - for p in extra_search_paths: - project.source_paths.append(p) - if config_name is None: - config_name = project.GetDefaultConfigName() - - module_names = [project.GetModuleNameForConfigName(config_name)] - if extra_module_names_to_load: - module_names += extra_module_names_to_load - - vulcanizer = project.CreateVulcanizer() - load_sequence = vulcanizer.CalcLoadSequenceForModuleNames( - module_names) - - if report_deps: - sys.stdout.write(vulcanizer.GetDepsGraphFromModuleNames(module_names)) - - generate.GenerateStandaloneHTMLToFile( - output_file, load_sequence, - minify=minify, report_sizes=report_sizes, - output_html_head_and_body=output_html_head_and_body) diff --git a/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py b/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py deleted file mode 100644 index a5c154f4af9..00000000000 --- a/chromium/third_party/catapult/tracing/tracing_build/vulcanize_trace_viewer_unittest.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (c) 2014 The Chromium Authors. All rights reserved. -# Use of this source code is governed by a BSD-style license that can be -# found in the LICENSE file. - -import codecs -import os -import unittest -import tempfile - -from tracing_build import vulcanize_trace_viewer - - -class Trace2HTMLTests(unittest.TestCase): - - def testWriteHTMLForTracesToFile(self): - try: - # Note: We can't use "with" when working with tempfile.NamedTemporaryFile - # as that does not work on Windows. We use the longer, more clunky version - # instead. See https://bugs.python.org/issue14243 for detials. - raw_tmpfile = tempfile.NamedTemporaryFile( - mode='w', suffix='.html', delete=False) - raw_tmpfile.close() - with codecs.open(raw_tmpfile.name, 'w', encoding='utf-8') as tmpfile: - vulcanize_trace_viewer.WriteTraceViewer(tmpfile) - finally: - os.remove(raw_tmpfile.name) |