diff options
30 files changed, 1776 insertions, 1843 deletions
diff --git a/database/scripts/privileges.sql b/database/scripts/privileges.sql index 7571fc6..20fa970 100644 --- a/database/scripts/privileges.sql +++ b/database/scripts/privileges.sql @@ -30,6 +30,10 @@ GRANT SELECT ON sha1 TO bmuser; GRANT INSERT ON sha1 TO bmuser; GRANT UPDATE ON sha1_id_seq TO bmuser; +GRANT SELECT ON testCase TO bmuser; +GRANT INSERT ON testCase TO bmuser; +GRANT UPDATE ON testCase_id_seq TO bmuser; + GRANT SELECT ON benchmark TO bmuser; GRANT INSERT ON benchmark TO bmuser; GRANT UPDATE ON benchmark_id_seq TO bmuser; @@ -58,3 +62,9 @@ GRANT INSERT ON timeSeriesAnnotation TO bmuser; GRANT UPDATE ON timeSeriesAnnotation TO bmuser; GRANT DELETE ON timeSeriesAnnotation TO bmuser; GRANT UPDATE ON timeSeriesAnnotation_id_seq TO bmuser; + +GRANT SELECT ON change TO bmuser; +GRANT INSERT ON change TO bmuser; +GRANT UPDATE ON change TO bmuser; +GRANT DELETE ON change TO bmuser; +GRANT UPDATE ON change_id_seq TO bmuser; diff --git a/database/scripts/tabledefs.sql b/database/scripts/tabledefs.sql index 31c25fc..07f2f84 100644 --- a/database/scripts/tabledefs.sql +++ b/database/scripts/tabledefs.sql @@ -31,10 +31,19 @@ CREATE TABLE sha1 ( ALTER TABLE sha1 OWNER TO postgres; CREATE INDEX sha1_value_idx ON sha1 (value); -- -CREATE TABLE benchmark ( +CREATE TABLE testCase ( id BIGSERIAL PRIMARY KEY, value TEXT UNIQUE NOT NULL) WITH (OIDS=FALSE); +ALTER TABLE testCase OWNER TO postgres; +CREATE INDEX testCase_value_idx ON testCase (value); +-- +CREATE TABLE benchmark ( + id BIGSERIAL PRIMARY KEY, + value TEXT UNIQUE NOT NULL, + testCaseId BIGINT NOT NULL REFERENCES testCase ON DELETE CASCADE +) WITH (OIDS=FALSE); ALTER TABLE benchmark OWNER TO postgres; CREATE INDEX benchmark_value_idx ON benchmark (value); +CREATE INDEX benchmark_testCase_idx ON benchmark (testCaseId); -- CREATE TABLE metric ( id BIGSERIAL PRIMARY KEY, value TEXT UNIQUE NOT NULL, @@ -155,3 +164,40 @@ CREATE INDEX tsanno_platform_idx ON timeSeriesAnnotation (platformId); CREATE INDEX 
tsanno_branch_idx ON timeSeriesAnnotation (branchId); CREATE INDEX tsanno_benchmark_idx ON timeSeriesAnnotation (benchmarkId); CREATE INDEX tsanno_metric_idx ON timeSeriesAnnotation (metricId); + + +--Time series changes: +CREATE TABLE change +( + id BIGSERIAL PRIMARY KEY, + + benchmarkId BIGINT NOT NULL REFERENCES benchmark ON DELETE CASCADE, + testCaseId BIGINT NOT NULL REFERENCES testCase ON DELETE CASCADE, + metricId BIGINT NOT NULL REFERENCES metric ON DELETE CASCADE, + + hostId BIGINT NOT NULL REFERENCES host ON DELETE CASCADE, + platformId BIGINT NOT NULL REFERENCES platform ON DELETE CASCADE, + branchId BIGINT NOT NULL REFERENCES branch ON DELETE CASCADE, + + sha1Id BIGINT NOT NULL REFERENCES sha1 ON DELETE CASCADE, + timestamp INTEGER NOT NULL, -- First upload timestamp (UTC) + + regression BOOLEAN NOT NULL, -- Regression or improvement + + score REAL NOT NULL, + premature_score REAL NOT NULL, + + UNIQUE (benchmarkId, metricId, hostId, platformId, branchId, sha1id) +) WITH (OIDS=FALSE); +ALTER TABLE change OWNER TO postgres; +CREATE INDEX change_benchmark_idx ON change (benchmarkId); +CREATE INDEX change_testcase_idx ON change (testCaseId); +CREATE INDEX change_metric_idx ON change (metricId); +CREATE INDEX change_host_idx ON change (hostId); +CREATE INDEX change_platform_idx ON change (platformId); +CREATE INDEX change_branch_idx ON change (branchId); +CREATE INDEX change_sha1_idx ON change (sha1Id); +CREATE INDEX change_timestamp_idx ON change (timestamp); +CREATE INDEX change_regression_idx ON change (regression); +CREATE INDEX change_score_idx ON change (score); +CREATE INDEX change_premature_score_idx ON change (premature_score); diff --git a/database/scripts/updatetestcaseids.py b/database/scripts/updatetestcaseids.py new file mode 100755 index 0000000..d11cb80 --- /dev/null +++ b/database/scripts/updatetestcaseids.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +""" +This script updates the testCaseId attribute of all rows in the benchmark table. 
+""" + +import sys +from dbaccess import setDatabase, execQuery, commit +from misc import findOrInsertId, getOptions, benchmarkToComponents + +# --- BEGIN Global functions ---------------------------------------------- +def printUsage(): + sys.stderr.write( + "usage: " + sys.argv[0] + " [--dbhost H] [--dbport P] --db D\n") +# --- END Global functions ---------------------------------------------- + + +# --- BEGIN Main program ---------------------------------------------- + +options, http_get = getOptions() + +if not ("db" in options): + printUsage() + sys.exit(1) + +setDatabase( + options["dbhost"] if "dbhost" in options else None, + options["dbport"] if "dbport" in options else None, + options["db"]) + +for benchmark_id, benchmark in execQuery( + "SELECT id, value FROM benchmark;", ()): + + test_case, test_function, data_tag = ( + benchmarkToComponents(benchmark)) + test_case_id = findOrInsertId("testCase", test_case) + + execQuery( + "UPDATE benchmark" + " SET testCaseId = %s" + " WHERE id = %s", (test_case_id, benchmark_id), False) + +commit() + +# --- END Main program ---------------------------------------------- diff --git a/scripts/computerankings.py b/scripts/computerankings.py deleted file mode 100755 index 8ad1fd3..0000000 --- a/scripts/computerankings.py +++ /dev/null @@ -1,359 +0,0 @@ -#!/usr/bin/env python - -import sys -from dbaccess import setDatabase, execQuery, commit -from misc import ( - getOptions, textToId, getAllSnapshots, getLastRankingSnapshot, getContext, - isValidSHA1, getBMTimeSeriesStatsList) - - -# --- BEGIN Global functions ---------------------------------------------- - -def printUsage(): - sys.stderr.write( - "usage: " + sys.argv[0] + - " --help | [--dbhost H] [--dbport P] --db D --host H --platform P " - "--branch B --sha1 S [--noprogress NP]\n") - -def printVerboseUsage(): - printUsage() - sys.stderr.write("\noptions:\n") - sys.stderr.write( - " --help: This help.\n") - sys.stderr.write( - " --dbhost: The database server host 
(overriding the default).\n") - sys.stderr.write( - " --dbport: The database server port (overriding the default).\n") - sys.stderr.write( - " --db: The database. One of 'bm' or 'bm-dev' (the latter " - "intended for experimentation).\n") - sys.stderr.write( - " --host: The physical machine on which the results were " - "produced (e.g. barbarella or 172.24.90.79).\n") - sys.stderr.write( - "--platform: The OS/compiler/architecture combination " - "(e.g. linux-g++-32).\n") - sys.stderr.write( - " --branch: The product branch (e.g. 'qt 4.6', 'qt 4.7', or " - "'qt master').\n") - sys.stderr.write( - " --sha1: The tested revision within the branch. Can be " - "extracted using 'git log -1 --pretty=format:%H' (assuming the " - "tested revision is the current head revision).\n") - sys.stderr.write( - " --noprogress: Specify \'true\' to disable progress indicator.\n") - - -# ### 2 B DOCUMENTED! -def printProgress(p, lead): - sys.stdout.write(lead + " ... (" + "{0:.2f}".format(p) + " %)\r") - sys.stdout.flush() - - -# ### 2 B DOCUMENTED! -# NOTE: This function is currently duplicated elsewhere in JavaScipt! -def changeMagnitudeScore(change): - max_change = 2.0 - abs_change = (1.0 / change) if change < 1 else change - return (min(abs_change, max_change) - 1.0) / (max_change - 1.0) - - -# ### 2 B DOCUMENTED! -# NOTE: This function is currently duplicated elsewhere in JavaScript! -def qualityScore(lsd, ni, nz, nc, mdrse): - max_bad_snapshots = 10 # experimental; maybe use max durability score? 
- max_sample_size = 5; - max_LSD = max_bad_snapshots; - max_NI = max_bad_snapshots * max_sample_size; - max_NZ = max_bad_snapshots * max_sample_size; - max_NC = max_bad_snapshots; - - lsd_score = 0 if (lsd == -1) else min(1, lsd / float(max_LSD)); - ni_score = min(1, ni / float(max_NI)); - nz_score = min(1, nz / float(max_NZ)); - nc_score = min(1, nc / float(max_NC)); - mdrse_score = 0 if (mdrse == -1) else (mdrse / 100.0); - - return (lsd_score + ni_score + nz_score + nc_score + mdrse_score) / 5.0; - - -# Registers the ranking for a given statistic. context1_id and context2_id -# refer to the first and last snapshot respectively in the interval used for -# computing the rankings. -# Assumptions: -# - A high value should be ranked above a small one. -# - A negative value is undefined and gets an invalid ranking position, i.e. -1. -def registerRanking(table, stat_index, stat_name, context1_id, context2_id): - - table.sort(key=lambda x: x[stat_index], reverse=True) - - stat_id = textToId("rankingStat", stat_name) - assert stat_id >= 0 - - row_pos = 0 - ranking_pos = 0 - for row in table: - benchmark_id = row[0] - metric_id = row[1] - lc_timestamp = row[2] - stat_value = row[stat_index] - - # The following statement ensures the following conditions: - # - A negative value gets an invalid ranking position, i.e. -1 - # - Equal values get the same ranking position. - # - The ranking position of benchmark B indicates the number of - # benchmarks ranked higher than B (i.e. having a smaller ranking - # position). - if stat_value < 0: - ranking_pos = -1 - # Note that the remaining values will now be negative, so updating - # row_pos and prev_stat_value is no longer necessary! 
- else: - if (row_pos > 0) and (stat_value != prev_stat_value): - ranking_pos = row_pos - row_pos = row_pos + 1 - prev_stat_value = stat_value - - # Insert or update the corresponding row in the 'ranking' table: - execQuery( - "SELECT merge_ranking(%s, %s, %s, %s, %s, %s, %s, %s)", - (context1_id, context2_id, benchmark_id, metric_id, - lc_timestamp, stat_id, stat_value, ranking_pos), - False) - - -# ### 2 B DOCUMENTED! -def getAllRankingStats(bmstats_list): - table = [] - for stats in bmstats_list: - - # NOTE: - # - All of the ranking statistics are of type "higher is better" - # (a high value is ranked better than a low value). - # - Moreover, all present/defined values are non-negative. - # - This means that representing absent/undefined values as -1 is ok, - # since this ensures lowest ranking. - - benchmark_id = stats["benchmark_id"] - metric_id = stats["metric_id"] - lc_timestamp = stats["lc_timestamp"] - lsd = stats["lsd"] - ni = stats["ni"] - nz = stats["nz"] - nc = stats["nc"] - mdrse = stats["med_of_rses"] - rsemd = stats["rse_of_meds"] - - qs = qualityScore(lsd, ni, nz, nc, mdrse) - - lc = stats["lc"] - if lc >= 0.0: - lcgss = stats["lc_gsep_score"] - lclss = stats["lc_lsep_score"] - lcds1 = stats["lc_dur1_score"] - lcds2 = stats["lc_dur2_score"] - lcms = changeMagnitudeScore(lc) - lcss1 = lcms * lcgss * lclss * lcds1 - lcss = lcss1 * lcds2 - if lc < 1.0: - lcssr = lcss - lcss1r = lcss1 - lcssi = lcss1i = -1 - else: - lcssi = lcss - lcss1i = lcss1 - lcssr = lcss1r = -1 - else: - lcssr = lcssi = lcss1r = lcss1i = -1 - - table.append( - (benchmark_id, metric_id, lc_timestamp, qs, lcssr, lcssi, lcss1r, - lcss1i)) - - return table - - -# ### 2 B DOCUMENTED! -def getFirstUploadTimestamp(snapshots, sha1_id): - try: - return snapshots[zip(*snapshots)[0].index(sha1_id)][1] - except ValueError: - return -1 - - -# ### 2 B DOCUMENTED! 
-def updateRankings( - host_id, platform_id, branch_id, sha12_id, context2_id, no_progress): - - # Get all snapshots matching the host/platform/branch combination: - sys.stdout.write("getting snapshots ... ") - sys.stdout.flush() - snapshots = getAllSnapshots(host_id, platform_id, branch_id) - sys.stdout.write("done\n") - sys.stdout.flush() - - - # Rankings will normally be computed once a day for each - # host/platform/branch combination (note the tradeoff between update - # frequency and database size): - ranking_interval = 3600 * 24 # secs in a day - - # Rankings will be updated if at least one of the following - # conditions eventually becomes True: - force_cond = empty_cond = interval_cond = False - - force_ranking = False - #force_ranking = True # Uncomment for testing - - force_cond = force_ranking - - if not force_cond: - last_ranking_sha1_id, last_ranking_timestamp = getLastRankingSnapshot( - host_id, platform_id, branch_id) - empty_cond = last_ranking_sha1_id < 0 - if not empty_cond: - assert last_ranking_timestamp >= 0 - - target_timestamp = getFirstUploadTimestamp(snapshots, sha12_id) - if target_timestamp < 0: - sys.stderr.write( - "error: failed to extract target_timestamp " - "(error in command-line args?)\n") - sys.exit(1) - - interval_cond = ( - (target_timestamp - last_ranking_timestamp) > ranking_interval) - - if not (force_cond or empty_cond or interval_cond): - sys.stdout.write( - "not updating rankings ('force', 'empty', and 'interval' " - "conditions all failed)\n") - return - - sys.stdout.write( - "updating rankings ('force' cond.: " + str(force_cond) + - "; 'empty' cond.: " + str(empty_cond) + - "; 'interval' cond.: " + str(interval_cond) + ") ...\n") - - # For simplicity we hardcode the tolerances for now: - difftol = 1.1 - durtolmin = 3 - durtolmax = 10 - - # Determine the target snapshot range: - # (The range should end at the snapshot given on the command-line and begin - # at the snapshot that is 2 * durtolmax snapshots back in time, 
or, if no - # such snapshot exists, the first available snapshot.) - try: - sha12_pos = zip(*snapshots)[0].index(sha12_id) - except ValueError: - sys.stderr.write( - "no observations found for SHA-1 ID: " + str(sha12_id) + "\n") - sys.exit(1) - sha11_pos = max(0, (sha12_pos - 2 * durtolmax) + 1) - snapshots = snapshots[sha11_pos:(sha12_pos + 1)] - if len(snapshots) < 2: - sys.stderr.write( - "no observations found before SHA-1 ID: " + str(sha12_id) + - " (computing rankings makes no sense)\n") - sys.exit(1) - - # Get time series statistics for all benchmarks: - if no_progress: - sys.stdout.write("getting time series statistics ... ") - bmstats_list = getBMTimeSeriesStatsList( - host_id, platform_id, branch_id, snapshots, None, difftol, durtolmin, - durtolmax, None if no_progress else printProgress, - "getting time series statistics") - - if no_progress: - sys.stdout.write("done\n") - else: - sys.stdout.write("\n") - - - # *** Compute rankings ************************************************** - - # Step 1: Create a table containing all ranking statistics (one row per - # benchmark/metric): - sys.stdout.write("creating table for all ranking stats ... ") - sys.stdout.flush() - table = getAllRankingStats(bmstats_list) - sys.stdout.write("done\n") - sys.stdout.flush() - - # Step 2: Sort the table individually for each ranking statistic and - # register the ranking positions in the database: - context1_id = getContext(host_id, platform_id, branch_id, snapshots[0][0]) - if context1_id == -1: - sys.stderr.write("error: failed to find context for start snapshot\n") - sys.exit(1) - nameToIndex = { "QS": 3, "LCSSR": 4, "LCSSI": 5, "LCSS1R": 6, "LCSS1I": 7 } - for name in nameToIndex: - sys.stdout.write("registering ranking for " + name + " ... 
") - sys.stdout.flush() - registerRanking( - table, nameToIndex[name], name, context1_id, context2_id) - sys.stdout.write("done\n") - sys.stdout.flush() - -# --- END Global functions ---------------------------------------------- - - -# --- BEGIN Main program ---------------------------------------------- - -options, http_get = getOptions() - -if "help" in options: - printVerboseUsage() - sys.exit(1) - -if (not ("db" in options and "host" in options and "platform" in options and - "branch" in options and "sha1" in options)): - printUsage() - sys.exit(1) - -if not isValidSHA1(options["sha1"]): - sys.stderr.write("error: invalid SHA-1: " + options["sha1"] + "\n") - sys.exit(1) - -setDatabase( - options["dbhost"] if "dbhost" in options else None, - options["dbport"] if "dbport" in options else None, - options["db"]) - -host_id = textToId("host", options["host"]) -if host_id == -1: - sys.stderr.write("error: no such host: " + options["host"] + "\n") - sys.exit(1) -platform_id = textToId("platform", options["platform"]) -if platform_id == -1: - sys.stderr.write("error: no such platform: " + options["platform"] + "\n") - sys.exit(1) -branch_id = textToId("branch", options["branch"]) -if branch_id == -1: - sys.stderr.write("error: no such branch:" + options["branch"] + "\n") - sys.exit(1) -sha12_id = textToId("sha1", options["sha1"]) -if sha12_id == -1: - sys.stderr.write("error: no such SHA-1:" + options["sha1"] + "\n") - sys.exit(1) - -context2_id = getContext(host_id, platform_id, branch_id, sha12_id) -if context2_id == -1: - sys.stderr.write("error: no results found for this context\n") - sys.exit(1) - -updateRankings( - host_id, platform_id, branch_id, sha12_id, context2_id, - ("noprogress" in options) and ( - (options["noprogress"] == "1") - or (options["noprogress"].lower() == "true"))) - -# Write to database: -commit() - -sys.stdout.write("rankings computation done\n") -sys.exit(0) - -# --- END Main program ---------------------------------------------- diff --git 
a/scripts/getnamemappings.py b/scripts/getnamemappings.py new file mode 100644 index 0000000..2768449 --- /dev/null +++ b/scripts/getnamemappings.py @@ -0,0 +1,34 @@ +import sys, json +from dbaccess import execQuery +from misc import printJSONHeader + + +class GetNameMappings: + + def __init__(self): + pass + + def execute(self): + self.hosts = dict(execQuery("SELECT id, value FROM host", ())) + self.platforms = dict(execQuery("SELECT id, value FROM platform", ())) + self.branches = dict(execQuery("SELECT id, value FROM branch", ())) + self.sha1s = dict(execQuery("SELECT id, value FROM sha1", ())) + self.benchmarks = dict(execQuery("SELECT id, value FROM benchmark", ())) + self.metrics = dict(execQuery("SELECT id, value FROM metric", ())) + self.writeOutput() + + def writeOutputAsJSON(self): + printJSONHeader() + json.dump({ + 'hosts': self.hosts, + 'platforms': self.platforms, + 'branches': self.branches, + 'sha1s': self.sha1s, + 'benchmarks': self.benchmarks, + 'metrics': self.metrics + }, sys.stdout) + + +class GetNameMappingsAsJSON(GetNameMappings): + def writeOutput(self): + self.writeOutputAsJSON() diff --git a/scripts/getrankings.py b/scripts/getrankings.py deleted file mode 100644 index 45cc9a3..0000000 --- a/scripts/getrankings.py +++ /dev/null @@ -1,219 +0,0 @@ -import sys -import json -from dbaccess import execQuery, database -from misc import ( - textToId, idToText, getContext, getTimestampFromContext, getSnapshots, - getRankingContexts, benchmarkToComponents, printJSONHeader) - -class GetRankings: - - def __init__( - self, host, platform, branch, sha12, test_case_filter, maxsize): - self.host = host - self.host_id = textToId('host', self.host) - self.platform = platform - self.platform_id = textToId('platform', self.platform) - self.branch = branch - self.branch_id = textToId('branch', self.branch) - self.context2_id = getContext( - self.host_id, self.platform_id, self.branch_id, - textToId('sha1', sha12)) - self.test_case_filter = test_case_filter - 
self.maxsize = maxsize - - - # Returns -1, 0, and 1 if ranking position x is considered less than, - # equal to, and greater than ranking position y respectively. - # Note: a negative ranking position is considered worse (i.e. effectively - # treated as having an "infinite" ranking position) than any non-negative - # ranking position. - def cmp_rank_pos(self, x, y): - if x < 0: - return 1 - elif y < 0: - return -1 - elif x < y: - return -1 - elif x > y: - return 1 - else: - return 0 - - - # Gets all rankings matching the context/metric combination combination. - def getRankings(self): - - if self.context2_id < 0: - print "error: invalid context" - sys.exit(1) - - # Find the previous context (if any) for which rankings exist: - ranking_contexts = getRankingContexts( - self.host_id, self.platform_id, self.branch_id) - curr_index = zip(*ranking_contexts)[0].index(self.context2_id) - if curr_index < (len(ranking_contexts) - 1): - context2_prev_id = ranking_contexts[curr_index + 1][0] - else: - context2_prev_id = -1 # No rankings before this context - - rankings = {} - context_ids = set([self.context2_id]) # Affected context IDs - - - # Get all time series notes: - qres = execQuery( - "SELECT benchmarkId, metricId, note FROM timeSeriesAnnotation" - " WHERE hostId = %s AND platformId = %s AND branchId = %s", - (self.host_id, self.platform_id, self.branch_id)) - notes = {} - for benchmark_id, metric_id, note in qres: - notes[benchmark_id, metric_id] = note - - - # Get rankings for each statistic: - stat_infos = execQuery("SELECT id, value FROM rankingStat", ()) - for stat_id, stat_name in stat_infos: - - # Get the unsorted ranking information: - ranking_all = execQuery( - "SELECT benchmarkId, metricId, context1Id, pos, value," - " lastChangeTimestamp" - " FROM ranking" - " WHERE context2Id = %s" - " AND statId = %s", - (self.context2_id, stat_id)) - - ranking = [] - - # Apply test case filter and add notes: - for row in ranking_all: - benchmark_id = row[0] - benchmark = 
idToText("benchmark", benchmark_id) - test_case, test_function, data_tag = ( - benchmarkToComponents(benchmark)) - if ((self.test_case_filter == None) - or (test_case in self.test_case_filter)): - - # Append note if any: - metric_id = row[1] - try: - note = notes[benchmark_id, metric_id] - except KeyError: - note = "" - - ranking.append(( - benchmark_id, metric_id, row[2], row[3], row[4], - row[5], note)) - - - for row in ranking: - context_ids.add(row[2]) - - # Sort the table in ascending order on the 'pos' column, but - # so that negative positions are ranked below any other positions: - ranking.sort(key=lambda row: row[3], cmp=self.cmp_rank_pos) - - # Keep only the 'maxsize' highest ranked benchmarks: - ranking = ranking if (self.maxsize < 0) else ranking[:self.maxsize] - - if context2_prev_id >= 0: - # Compute deltas from previous ranking: - ranking_prev_list = execQuery( - "SELECT benchmarkId, metricId, pos" - " FROM ranking" - " WHERE context2Id = %s" - " AND statId = %s", - (context2_prev_id, stat_id)) - ranking_prev = {} - for benchmark_id, metric_id, pos in ranking_prev_list: - ranking_prev[benchmark_id, metric_id] = pos - - # Append deltas where applicable: - ranking_without_deltas = ranking - ranking = [] - - for (benchmark_id, metric_id, context1_id, pos, value, - lc_timestamp, note) in ranking_without_deltas: - row = [benchmark_id, metric_id, context1_id, pos, value, - lc_timestamp, note] - if pos >= 0: - try: - pos_prev = ranking_prev[benchmark_id, metric_id] - if pos_prev >= 0: - delta = pos_prev - pos - row.append(delta) - except KeyError: - pass - ranking.append(row) - - - # Add to main list: - rankings[stat_name.lower()] = ranking; - - - # Extract affected SHA-1s: - assert len(context_ids) > 0 - sha1_infos = execQuery( - "SELECT context.id, sha1Id, sha1.value" - " FROM context, sha1" - " WHERE context.id IN" - " (%s" + ", %s"*(len(context_ids) - 1) + ")" + - " AND sha1Id = sha1.id", - tuple(context_ids)) - - - return sha1_infos, rankings - - - # 
Extracts the individual snapshots in the maximum range spanned by - # the SHA-1s in sha1_infos: - def getSnapshotsInMaxRange(self, sha1_infos): - - min_timestamp = max_timestamp = first_sha1_id = last_sha1_id = None - for context_id, sha1_id, sha1 in sha1_infos: - timestamp = getTimestampFromContext(context_id) - if min_timestamp == None: - min_timestamp = max_timestamp = timestamp - first_sha1_id = last_sha1_id = sha1_id - elif timestamp < min_timestamp: - min_timestamp = timestamp - first_sha1_id = sha1_id - elif timestamp > max_timestamp: - max_timestamp = timestamp - last_sha1_id = sha1_id - - snapshots = getSnapshots( - self.host_id, self.platform_id, self.branch_id, first_sha1_id, - last_sha1_id) - - return snapshots - - - def execute(self): - self.sha1_infos, self.rankings = self.getRankings() - self.snapshots = self.getSnapshotsInMaxRange(self.sha1_infos) - - self.benchmarks = execQuery("SELECT id, value FROM benchmark", ()) - self.metrics = execQuery("SELECT id, value FROM metric", ()) - - self.writeOutput() - - - def writeOutputAsJSON(self): - printJSONHeader() - json.dump({ - 'database': database(), - 'host': self.host, - 'platform': self.platform, - 'branch': self.branch, - 'benchmarks': self.benchmarks, - 'metrics': self.metrics, - 'snapshots': map( - lambda s: (idToText("sha1", s[0]), s[1]), self.snapshots), - 'rankings': self.rankings - }, sys.stdout) - - -class GetRankingsAsJSON(GetRankings): - def writeOutput(self): - self.writeOutputAsJSON() diff --git a/scripts/getstats.py b/scripts/getstats.py index d0b7d59..735b28a 100755 --- a/scripts/getstats.py +++ b/scripts/getstats.py @@ -9,8 +9,10 @@ from getresultdetails2 import GetResultDetails2AsJSON from gettimeseriesstats import GetTimeSeriesStatsAsJSON from gettimeseriesdetails import GetTimeSeriesDetailsAsJSON from getsnapshots import GetSnapshotsAsJSON -from getrankings import GetRankingsAsJSON from settimeseriesnote import SetTimeSeriesNote +from gettopchanges import GetTopChangesAsJSON +from 
getnamemappings import GetNameMappingsAsJSON +from gettestcaseswithchanges import GetTestCasesWithChangesAsJSON from dbaccess import setDatabase from misc import getOptions, printErrorAsJSON @@ -18,13 +20,25 @@ import sys # --- BEGIN Global functions ---------------------------------------------- +# Returns true iff name exists in options and is true. +def boolOption(options, name): + if name in options: + try: + res = (int(options[name]) != 0) + except: + res = (options[name].lower() == "true") + else: + res = False + return res + + # Returns a command instance. def createCommand(options, http_get): def printUsageError(): error = ( "usage: " + sys.argv[0] + " [--dbhost H --dbport P] --db D + \\\n" - " --cmd contexts [--rankedonly R] | \\\n" + " --cmd contexts | \\\n" " --cmd testcases1 --host H --platform P --branch B " "--sha1 S | \\\n" " --cmd testcases2 --host1 H --platform1 P --branch1 B " @@ -45,11 +59,13 @@ def createCommand(options, http_get): "--durtolmax T --benchmark BM --metric M | \\\n" " --cmd snapshots --host H --platform P " "--branch B --sha11 S --sha12 S | \\\n" - " --cmd rankings --host H --platform P " - "--branch B --sha1 S [--testcasefilter 'TC1 TC2 ...'] " "[--maxsize M] | \\\n" " --cmd settimeseriesnote --host H --platform P " - "--branch B --benchmark B --metric M --note N") + "--branch B --benchmark B --metric M --note N | \\\n" + " --cmd topchanges --regressions R --last L --timescope T " + "--premature P --limit L [--testcasefilter 'TC1 TC2 ...'] | \\\n" + " --cmd namemappings | \\\n" + " --cmd testcaseswithchanges") if http_get: printErrorAsJSON("usage error") @@ -82,15 +98,7 @@ def createCommand(options, http_get): # --- 'contexts' --------------------------------- if cmd == "contexts": - if "rankedonly" in options: - try: - ranked_only = (int(options["rankedonly"]) != 0) - except: - ranked_only = (options["rankedonly"].lower() == "true") - else: - ranked_only = False - - return ListContextsAsJSON(ranked_only) + return 
ListContextsAsJSON() # --- 'testcases1' --------------------------------- elif cmd == "testcases1": @@ -252,26 +260,6 @@ def createCommand(options, http_get): return GetSnapshotsAsJSON(host, platform, branch, sha11, sha12) - # --- 'rankings' --------------------------------- - elif cmd == "rankings": - if ("host" in options and "platform" in options and - "branch" in options and "sha1" in options): - host = options["host"] - platform = options["platform"] - branch = options["branch"] - sha1 = options["sha1"] - - if "maxsize" in options: - try: - maxsize = int(options["maxsize"]) - except: - raise BaseException("'maxsize' not an integer") - else: - maxsize = 10 - - return GetRankingsAsJSON( - host, platform, branch, sha1, test_case_filter, maxsize) - # --- 'settimeseriesnote' --------------------------------- # ### Hm ... this command doesn't really get statistics, so maybe # rename getstats.py to something more generic @@ -290,6 +278,30 @@ def createCommand(options, http_get): return SetTimeSeriesNote( host, platform, branch, benchmark, metric, note) + # --- 'topchanges' --------------------------------- + elif cmd == "topchanges": + if ("regressions" in options and "last" in options and + "timescope" in options and "premature" in options and + "limit" in options): + regressions = boolOption(options, "regressions") + last = boolOption(options, "last") + timescope = int(options["timescope"]) + premature = boolOption(options, "premature") + limit = int(options["limit"]) + + return GetTopChangesAsJSON( + test_case_filter, regressions, last, timescope, premature, + limit) + + # --- 'namemappings' --------------------------------- + elif cmd == "namemappings": + return GetNameMappingsAsJSON() + + # --- 'testcaseswithchanges' --------------------------------- + elif cmd == "testcaseswithchanges": + return GetTestCasesWithChangesAsJSON() + + # No match: printUsageError() sys.exit(1) diff --git a/scripts/gettestcaseswithchanges.py b/scripts/gettestcaseswithchanges.py new 
file mode 100644 index 0000000..c1d44c2 --- /dev/null +++ b/scripts/gettestcaseswithchanges.py @@ -0,0 +1,30 @@ +import sys, json +from dbaccess import execQuery +from misc import printJSONHeader + + +class GetTestCasesWithChanges: + + def __init__(self): + pass + + def execute(self): + self.test_cases = execQuery( + "SELECT value FROM (SELECT DISTINCT testCaseId FROM change)" + " AS foo, testCase WHERE testCase.id = testCaseId" + " ORDER BY value", ()) + + # Flatten one level: + self.test_cases = ( + [item for sublist in self.test_cases for item in sublist]) + + self.writeOutput() + + def writeOutputAsJSON(self): + printJSONHeader() + json.dump({ 'testCases': self.test_cases }, sys.stdout) + + +class GetTestCasesWithChangesAsJSON(GetTestCasesWithChanges): + def writeOutput(self): + self.writeOutputAsJSON() diff --git a/scripts/gettopchanges.py b/scripts/gettopchanges.py new file mode 100644 index 0000000..1b7282a --- /dev/null +++ b/scripts/gettopchanges.py @@ -0,0 +1,181 @@ +import sys, json, calendar, time +from dbaccess import execQuery, database +from misc import ( + textToId, idToText, getContext, getTimestampFromContext, getSnapshots, + benchmarkToComponents, printJSONHeader) + + + +# Gets the top changes for a specific context by considering only the +# last change in each time series. +# +# An additional filter is applied by only considering results from +# test cases matching test_case_ids. +# +# NOTE: The result may contain at most one change from a given time series. 
+def getTopChangesForContext_last( + host_id, platform_id, branch_id, regressions, premature, limit, + test_case_ids): + + query = ( + "SELECT change.benchmarkId, change.metricId, sha1Id, last_timestamp, ") + query += ("greatest(score, premature_score)" if premature else "score") + query += ( + " AS final_score" + " FROM" + " (SELECT benchmarkId, metricId, max(timestamp) AS last_timestamp" + " FROM change" + " WHERE hostId = %s" + " AND platformId = %s" + " AND branchId = %s" + " AND regression = %s") + args = [host_id, platform_id, branch_id, regressions] + + if len(test_case_ids) > 0: + query += ( + " AND testCaseId IN (%s" + ", %s"*(len(test_case_ids) - 1) + ")" + ) + args += test_case_ids + + query += ( + " GROUP BY benchmarkId, metricId) AS last_change" + " , change" + " WHERE last_change.benchmarkId = change.benchmarkId" + " AND last_change.metricId = change.metricId" + " AND change.timestamp = last_timestamp" + " ORDER BY final_score DESC, last_timestamp DESC" + " LIMIT %s" + ) + args.append(limit) + + return execQuery(query, args) + + +# Gets the top changes for a specific context by considering all +# changes in the given time scope. +# +# An additional filter is applied by only considering results from +# test cases matching test_case_ids. +# +# NOTE: The result may contain any number of changes from a given time +# series. 
+def getTopChangesForContext_timeScope( + host_id, platform_id, branch_id, regressions, premature, limit, + test_case_ids, lo_timestamp): + + query = "SELECT benchmarkId, metricId, sha1Id, timestamp, " + query += ("greatest(score, premature_score)" if premature else "score") + query += ( + " AS final_score" + " FROM change" + " WHERE hostId = %s" + " AND platformId = %s" + " AND branchId = %s" + " AND regression = %s" + " AND timestamp >= %s") + args = [host_id, platform_id, branch_id, regressions, lo_timestamp] + + if len(test_case_ids) > 0: + query += ( + " AND testCaseId IN (%s" + ", %s"*(len(test_case_ids) - 1) + ")" + ) + args += test_case_ids + + query += ( + " ORDER BY final_score DESC, timestamp DESC" + " LIMIT %s" + ) + args.append(limit) + + return execQuery(query, args) + + +# Returns test case IDs corresponding to the names in test_case_filter. +def getTestCaseIdsFromFilter(test_case_filter): + return (execQuery( + "SELECT id FROM testCase" + " WHERE value IN (%s" + ", %s"*(len(test_case_filter) - 1) + ")", + tuple(test_case_filter)) + if ((test_case_filter != None) and (len(test_case_filter) > 0)) + else ()) + + +class GetTopChanges: + + def __init__( + self, test_case_filter, regressions, last, timescope, premature, limit): + self.test_case_filter = test_case_filter + self.regressions = regressions + self.last = last + self.timescope = timescope + self.premature = premature + self.limit = limit + + + # Gets the top changes for a specific context. 
+ def getTopChangesForContext( + self, host_id, platform_id, branch_id, lo_timestamp): + + test_case_ids = getTestCaseIdsFromFilter(self.test_case_filter) + + return ( + getTopChangesForContext_last( + host_id, platform_id, branch_id, self.regressions, + self.premature, self.limit, test_case_ids) + if self.last else + getTopChangesForContext_timeScope( + host_id, platform_id, branch_id, self.regressions, + self.premature, self.limit, test_case_ids, lo_timestamp) + ) + + + # Gets the top changes for all contexts. + def getTopChangesForAllContexts(self): + + # Compute lowest timestamp (secs since 1970) in time scope (days ago): + curr_timestamp = calendar.timegm(time.gmtime()) + secs_in_day = 24 * 60 * 60 + lo_timestamp = ( + -1 if (self.timescope < 0) else + curr_timestamp - self.timescope * secs_in_day) + + context_ids = execQuery( + "SELECT DISTINCT hostId, platformId, branchId FROM context " + "ORDER BY hostId, platformId, branchId", ()) + + contexts = [] + + for host_id, platform_id, branch_id in context_ids: + top_changes = self.getTopChangesForContext( + host_id, platform_id, branch_id, lo_timestamp) + contexts.append({ + "hostId": host_id, + "platformId": platform_id, + "branchId": branch_id, + "topchanges": top_changes + }) + + return contexts + + + def execute(self): + self.contexts = self.getTopChangesForAllContexts() + self.writeOutput() + + + def writeOutputAsJSON(self): + printJSONHeader() + json.dump({ + 'database': database(), + 'regressions': self.regressions, + 'last': self.last, + 'timescope': self.timescope, + 'premature': self.premature, + 'limit': self.limit, + 'contexts': self.contexts + }, sys.stdout) + + +class GetTopChangesAsJSON(GetTopChanges): + def writeOutput(self): + self.writeOutputAsJSON() diff --git a/scripts/listcontexts.py b/scripts/listcontexts.py index 4aae8bc..a78daa5 100644 --- a/scripts/listcontexts.py +++ b/scripts/listcontexts.py @@ -6,15 +6,8 @@ from misc import idToText, printJSONHeader class ListContexts: - def 
__init__(self, ranked_only): - self.ranked_only = ranked_only - - - def rankingsExist(self, contextId): - rankings = execQuery( - "SELECT id FROM ranking WHERE context2Id = %s LIMIT 1", - (contextId,)) - return len(rankings) > 0 + def __init__(self): + pass def execute(self): @@ -36,44 +29,29 @@ class ListContexts: id, host, platform, branch, sha1, timestamp = contexts[0] curHost, curPlatform, curBranch = host, platform, branch - - rankings_exist = self.rankingsExist(id) - rankings_exist_count = 1 if rankings_exist else 0 - - curSnapshots = [] - if (not self.ranked_only) or rankings_exist: - curSnapshots = [ - (idToText('sha1', sha1), timestamp, 1 if rankings_exist else 0)] + curSnapshots = [(idToText('sha1', sha1), timestamp)] for (id, host, platform, branch, sha1, timestamp) in contexts[1:]: if (host, platform, branch) != (curHost, curPlatform, curBranch): - if (not self.ranked_only) or (rankings_exist_count > 0): - assert len(curSnapshots) > 0 - self.contexts.append({ - 'host' : idToText('host', curHost), - 'platform' : idToText('platform', curPlatform), - 'branch' : idToText('branch', curBranch), - 'snapshots' : curSnapshots - }) - - rankings_exist_count = 0 + self.contexts.append({ + 'host' : idToText('host', curHost), + 'platform' : idToText('platform', curPlatform), + 'branch' : idToText('branch', curBranch), + 'snapshots' : curSnapshots + }) curHost, curPlatform, curBranch = host, platform, branch curSnapshots = [] - rankings_exist = self.rankingsExist(id) - if rankings_exist: - rankings_exist_count = rankings_exist_count + 1 - if (not self.ranked_only) or rankings_exist: - curSnapshots.append( - (idToText('sha1', sha1), timestamp, - 1 if rankings_exist else 0)) + curSnapshots.append((idToText('sha1', sha1), timestamp)) self.writeOutput() + def writeOutputAsJSON(self): printJSONHeader() json.dump({ 'contexts' : self.contexts }, sys.stdout) + class ListContextsAsJSON(ListContexts): def writeOutput(self): self.writeOutputAsJSON() diff --git a/scripts/misc.py 
b/scripts/misc.py index b3a2274..8bd8516 100644 --- a/scripts/misc.py +++ b/scripts/misc.py @@ -45,6 +45,33 @@ def textToId(table, text): # ### 2 B DOCUMENTED! +def findOrInsertId(table, value, *args): + + query_result = execQuery( + "SELECT id FROM " + table + " WHERE value = %s", (value,)) + if len(query_result) == 1: + # Found, so return ID: + return query_result[0][0] + + # Not found, so insert: + query = "INSERT INTO " + table + " (value" + for i in range(0, len(args), 2): + query += ", " + args[i] + query += ") VALUES (%s" + values = [value] + for i in range(0, len(args), 2): + query += ", %s" + values.append(args[i + 1]) + + # ... and retrieve ID: + query += ") RETURNING id" + query_result = execQuery(query, values) + + assert len(query_result) == 1 + return query_result[0][0] + + +# ### 2 B DOCUMENTED! # Maybe also rename to lowerIsBetter() ? (but note that a global function with # that name already exists in uploadresults.py) def metricIdToLowerIsBetter(metric_id): @@ -64,6 +91,13 @@ def metricIdToLowerIsBetter(metric_id): # Returns the non-negative ID of the given context, or -1 if not found. 
def getContext(host_id, platform_id, branch_id, sha1_id): + global contextIdCache + if not 'contextIdCache' in globals(): + contextIdCache = {} + + if (host_id, platform_id, branch_id, sha1_id) in contextIdCache: + return contextIdCache[host_id, platform_id, branch_id, sha1_id] + result = execQuery( "SELECT id FROM context" " WHERE hostId = %s" @@ -72,9 +106,10 @@ def getContext(host_id, platform_id, branch_id, sha1_id): " AND sha1Id = %s" "LIMIT 1", (host_id, platform_id, branch_id, sha1_id)) - if len(result): - return result[0][0] - return -1 + result = result[0][0] if len(result) else -1 + contextIdCache[host_id, platform_id, branch_id, sha1_id] = result + + return result # Returns the test case, test function, and data tag components of @@ -100,32 +135,46 @@ def getTimestampFromContext(context_id): # Finds snapshots that match a host/platform/branch combination and that # lie within the range -# [sha11, sha12] if sha12_id >= 0, or -# [sha11, +inf) if sha12_ is < 0. +# [sha11, sha12] if both sha11_id and sha12_id are >= 0, or +# ( -inf, sha12] if only sha11_id is < 0, or +# [sha11, +inf) if only sha12_id is < 0, or +# ( -inf, +inf) if both sha11_id and sha2_id are < 0 +# # Returns a chronologically order n-tuple of 2-tuples: # (sha1, first upload timestamp). 
def getSnapshots(host_id, platform_id, branch_id, sha11_id, sha12_id): - timestamp1 = execQuery( + + timestamp1 = (execQuery( "SELECT EXTRACT(EPOCH FROM timestamp)::INT FROM context" " WHERE hostId = %s" " AND platformId = %s" " AND branchId = %s" " AND sha1Id = %s", (host_id, platform_id, branch_id, sha11_id))[0][0] - if sha12_id >= 0: - timestamp2 = execQuery( - "SELECT EXTRACT(EPOCH FROM timestamp)::INT FROM context" - " WHERE hostId = %s" - " AND platformId = %s" - " AND branchId = %s" - " AND sha1Id = %s", - (host_id, platform_id, branch_id, sha12_id))[0][0] + if (sha11_id >= 0) else -1) + + timestamp2 = (execQuery( + "SELECT EXTRACT(EPOCH FROM timestamp)::INT FROM context" + " WHERE hostId = %s" + " AND platformId = %s" + " AND branchId = %s" + " AND sha1Id = %s", + (host_id, platform_id, branch_id, sha12_id))[0][0] + if (sha12_id >= 0) else -1) + + prefix = "AND EXTRACT(EPOCH FROM timestamp)::INT" + + if (timestamp1 == -1) and (timestamp2 == -1): + range_expr = "" + elif (timestamp1 >= 0) and (timestamp2 < 0): + range_expr = ("%s >= %d" % (prefix, timestamp1)) + elif (timestamp1 < 0) and (timestamp2 >= 0): + range_expr = ("%s <= %d" % (prefix, timestamp2)) + else: # Ensure chronological order: if timestamp1 > timestamp2: timestamp1, timestamp2 = timestamp2, timestamp1 - range_expr = "BETWEEN %d AND %d" % (timestamp1, timestamp2) - else: - range_expr = ">= %d" % timestamp1 + range_expr = ("%s BETWEEN %d AND %d" % (prefix, timestamp1, timestamp2)) # Each distinct SHA-1 that occurs for this host/platform/branch # combination may occur multiple times with different upload times. 
@@ -138,8 +187,7 @@ def getSnapshots(host_id, platform_id, branch_id, sha11_id, sha12_id): " FROM context" " WHERE hostId = %s" " AND platformId = %s" - " AND branchId = %s" - " AND EXTRACT(EPOCH FROM timestamp)::INT " + range_expr + + " AND branchId = %s " + range_expr + " ORDER BY timestamp ASC", (host_id, platform_id, branch_id)) @@ -169,48 +217,8 @@ def getAllSnapshots(host_id, platform_id, branch_id, reverse = False): return tuple(snapshots) -# Returns the (SHA-1 ID, timestamp) pair associated with the most recent -# rankings computed for the given host/platform/branch combination, or -# (-1, -1) if no match is found. -def getLastRankingSnapshot(host_id, platform_id, branch_id): - result = execQuery( - "SELECT matchingcontext.sha1id, EXTRACT(EPOCH FROM timestamp)::INT" - " FROM ranking," - " (SELECT id, sha1Id, timestamp" - " FROM context" - " WHERE hostId = %s" - " AND platformId = %s" - " AND branchId = %s) AS matchingContext" - " WHERE context2Id = matchingContext.id" - " ORDER BY timestamp DESC LIMIT 1", - (host_id, platform_id, branch_id)) - if len(result): - return result[0] - return -1, -1 - - -# For the given host/platform/branch combination, this function returns -# all contexts for which rankings exist. The return value is a list of -# (context ID, timestamp) pairs sorted in descending order on timestamp -# (latest timestamp first). -def getRankingContexts(host_id, platform_id, branch_id): - result = execQuery( - "SELECT DISTINCT matchingcontext.id," - " EXTRACT(EPOCH FROM timestamp)::INT AS etimestamp" - " FROM ranking," - " (SELECT id, sha1Id, timestamp" - " FROM context" - " WHERE hostId = %s" - " AND platformId = %s" - " AND branchId = %s) AS matchingContext" - " WHERE context2Id = matchingContext.id" - " ORDER BY etimestamp DESC", - (host_id, platform_id, branch_id)) - return result - - -# Retrieves the time series of valid median results for the given -# benchmark/metric combination. 
Only the part of the time series that +# Retrieves the time series + additional stats of valid median results for +# the given benchmark/metric combination. Only the part of the time series that # is within the selected snapshot interval is considered. # # Returns a 7-tuple: @@ -355,6 +363,61 @@ def getTimeSeries( ms, lsd) +# Retrieves the time series of valid median results for the given +# benchmark/metric combination within the given contexts. +# Only the part of the time series that is within the selected snapshot +# interval is considered. +# +# Returns an n-tuple of 2-tuples: +# +# ( +# <corresponding index in the 'contexts' list>, +# <median of valid observations or -1 if all obs. are invalid> +# ) +# +def getBasicTimeSeries(contexts, benchmark_id, metric_id): + + # Fetch raw values: + assert len(contexts) > 0 + raw_values = (execQuery( + "SELECT value, valid, contextId FROM result" + " WHERE contextId IN (%s" + ", %s"*(len(contexts) - 1) + ")" + + " AND benchmarkId = %s" + " AND metricId = %s" + " ORDER BY contextId", + tuple(contexts) + (benchmark_id, metric_id)) + + [(-1, -1, -1)]) # Note sentinel item + + # Compute per-sample stats: + curr_context_id = -1 + valid_and_positive_sample = [] + median_obs_map = {} + # Loop over all observations (which are grouped on sample; + # note the 1-1 correspondence between samples and contexts): + for obs, valid, context_id in raw_values: + if context_id != curr_context_id: + # A new sample has been collected, so register it and + # prepare for the next one: + median_obs = stats.medianscore(valid_and_positive_sample) if ( + len(valid_and_positive_sample) > 0) else -1 + median_obs_map[curr_context_id] = median_obs + valid_and_positive_sample = [] + curr_context_id = context_id + # Append a valid and positive observation to the current sample: + if valid and (obs > 0): + valid_and_positive_sample.append(obs) + + # Order chronologically: + ts = [] + index = 0 + for context in contexts: + if context in median_obs_map: + 
ts.append((index, median_obs_map[context])) + index = index + 1 + + return ts + + # Returns the factor by which val improves over base_val by taking the # lower_is_better property into consideration. # Example: base_val = 10 and val = 20 results in 0.5 if lower_is_better is true, @@ -369,7 +432,7 @@ def metricAdjustedRatio(base_val, val, lower_is_better): # Whether a change is significant depends on the difftol argument. # Only positive values are considered. # -# The output is an n-tuple of 7-tuples, one per change: +# The output is a list of 7-tuples, one per change: # # 1: Base index, i.e. the index in the time series that contains the base # value used to compute the change. @@ -545,6 +608,84 @@ def getChanges(time_series, lower_is_better, difftol, durtolmin, durtolmax): return tuple(changes) +# Extracts the (significant) changes of all time series in a given +# host/platform/branch combination. +# +# The output is a list of 3-tuples, one per time series having at least one +# change: +# +# 1: Benchmark ID. +# 2: Metric ID. +# 3: The changes as a list of 9-tuples. The first 7 elements correspond to +# the output from getChanges() (documented elsewhere). Element 8 +# and 9 are the SHA1 ID and timestamp corresponding to the change. 
+# +def getAllChanges( + host_id, platform_id, branch_id, difftol, durtolmin, durtolmax, + progress_func = None, progress_arg = None): + + if progress_func != None: + progress_func(0.0, progress_arg) + + # Get all snapshots matching the host/platform/branch combination: + snapshots = getAllSnapshots(host_id, platform_id, branch_id) + + # Get the list of contexts that corresponds to these snapshots in this + # host/platform/branch combination: + contexts = [] + for sha1_id, timestamp in snapshots: + contexts.append(getContext(host_id, platform_id, branch_id, sha1_id)) + + if len(contexts) == 0: + return [] + + # Get all distinct benchmark/metric combinations that match the + # host/platform/branch context and are within the selected snapshot + # interval. Each such combination corresponds to a time series. + tseries_list = execQuery( + "SELECT DISTINCT benchmarkId, metricId FROM result" + " WHERE contextId IN (%s" + ", %s"*(len(contexts) - 1) + ")", +# " WHERE contextId IN (%s" + ", %s"*(len(contexts) - 1) + ") LIMIT 10", + contexts) + + changes = [] + + # Loop over time series: + if progress_func != None: + i = 0 + for benchmark_id, metric_id in tseries_list: + + # Get the time series (without extra stats): + time_series = getBasicTimeSeries(contexts, benchmark_id, metric_id) + + # Extract the significant changes: + basic_tschanges = getChanges( + time_series, metricIdToLowerIsBetter(metric_id), difftol, + durtolmin, durtolmax) + # ... 
add the SHA-1 and timestamp to each item: + tschanges = [] + for change in basic_tschanges: + index = time_series[change[1]][0] + sha1_id = snapshots[index][0] + timestamp = snapshots[index][1] + tschanges.append(change + (sha1_id, timestamp)) + + if len(tschanges) > 0: + changes.append((benchmark_id, metric_id, tschanges)) + + if progress_func != None: + i = i + 1 + divisor = len(tseries_list) // 100 # report at most 100 times + if (divisor > 0) and ((i % divisor) == 0): + perc_done = (i / float(len(tseries_list))) * 100.0 + progress_func(perc_done, progress_arg) + + if progress_func != None: + progress_func(100.0, progress_arg) + + return changes + + # ### 2 B DOCUMENTED! def getTimeSeriesMiscStats(time_series, changes, snapshots, stats): if len(changes) > 0: @@ -654,6 +795,170 @@ def getBMTimeSeriesStatsList( return tuple(bmstats_list) +# Returns the score for the most recent change in 'changes'. +# If 'regression' is true, only regressions are considered, and otherwise +# only improvements. +# If 'premature' is true, the premature score (not considering post-change +# durability) is returned instead of the regular score. +# +# Returns -1 if no score is found. +# + + +# OBSOLETE ??? + + +def getLastChangeScore(changes, regression, premature): + pass + + +# Returns the highest score for any change in a certain time interval. +# +# The time interval is defined like this: <2 B DONE!> +# +# If 'regression' is true, only regressions are considered, and otherwise +# only improvements. +# If 'premature' is true, the premature score (not considering post-change +# durability) is returned instead of the regular score. +# +# Returns -1 if no score is found. +# + + +# +# OBSOLETE ??? + + +def getHighestChangeScore(changes, regression, premature, days): + pass + + +# Computes change scores for each time series (benchmark/metric combination) +# of the given host/platform/branch combination. +# +# Change types: +# - most recent change (i.e. 
significant regression or improvement) +# - strongest change last n days, for n in {7, 30, 180, and -1 (infinite)} +# ... and all of these with and without inclusion of premature changes. +# +# ADD MORE DOCS HERE ... 2 B DONE! +# + + +# OBSOLETE ??? + + +# def getChangeScores( +# host_id, platform_id, branch_id, snapshots, test_case_filter, +# difftol, durtolmin, durtolmax, progress_func = None, progress_arg = None): + +# if progress_func != None: +# progress_func(0.0, progress_arg) + +# contexts = [] +# for sha1_id, timestamp in snapshots: +# contexts.append(getContext(host_id, platform_id, branch_id, sha1_id)) + +# # Get all distinct benchmark/metric combinations that match the +# # host/platform/branch context and are within the selected snapshot +# # interval. Each such combination corresponds to a time series. +# assert len(contexts) > 0 +# tseries = execQuery( +# "SELECT DISTINCT benchmarkId, metricId FROM result" +# " WHERE contextId IN (%s" + ", %s"*(len(contexts) - 1) + ")", +# contexts) + +# scores = [] + +# # Loop over time series: +# if progress_func != None: +# i = 0 +# #for benchmark_id, metric_id in tseries[800:810]: +# for benchmark_id, metric_id in tseries: + +# benchmark = idToText("benchmark", benchmark_id) +# #if benchmark != "tst_qmetaobject:indexOfMethod(_q_columnsAboutToBeRemoved(QModelIndex,int,int))": +# # continue + +# test_case, test_function, data_tag = ( +# benchmarkToComponents(benchmark)) + +# # Skip this time series if it doesn't match the test case filter: +# if ((test_case_filter != None) +# and (not test_case in test_case_filter)): +# continue + +# # Get the time series (without extra stats): +# time_series = getBasicTimeSeries( +# host_id, platform_id, branch_id, snapshots, benchmark_id, metric_id) + +# # Extract the significant changes: +# changes = getChanges( +# time_series, metricIdToLowerIsBetter(metric_id), difftol, +# durtolmin, durtolmax) + +# tsscores = {} + +# tsscores["benchmark_id"] = benchmark_id +# 
tsscores["metric_id"] = metric_id + +# tsscores["regr_last"] = getLastChangeScore(changes, True, False) +# tsscores["regr_last_pmt"] = getLastChangeScore(changes, True, True) +# tsscores["impr_last"] = getLastChangeScore(changes, False, False) +# tsscores["impr_last_pmt"] = getLastChangeScore(changes, False, True) + +# tsscores["regr_7"] = getHighestChangeScore( +# changes, True, False, 7) +# tsscores["regr_7_pmt"] = getHighestChangeScore( +# changes, True, True, 7) +# tsscores["impr_7"] = getHighestChangeScore( +# changes, False, False, 7) +# tsscores["impr_7_pmt"] = getHighestChangeScore( +# changes, False, True, 7) + +# tsscores["regr_30"] = getHighestChangeScore( +# changes, True, False, 30) +# tsscores["regr_30_pmt"] = getHighestChangeScore( +# changes, True, True, 30) +# tsscores["impr_30"] = getHighestChangeScore( +# changes, False, False, 30) +# tsscores["impr_30_pmt"] = getHighestChangeScore( +# changes, False, True, 30) + +# tsscores["regr_180"] = getHighestChangeScore( +# changes, True, False, 180) +# tsscores["regr_180_pmt"] = getHighestChangeScore( +# changes, True, True, 180) +# tsscores["impr_180"] = getHighestChangeScore( +# changes, False, False, 180) +# tsscores["impr_180_pmt"] = getHighestChangeScore( +# changes, False, True, 180) + +# tsscores["regr_all"] = getHighestChangeScore( +# changes, True, False, -1) +# tsscores["regr_all_pmt"] = getHighestChangeScore( +# changes, True, True, -1) +# tsscores["impr_all"] = getHighestChangeScore( +# changes, False, False, -1) +# tsscores["impr_all_pmt"] = getHighestChangeScore( +# changes, False, True, -1) + +# scores.append(tsscores) + +# if progress_func != None: +# i = i + 1 +# divisor = len(tseries) // 100 # report at most 100 times +# if (divisor > 0) and ((i % divisor) == 0): +# perc_done = (i / float(len(tseries))) * 100.0 +# progress_func(perc_done, progress_arg) + +# if progress_func != None: +# progress_func(100.0, progress_arg) + +# return scores + + + # Returns True iff s is a valid SHA-1 
string. def isValidSHA1(s): def containsOnlyHexDigits(s): diff --git a/scripts/updateallchanges.py b/scripts/updateallchanges.py new file mode 100755 index 0000000..e5d81c8 --- /dev/null +++ b/scripts/updateallchanges.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +""" +This script invokes updatechanges.py for all host/platform/branch combinations. +""" + +import sys +from subprocess import Popen, PIPE +from dbaccess import setDatabase, execQuery, commit +from misc import getOptions, idToText + + +# --- BEGIN Global functions ---------------------------------------------- + +def printUsage(): + sys.stderr.write( + "usage: " + sys.argv[0] + " --help | [--dbhost H --dbport P] --db D\n") + + +def printVerboseUsage(): + printUsage() + sys.stderr.write("\noptions:\n") + sys.stderr.write( + " --help: This help.\n") + sys.stderr.write( + " --dbhost: The database server host (overriding the default).\n") + sys.stderr.write( + " --dbport: The database server port (overriding the default).\n") + sys.stderr.write( + " --db: The database. One of 'bm' or 'bm-dev' (the latter " + "intended for experimentation).\n") + + +# Executes the external updatechanges.py script with appropriate arguments. 
+def execUpdateChanges(host, platform, branch, options): + + cmd = [ + "updatechanges.py", "--db", options["db"], "--host", host, + "--platform", platform, "--branch", branch, "--noprogress", "true"] + if "dbhost" in options: + cmd += ["--dbhost", options["dbhost"]] + if "dbport" in options: + cmd += ["--dbport", options["dbport"]] + + sys.stdout.write( + "\nupdating changes for " + host + " / " + platform + " / " + branch + + " ...\n") + sys.stdout.flush() + + p = Popen(cmd, stdout = PIPE, stderr = PIPE) + stdout, stderr = p.communicate() + if (p.returncode != 0): + sys.stdout.write("failed to execute command '" + str(cmd) + "':\n") + sys.stdout.write(" return code: " + str(p.returncode) + "\n") + sys.stdout.write(" stdout: >" + stdout.strip() + "<\n") + sys.stdout.write(" stderr: >" + stderr.strip() + "<\n") + else: + sys.stdout.write("updatechanges.py executed successfully:\n") + sys.stdout.write(" return code: " + str(p.returncode) + "\n") + sys.stdout.write(" stdout: >" + stdout.strip() + "<\n") + sys.stdout.write(" stderr: >" + stderr.strip() + "<\n") + +# --- END Global functions ---------------------------------------------- + + +# --- BEGIN Main program ---------------------------------------------- + +options, http_get = getOptions() + +if "help" in options: + printVerboseUsage() + sys.exit(1) + +if not ("db" in options): + printUsage() + sys.exit(1) + +setDatabase( + options["dbhost"] if "dbhost" in options else None, + options["dbport"] if "dbport" in options else None, + options["db"]) + +hpb_ids = execQuery( + "SELECT DISTINCT hostId, platformId, branchId FROM context " + "ORDER BY hostId, platformId, branchId", ()) + +for host_id, platform_id, branch_id in hpb_ids: + execUpdateChanges( + idToText("host", host_id), + idToText("platform", platform_id), + idToText("branch", branch_id), + options) + +sys.exit(0) + +# --- END Main program ---------------------------------------------- diff --git a/scripts/updatechanges.py b/scripts/updatechanges.py new 
file mode 100755 index 0000000..3e1c8dd --- /dev/null +++ b/scripts/updatechanges.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +""" +This script is intended to be executed whenever a new snapshot is complete +for a host/platform/branch combination. + +The script registers - in the 'change' table - all current changes of all +time series of the host/platform/branch combination in question (wiping all +existing changes first). + +The 'Change Summary' web page can then populate its internal tables +directly from the 'change' table. +""" + + +import sys +from dbaccess import setDatabase, execQuery, commit +from misc import getOptions, textToId, getAllChanges + + +# --- BEGIN Global functions ---------------------------------------------- + +def printUsage(): + sys.stderr.write( + "usage: " + sys.argv[0] + + " --help | [--dbhost H] [--dbport P] --db D --host H --platform P " + "--branch B [--noprogress NP]\n") + +def printVerboseUsage(): + printUsage() + sys.stderr.write("\noptions:\n") + sys.stderr.write( + " --help: This help.\n") + sys.stderr.write( + " --dbhost: The database server host (overriding the default).\n") + sys.stderr.write( + " --dbport: The database server port (overriding the default).\n") + sys.stderr.write( + " --db: The database. One of 'bm' or 'bm-dev' (the latter " + "intended for experimentation).\n") + sys.stderr.write( + " --host: The physical machine on which the results were " + "produced (e.g. barbarella or 172.24.90.79).\n") + sys.stderr.write( + "--platform: The OS/compiler/architecture combination " + "(e.g. linux-g++-32).\n") + sys.stderr.write( + " --branch: The product branch (e.g. 'qt 4.6', 'qt 4.7', or " + "'qt master').\n") + sys.stderr.write( + " --noprogress: Specify \'true\' to disable progress indicator.\n") + + +# ### 2 B DOCUMENTED! +def printProgress(p, lead): + sys.stdout.write(lead + " ... (" + "{0:.2f}".format(p) + " %)\r") + sys.stdout.flush() + + +# ### 2 B DOCUMENTED! 
+# NOTE: This function is currently duplicated elsewhere in JavaScript!
+def changeMagnitudeScore(change):
+    max_change = 2.0
+    abs_change = (1.0 / change) if change < 1 else change
+    return (min(abs_change, max_change) - 1.0) / (max_change - 1.0)
+
+
+# Updates the 'change' table in the current database with all changes in
+# all time series (i.e. benchmark/metric combinations) of the given
+# host/platform/branch combination. Progress will be written to standard
+# output iff no_progress is False.
insert rows for the new changes: + query = ( + "INSERT INTO change" + " (benchmarkId, testCaseId, metricId, hostId, platformId, branchId," + " sha1Id, timestamp, regression, score, premature_score) VALUES ") + args = [] + + for benchmark_id, metric_id, tschanges in changes: + + test_case_id = execQuery( + "SELECT testCaseId FROM benchmark WHERE id = %s", + (benchmark_id,))[0][0]; + + for change in tschanges: + + ratio = change[2] + regression = ratio < 1.0 + magn_score = changeMagnitudeScore(ratio) + gsep_score = change[3] + lsep_score = change[4] + dur_score1 = change[5] + dur_score2 = change[6] + premature_score = magn_score * gsep_score * lsep_score * dur_score1 + score = premature_score * dur_score2 + sha1_id = change[7] + timestamp = change[8] + + if len(args) > 0: + query += ", " + query += ("(%s" + ", %s"*10 + ") ") + args += ( + benchmark_id, test_case_id, metric_id, host_id, platform_id, + branch_id, sha1_id, timestamp, regression, score, + premature_score) + + execQuery(query, args, False) + + sys.stdout.write("done\n") + sys.stdout.flush() + + + +# --- END Global functions ---------------------------------------------- + + +# --- BEGIN Main program ---------------------------------------------- + +options, http_get = getOptions() + +if "help" in options: + printVerboseUsage() + sys.exit(1) + +if (not ("db" in options and "host" in options and "platform" in options and + "branch" in options)): + printUsage() + sys.exit(1) + +setDatabase( + options["dbhost"] if "dbhost" in options else None, + options["dbport"] if "dbport" in options else None, + options["db"]) + +host_id = textToId("host", options["host"]) +if host_id == -1: + sys.stderr.write("error: no such host: " + options["host"] + "\n") + sys.exit(1) +platform_id = textToId("platform", options["platform"]) +if platform_id == -1: + sys.stderr.write("error: no such platform: " + options["platform"] + "\n") + sys.exit(1) +branch_id = textToId("branch", options["branch"]) +if branch_id == -1: + 
sys.stderr.write("error: no such branch:" + options["branch"] + "\n") + sys.exit(1) + +updateChanges( + host_id, platform_id, branch_id, + ("noprogress" in options) and ( + (options["noprogress"] == "1") + or (options["noprogress"].lower() == "true"))) + +# Write to database: +commit() + +# --- END Main program ---------------------------------------------- diff --git a/scripts/uploadresults.py b/scripts/uploadresults.py index a50285c..89f97ab 100755 --- a/scripts/uploadresults.py +++ b/scripts/uploadresults.py @@ -5,7 +5,8 @@ from subprocess import Popen, PIPE from xml.dom.minidom import parse, getDOMImplementation from dbaccess import setDatabase, execQuery, commit from misc import ( - getOptions, textToId, idToText, isValidSHA1, getContext, getAllSnapshots) + getOptions, textToId, idToText, findOrInsertId, isValidSHA1, getContext, + getAllSnapshots) # --- BEGIN Global functions ---------------------------------------------- @@ -182,33 +183,6 @@ def extractResults(file): return results -# ### 2 B DOCUMENTED! -def findOrInsertId(table, value, *args): - - query_result = execQuery( - "SELECT id FROM " + table + " WHERE value = %s", (value,)) - if len(query_result) == 1: - # Found, so return ID: - return query_result[0][0] - - # Not found, so insert: - query = "INSERT INTO " + table + " (value" - for i in range(0, len(args), 2): - query += ", " + args[i] - query += ") VALUES (%s" - values = [value] - for i in range(0, len(args), 2): - query += ", %s" - values.append(args[i + 1]) - - # ... and retrieve ID: - query += ") RETURNING id" - query_result = execQuery(query, values) - - assert len(query_result) == 1 - return query_result[0][0] - - # Uploads a set of results to the database. 
def uploadToDatabase(host, platform, branch, sha1, results): @@ -237,7 +211,9 @@ def uploadToDatabase(host, platform, branch, sha1, results): benchmark = ( result['testCase'] + ":" + result['testFunction'] + "(" + str(result['dataTag']) + ")") - benchmarkId = findOrInsertId("benchmark", benchmark) + testCaseId = findOrInsertId("testCase", result['testCase']) + benchmarkId = findOrInsertId( + "benchmark", benchmark, "testCaseId", testCaseId) metricId = findOrInsertId( "metric", result['metric'], "lowerIsBetter", @@ -254,20 +230,6 @@ def uploadToDatabase(host, platform, branch, sha1, results): commit() -# Returns True iff rankings exist for the given context. -def rankingsExist(options): - context_id = getContext( - textToId('host', options["host"]), - textToId('platform', options["platform"]), - textToId('branch', options["branch"]), - textToId('sha1', options["sha1"])) - - matches = execQuery( - "SELECT id FROM ranking WHERE context2Id = %s LIMIT 1", (context_id,)) - - return len(matches) > 0 - - # Returns the context ID if found, otherwise -1: def getContextIdFromNames(options): host_id = textToId("host", options["host"]) @@ -297,35 +259,12 @@ def contextComplete(options): return sample_size >= max_sample_size -# Executes the external computerankings.py script with appropriate arguments. -# If new_context is True, this snapshot is assumed to be the first one in -# this time series (host/platform/branch combination), and no results have been -# uploaded for this context yet. In that case, rankings will instead be -# computed for the latest existing snapshot in this time series. 
-def execComputeRankings(options, new_context): - - if new_context: - # Attempt to use the latest available snapshot for this - # host/platform/branch combination as the actual snapshot: - host_id = textToId("host", options["host"]) - platform_id = textToId("platform", options["platform"]) - branch_id = textToId("branch", options["branch"]) - snapshots = getAllSnapshots(host_id, platform_id, branch_id) - if len(snapshots) > 0: - actual_snapshot = idToText("sha1", snapshots[-1][0]) - if actual_snapshot == options["sha1"]: - sys.stderr.write( - "error: context unexpectedly exists in database\n") - sys.exit(1) - else: - return # special case when no snapshots exist yet - else: - actual_snapshot = options["sha1"] - +# Executes the external updatechanges.py script with appropriate arguments. +def execUpdateChanges(options): cmd = [ - "computerankings.py", "--db", options["db"], "--host", options["host"], + "updatechanges.py", "--db", options["db"], "--host", options["host"], "--platform", options["platform"], "--branch", options["branch"], - "--sha1", actual_snapshot, "--noprogress", "true"] + "--noprogress", "true"] if "dbhost" in options: cmd += ["--dbhost", options["dbhost"]] if "dbport" in options: @@ -339,7 +278,7 @@ def execComputeRankings(options, new_context): sys.stdout.write(" stdout: >" + stdout.strip() + "<\n") sys.stdout.write(" stderr: >" + stderr.strip() + "<\n") else: - sys.stdout.write("computerankings.py executed successfully:\n") + sys.stdout.write("updatechanges.py executed successfully:\n") sys.stdout.write(" return code: " + str(p.returncode) + "\n") sys.stdout.write(" stdout: >" + stdout.strip() + "<\n") sys.stdout.write(" stderr: >" + stderr.strip() + "<\n") @@ -494,42 +433,21 @@ sys.stdout.write("UPLOADING RESULTS, OPTIONS: " + str(options) + "\n") sys.stdout.flush() -# Reject uploading if this context is already complete (since it could -# modify the input for (and thus invalidate) any rankings computed for -# this snapshot): +# Reject 
uploading if this context is already complete: if contextComplete(options): sys.stderr.write( - "this snapshot is already complete for this time series -> uploading " - "rejected!\n") - sys.exit(1) - - -# Reject uploading if rankings exist for this context, since that would -# require a recomputation of those rankings. -# -# (Note that we only need to check the current SHA-1 as long as we assume that -# results are uploaded in an order that is consistent with the order of the -# corresponding SHA-1s in the branch in question: Rankings for earlier SHA-1s -# will not be affected by results for this SHA-1, and rankings for later -# SHA-1s cannot exist a this point (by assumption).) -if rankingsExist(options): - sys.stderr.write( - "error: rankings have already been computed for this context\n") + "this snapshot is already complete -> uploading rejected!\n") sys.exit(1) -# If this is the first set of results for the current context, we compute -# rankings for the same context, only one snapshot back: +# If this is the first set of results for the current context, we update +# changes for all time series in this host/platform/branch combination: if not contextExists(options): - sys.stdout.write( - "new snapshot in this time series -> attempt to compute rankings " - "for the previous snapshot ...\n") + sys.stdout.write("update changes (before registering new snapshot) ...\n") sys.stdout.flush() - execComputeRankings(options, True) + execUpdateChanges(options) else: - sys.stdout.write( - "results for this snapshot have already been uploaded -> don't " - "attempt to compute rankings at this point\n") + sys.stdout.write("skipping update changes (1)\n") sys.stdout.flush() @@ -549,22 +467,20 @@ sys.stdout.write("done\n") sys.stdout.flush() -# If no more results are expected in this context, we can compute rankings +# If no more results are expected in this context, we can update changes # already at this point. 
(Note: In the case that one or more uploads failed # for this context, it will be regarded as incomplete forever. In that case, -# computation of rankings for this context will be instead be triggered right +# update of changes for this context will be instead be triggered right # before uploading the first set of results for the next context (see code # above).) if contextComplete(options): sys.stdout.write( - "this snapshot is complete for this time series -> attempt to " - "compute rankings for this snapshot ...\n") + "update changes (after completion of current snapshot) ...\n") sys.stdout.flush() - execComputeRankings(options, False) + execUpdateChanges(options) else: - sys.stdout.write( - "this snapshot is incomplete for this time series -> " - "don't attempt to compute rankings at this point\n") + sys.stdout.write("skipping update changes (2)\n") + sys.stdout.flush() sys.stdout.write("UPLOADING RESULTS DONE\n") diff --git a/web/analysis/index.html b/web/analysis/index.html index cb03f69..6c70a77 100644 --- a/web/analysis/index.html +++ b/web/analysis/index.html @@ -7,7 +7,6 @@ <script type="text/javascript" src="../global/global.js"></script> <script type="text/javascript" src="main.js"></script> <link rel="stylesheet" href="../global/style.css" type="text/css" /> - </head> <body> @@ -39,25 +38,6 @@ $("#mainPageLink").attr("href", "http://" + location.host + "/bm2"); </script> -<div id="div_analysisPageLink" style="display:inline"> - -<a id="analysisPageLink" href="">analysis page (all types)</a> -<script type="text/javascript"> -$("#analysisPageLink").attr( - "href", "http://" + location.host + "/bm2/analysis"); -</script> -</div> - -<div id="div_analysisPageRankedOnlyLink" style="display:inline"> - -<a id="analysisPageRankedOnlyLink" href=""> - analysis page (rankings only)</a> -<script type="text/javascript"> -$("#analysisPageRankedOnlyLink").attr( - "href", "http://" + location.host + "/bm2/analysis?rankedonly=1"); -</script> -</div> - <div 
id="div_availableContexts" style="display:none"> <br /> @@ -77,45 +57,6 @@ $("#analysisPageRankedOnlyLink").attr( </script> </td><td></td> </tr> - <tr id="actionTable_rankings"> - <td style="padding-top:5px; padding-bottom:5px;"> - <a class="disabledActionButton" id="action_rankings" - href="javascript::void(0)" onclick="return false"> - Show rankings for Context <span class="context1"> - 1 </span></a> - <script type="text/javascript"> - setTooltip( - $("#action_rankings"), - "For Context 1, this action ranks benchmarks on different " + - "criteria.<br /><br />The idea is to prioritize benchmarks " + - "that deserve attention (either because they reflect stable " + - "changes in Qt, or because they need to improve their own " + - "quality)." + - "<br /><br /><b>Note:</b> This action is enabled iff " + - "an 'R' is indicated for the snapshot of Context 1, " + - "thus indicating availability of rankings in the " + - "database."); - </script> - </td> - <td id="max_rank_size_lead">Maximum size: - <select id="max_rank_size" onchange="updateActions()"> - <option value="5">5</option> - <option value="10">10</option> - <option value="20">20</option> - <option value="50">50</option> - <option value="100">100</option> - <option value="200">200</option> - <option value="500">500</option> - <option value="1000">1000</option> - <option value="-1">unlimited</option> - </select> - <script type="text/javascript"> - setTooltip( - $("#max_rank_size_lead"), - "The maximum number of benchmarks to show in each ranking."); - </script> - </td> - </tr> <tr> <td style="padding-top:5px; padding-bottom:5px;"> <a class="disabledActionButton" id="action_stats2" diff --git a/web/analysis/main.js b/web/analysis/main.js index 735c5d8..196e6ec 100644 --- a/web/analysis/main.js +++ b/web/analysis/main.js @@ -1,8 +1,5 @@ // --- BEGIN Global variables ----------------------------------- -var rankedOnly = null; // Whether to adapt GUI to only show - // information related to rankings - var 
testCaseChecked = new Array(); // --- END Global variables ------------------------------------- @@ -151,7 +148,6 @@ function updateActions() { context1_ = context1(); if (context1_ == null) { // special case: no contexts are available setActionUrl("action_stats1", ""); - setActionUrl("action_rankings", ""); setActionUrl("action_stats2", ""); setActionUrl("action_tsstats", ""); return; @@ -175,22 +171,6 @@ function updateActions() { url_stats1 += "&sha1=" + context1_["sha1"]; url_stats1 += "&testcasefilter=" + testCaseFilter; - // --- rankings --- - if (context1_["rankingsExist"]) { - url_rankings = "rankings.shtml"; - url_rankings += "?db=" + $('#database').val(); - url_rankings += "&cmd=rankings"; - url_rankings += "&host=" + encodeURIComponent(context1_["host"]); - url_rankings += "&platform=" + - encodeURIComponent(context1_["platform"]); - url_rankings += "&branch=" + encodeURIComponent(context1_["branch"]); - url_rankings += "&sha1=" + context1_["sha1"]; - url_rankings += "&testcasefilter=" + testCaseFilter; - url_rankings += "&maxsize=" + $("#max_rank_size option:selected").val(); - } else { - url_rankings = ""; - } - // --- stats2 --- if (context2_["host"] != "") { url_stats2 = "stats2.html"; @@ -241,7 +221,6 @@ function updateActions() { } setActionUrl("action_stats1", url_stats1); - setActionUrl("action_rankings", url_rankings); setActionUrl("action_stats2", url_stats2); setActionUrl("action_tsstats", url_tsstats); } @@ -287,7 +266,6 @@ function context1() { var snapshot = tr.find("select[name = 'snapshot'] option:selected"); context["sha1"] = snapshot.attr("value"); context["snapshotIndex"] = snapshot.index(); - context["rankingsExist"] = (snapshot.attr("text")[0] == 'R'); return context; } @@ -327,7 +305,6 @@ function fetchContexts() { query = "?db=" + database; query += "&cmd=contexts"; - query += "&rankedonly=" + (rankedOnly ? 
1 : 0); url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; //alert("url: >" + url + "<"); @@ -424,12 +401,6 @@ function fetchContexts() { $("#div_availableContexts").css("display", "block"); - if (rankedOnly) { - // In contexts table, disable the 'Context 2' column: - $("#contextsTable th:eq(1)").remove(); - $("#contextsTable td:nth-child(2)").remove(); - } - fetchTestCases(); } } @@ -543,26 +514,7 @@ function clickContextRadioButton(cb, col) { $(document).ready(function() { - var args = queryStringArgs(); - - var rankedOnly_int = parseInt(extractArg(args, "rankedonly")); - rankedOnly = ((!isNaN(rankedOnly_int)) && (rankedOnly_int != 0)) - if (rankedOnly) { - // In actions table, keep only header and rows related to - // 'Show rankings...' action: - $("#actionTable tr").css("display", "none"); - $("#actionTable tr[id='actionTable_header']").css("display", ""); - $("#actionTable tr[id*='rankings']").css("display", ""); - - $("#title").text("BM2 Analysis (ranked snapshots only)"); - - $("#div_analysisPageRankedOnlyLink").css("display", "none"); - } else { - $("#div_analysisPageLink").css("display", "none"); - } - // Set default action arguments: - $("#max_rank_size option[value='10']").attr("selected", true); $("#diff_tol option[value='1.1']").attr("selected", true); $("#dur_tol_min option[value='3']").attr("selected", true); $("#dur_tol_max option[value='10']").attr("selected", true); diff --git a/web/analysis/rankings.js b/web/analysis/rankings.js deleted file mode 100644 index 890165e..0000000 --- a/web/analysis/rankings.js +++ /dev/null @@ -1,400 +0,0 @@ -// --- BEGIN Global variables ----------------------------------- -var maxsize = null; // Maximum number of benchmarks in a ranking -// --- END Global variables ------------------------------------- - -function selectRankingTable() { - var val = $("#select_rankingTable").attr("value"); - var types = ["qs", "lcssr", "lcssi", "lcss1r", "lcss1i"]; - for (index in types) { - var type = 
types[index]; - $("#div_rankingTable_" + type).css( - "display", (val == type) ? "block" : "none"); - } - - var currTable = "#rankingTable_" + val; - - // Update "number of rows" label: - var nrows = $(currTable).find("tr").length - 1; - $("#rankingTable_nrows").text( - nrows + ((nrows == 1) ? " row" : " rows") + - " (limit: " + (maxsize < 0 ? "unlimited" : maxsize) + ")"); - - // Update plot to reflect selection of current table: - var cb = $(currTable).find("input[name='currSelector'][checked='true']"); - if (cb.length == 0) { - if (plot) - clearPlot(); - } else { - cb.trigger("click"); - cb.attr("checked", true); - } -} - - -// Submits a note for a given time series. -function submitTimeSeriesNote( - textInput, tableSel, database, host, platform, branch, benchmark, metric) { - - updateStatus("updating note ...", true); - - // Synchronize other ranking tables: - // ### WORK IN PROGRESS: - // var types = ["qs", "lcssr", "lcssi", "lcss1r", "lcss1i"]; - // for (var i in types) { - // var tsel = "#rankingTable_" + types[i]; - // if (tsel == tableSel) - // continue; - // alert("i: " + i + ", tsel: " + tsel); - // var tr = $(tsel + " tr").find( - // "td:nth-child(6):contains(" + metric + ")").find( - // "td:nth-child(7):contains(" + benchmark + ")"); - // alert("tr.length: " + tr.length); - // } - - - // Update database: - query = "?db=" + database + - "&cmd=settimeseriesnote" + - "&host=" + encodeURIComponent(host) + - "&platform=" + encodeURIComponent(platform) + - "&branch=" + encodeURIComponent(branch) + - "&benchmark=" + encodeURIComponent(benchmark) + - "&metric=" + encodeURIComponent(metric) + - "¬e=" + encodeURIComponent(textInput.value.substring(0, 256)); - - url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; - //alert("url: >" + url + "<"); - - $.ajax({ - url: url, - type: "GET", - dataType: "json", - - success: function(data, textStatus, request) { - if (request.readyState == 4) { - if (request.status == 200) { - - if (data.error != null) 
{ - updateStatus( - "updating note ... failed: " + - data.error, false); - return - } - - updateStatus("updating note ... done", false); - updateStatus("", false); - } - } - }, - - error: function(request, textStatus, errorThrown) { - descr = errorThrown; - if (errorThrown == null) { - descr = "undefined error - is the server down?"; - } - updateStatus("updating note ... error: " + descr, false); - } - - // complete: function(request, textStatus) { - // alert("complete; request.status: " + request.status) - // } - - }); -} - -// Submits a note for a given snapshot in a given time series. -function submitSnapshotNote(textInput) { - // 2 B DONE! -} - -function populateRankingTable( - tableSel, rankings, database, host, platform, branch, sha11, sha12, - difftol, durtolmin, durtolmax, bmarkId2Name, metricId2Name) { - - // Remove all rows below the header ... - $(tableSel + " tr:gt(0)").remove(); - - var currTime = dateToTimestamp(currDate); - - var html = ""; - for (var i = 0; i < rankings.length; ++i) { - - var row = rankings[i]; - var bmarkId = row[0]; - var metricId = row[1]; - var context1Id = row[2]; // unused? - var pos = row[3]; - var val = row[4]; - var lcTimestamp = row[5]; - var note = row[6]; - var hasPrevDelta = (row.length > 7); - var prevDelta = hasPrevDelta ? 
row[7] : null; - - benchmark = bmarkId2Name[bmarkId]; - metric = metricId2Name[metricId]; - - html += "<tr>" - - html += "<td><input type=\"radio\" name=\"currSelector\" onclick=\"" + - "clickBMRadioButton(this, '" + tableSel + "', '" + - database + "', '" + host + "', '" + - platform + "', '" + branch + "', '" + - sha11 + "', '" + sha12 + "', '" + - benchmark + "', '" + metric + "', " + - difftol + ", " + durtolmin + ", " + durtolmax + - ")\"></td>"; - - if (pos >= 0) { - html += "<td style=\"text-align:right\">" + pos + "</td>"; - } else { - html += "<td style=\"text-align:center; color:red\">n/a</td>"; - } - if (hasPrevDelta) { - html += "<td style=\"text-align:right\">" + prevDelta + "</td>"; - } else { - html += "<td style=\"text-align:center; color:red\">n/a</td>"; - } - if (val >= 0) { - html += "<td style=\"text-align:right\">" + val + "</td>"; - } else { - html += "<td style=\"text-align:center; color:red\">n/a</td>"; - } - - if (lcTimestamp >= 0) { - var secsAgo = currTime - lcTimestamp; - lcDaysAgo = secsToDays(secsAgo); - html += "<td style=\"background-color:" + ageColor(secsAgo) + - "; text-align:right\">" + lcDaysAgo + "</td>"; - } else { - html += "<td style=\"text-align:center; color:red\">n/a</td>"; - } - - html += "<td class=\"metric\">" + metric + "</td>"; - html += "<td class=\"benchmark\" style=\"width:50%\">" + - benchmark + "</td>"; - - html += "<td style=\"width:50%\">" + - "<input type=\"text\" style=\"width:100%\" " + - "value=\"" + note + "\" " + - "onchange=\"submitTimeSeriesNote(this, '" + - tableSel + "', '" + - database + "', '" + host + "', '" + - platform + "', '" + branch + "', '" + - benchmark + "', '" + metric + "')\" /></td>"; - - html += "</tr>"; - } - - $(tableSel + " > tbody:last").append(html); - $(tableSel).trigger("update"); - if (html != "") // hm ... why is this test necessary? 
- $(tableSel).trigger("appendCache"); - - // var sorting = [[11,1],[0,0]]; - //$("table").trigger("sorton",[sorting]); -} - -function fetchRankings( - database, host, platform, branch, sha1, testCaseFilter, maxsize) { - updateStatus("fetching rankings ...", true); - - query = "?db=" + database + - "&cmd=rankings" + - "&host=" + encodeURIComponent(host) + - "&platform=" + encodeURIComponent(platform) + - "&branch=" + encodeURIComponent(branch) + - "&sha1=" + sha1 + - "&maxsize=" + maxsize; - if (testCaseFilter != "") - query += "&testcasefilter=" + encodeURIComponent(testCaseFilter); - - url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; - //alert("url: >" + url + "<"); - - $.ajax({ - url: url, - type: "GET", - dataType: "json", - - success: function(data, textStatus, request) { - if (request.readyState == 4) { - if (request.status == 200) { - - if (data.error != null) { - updateStatus( - "fetching rankings ... failed: " + - data.error, false); - return - } - - updateStatus("fetching rankings ... done", false); - updateStatus("", false); - - var sha11 = data.snapshots[0][0]; - var sha12 = data.snapshots[data.snapshots.length - 1][0]; - - // ### The tolerance values should automatically be - // set to those hardcoded in finalizeresults.py! - // ... 2 B DONE! - difftol = 1.1; - durtolmin = 3; - durtolmax = 30; - - // Show context ... 
- $("#main_context_database").text(data.database); - $("#main_context_host").text(data.host); - $("#main_context_platform").text(data.platform); - $("#main_context_branch").text(data.branch); - $("#main_context_sha11").text(sha11); - $("#main_context_sha12").text(sha12); - $("#main_context_difftol").text(difftol); - $("#main_context_durtolmin").text(durtolmin); - $("#main_context_durtolmax").text(durtolmax); - - setSnapshots(data.snapshots); - - - var bmarkId2Name = []; - for (var i = 0; i < data.benchmarks.length; ++i) { - bmarkInfo = data.benchmarks[i]; - bmarkId2Name[bmarkInfo[0]] = bmarkInfo[1]; - } - - var metricId2Name = []; - for (var i = 0; i < data.metrics.length; ++i) { - metricInfo = data.metrics[i]; - metricId2Name[metricInfo[0]] = metricInfo[1]; - } - - var rankings = { - "qs": data.rankings.qs, - "lcssr": data.rankings.lcssr, - "lcssi": data.rankings.lcssi, - "lcss1r": data.rankings.lcss1r, - "lcss1i": data.rankings.lcss1i - }; - for (key in rankings) - populateRankingTable( - "#rankingTable_" + key, rankings[key], - data.database, data.host, data.platform, - data.branch, sha11, sha12, difftol, durtolmin, - durtolmax, bmarkId2Name, metricId2Name); - - - // Initially show the QS statistic table: - $("#select_rankingTable").attr("value", "qs"); - selectRankingTable(); - - $("#div_tsbm_border").css("display", "block"); - $("#div_tsbm").css("display", "block"); - $("#div_perBenchmarkStats").css("display", "block"); - - $("#div_context").css("display", "block"); - $("#div_rankings").css("display", "block"); - - clearPlot(); - } - } - }, - - error: function(request, textStatus, errorThrown) { - descr = errorThrown; - if (errorThrown == null) { - descr = "undefined error - is the server down?"; - } - updateStatus("fetching rankings ... 
error: " + descr, false); - } - - // complete: function(request, textStatus) { - // alert("complete; request.status: " + request.status) - // } - - }); -} - - -function initRankingTable(tableSel) { - - $(tableSel).tablesorter({ - headers: { - 0: { sorter: false }, // checkbox - 1: { sorter: "mixed_numeric_asc_before_missing" }, // Pos - 2: { sorter: "mixed_numeric_desc_before_missing" }, // Delta - 3: { sorter: false }, // Score (ordered as pos!) - 4: { sorter: "mixed_numeric_asc_before_missing" }, // LCDA - 5: { }, // Metric - 6: { } // Benchmark - } - }); - - // Note: The nth-child selector below uses 1-based indexing! - setTooltip( // Position - $(tableSel).find("th:nth-child(2)"), - "Ranking position. The lower the number, the stronger the " + - "benchmark deserves attention."); - setTooltip( // Delta - $(tableSel).find("th:nth-child(3)"), - "The previous ranking position minus the current one. " + - "The higher the number, the faster the benchmark rises " + - "in the ranking."); - setTooltip( // Score - $(tableSel).find("th:nth-child(4)"), - "The value of the current ranking statistic."); - setTooltip( // LCDA - $(tableSel).find("th:nth-child(5)"), tooltipText_lcda_nodist()); -} - -$(document).ready(function() { - - initTablesorter(); - initTSBMBody(); - - initRankingTable("#rankingTable_qs"); - initRankingTable("#rankingTable_lcssr"); - initRankingTable("#rankingTable_lcssi"); - initRankingTable("#rankingTable_lcss1r"); - initRankingTable("#rankingTable_lcss1i"); - - var args = queryStringArgs(); - - database = extractArg(args, "db"); - if (database == "") { - alert("ERROR: invalid query string (empty database)"); - return; - } - - host = extractArg(args, "host"); - if (host == "") { - alert("ERROR: invalid query string (empty host)"); - return; - } - platform = extractArg(args, "platform"); - if (platform == "") { - alert("ERROR: invalid query string (empty platform)"); - return; - } - branch = extractArg(args, "branch"); - if (branch == "") { - 
alert("ERROR: invalid query string (empty branch)"); - return; - } - sha1 = extractArg(args, "sha1"); - if (sha1 == "") { - alert("ERROR: invalid query string (empty sha1)"); - return; - } - maxsize = extractArg(args, "maxsize"); - if (maxsize == "") { - alert("ERROR: invalid query string (empty maxsize)"); - return; - } - - var testCaseFilter = extractArg(args, "testcasefilter"); // optional - - $("#div_tsbm_border").css("display", "none"); - $("#div_tsbm").css("display", "none"); - $("#div_rankings").css("display", "none"); - - fetchRankings( - database, host, platform, branch, sha1, testCaseFilter, maxsize); -}); diff --git a/web/analysis/rankings.shtml b/web/analysis/rankings.shtml deleted file mode 100644 index 64cdfb0..0000000 --- a/web/analysis/rankings.shtml +++ /dev/null @@ -1,150 +0,0 @@ -<html> - -<head> - - <title>BM2 - Rankings</title> - - <script type="text/javascript" src="../global/jquery-1.4.2.min.js"></script> - <script type="text/javascript" src="../global/flot/jquery.flot.js"></script> - <script type="text/javascript" - src="../global/flot/jquery.flot.selection.js"></script> - <script type="text/javascript" - src="../global/tablesorter/jquery.tablesorter.js"> - </script> - <script type="text/javascript" src="../global/boxover/boxover.js"></script> - <script type="text/javascript" src="../global/global.js"></script> - <script type="text/javascript" src="tsbmbody.js"></script> - <script type="text/javascript" src="rankings.js"></script> - - <link type="text/css" rel="stylesheet" href="../global/style.css" /> - <link type="text/css" rel="stylesheet" - href="../global/tablesorter/docs/css/jq.css" /> - <link type="text/css" rel="stylesheet" - href="../global/tablesorter/themes/bm/style.css" /> - -</head> - -<body> - -<span id="title" style="font-size:18; font-weight:bold"> -BM2 - Rankings</span> - -<span style="white-space:nowrap"> -<span id="status1">no status</span> -<img alt="spinner1" id="spinner1" src="../global/images/ajax-spinner.gif" - 
style="display:none"/> -<img alt="nospinner1" id="nospinner1" src="../global/images/nospinner.png" - style="display:inline"/> -</span> - -<br /> - -<a id="mainPageLink" href="">main page</a> -<script type="text/javascript"> -$("#mainPageLink").attr("href", "http://" + location.host + "/bm2"); -</script> - - - -<a id="analysisPageLink" href="">analysis page (all types)</a> -<script type="text/javascript"> -$("#analysisPageLink").attr( - "href", "http://" + location.host + "/bm2/analysis"); -</script> - - - -<a id="analysisPageRankedOnlyLink" href=""> - analysis page (ranked snapshots only)</a> -<script type="text/javascript"> -$("#analysisPageRankedOnlyLink").attr( - "href", "http://" + location.host + "/bm2/analysis?rankedonly=1"); -</script> - -<br /> -<br /> - - -<div id="div_tsbm_border" - style="display:none; border-style:solid; border-width:2px; padding:5px"> -<!--#include file="tsbmbody.html" --> -</div> - - -<div id="div_rankings" style="display:none"> - -<span id="status2">no status</span> -<img alt="spinner2" id="spinner2" src="../global/images/ajax-spinner.gif" - style="display:none"/> -<img alt="nospinner2" id="nospinner2" src="../global/images/nospinner.png" - style="display:inline"/> - -<!-- *** BEGIN Rankings *********************************** --> -<br /> -<!-- <span style="font-size:14; font-weight:bold">Benchmarks:</span> --> -<select id="select_rankingTable" onchange="selectRankingTable()"> - <option value="qs" selected=1>Benchmark Quality</option> - <option value="lcssr">Last Change Stability (regressions only)</option> - <option value="lcssi">Last Change Stability (improvements only)</option> - <option value="lcss1r"> - Last Change Stability (regressions only, include premature last changes) - </option> - <option value="lcss1i"> - Last Change Stability (improvements only, include premature last changes) - </option> -</select> -<script type="text/javascript"> - setTooltip($("#select_rankingTable"), "The current ranking statistic.") -</script> - 
-<br /> -<span id="rankingTable_nrows">no rows</span> - -<br /> -<div style="overflow:auto; height:400px; border-style:solid; border-width:1px;"> - - -<div id="div_rankingTable_qs" style="display:block"> -<table id="rankingTable_qs" class="tablesorter" border="0" cellpadding="0" - cellspacing="1" style="width:100%"> -<!--#include file="rankingtabledef.html" --> -</table> -</div> <!-- div_rankingTable_qs --> - -<div id="div_rankingTable_lcssr" style="display:block"> -<table id="rankingTable_lcssr" class="tablesorter" border="0" cellpadding="0" - cellspacing="1" style="width:100%"> -<!--#include file="rankingtabledef.html" --> -</table> -</div> <!-- div_rankingTable_lcssr --> - -<div id="div_rankingTable_lcssi" style="display:block"> -<table id="rankingTable_lcssi" class="tablesorter" border="0" cellpadding="0" - cellspacing="1" style="width:100%"> -<!--#include file="rankingtabledef.html" --> -</table> -</div> <!-- div_rankingTable_lcssi --> - -<div id="div_rankingTable_lcss1r" style="display:block"> -<table id="rankingTable_lcss1r" class="tablesorter" border="0" cellpadding="0" - cellspacing="1" style="width:100%"> -<!--#include file="rankingtabledef.html" --> -</table> -</div> <!-- div_rankingTable_lcss1r --> - -<div id="div_rankingTable_lcss1i" style="display:block"> -<table id="rankingTable_lcss1i" class="tablesorter" border="0" cellpadding="0" - cellspacing="1" style="width:100%"> -<!--#include file="rankingtabledef.html" --> -</table> -</div> <!-- div_rankingTable_lcss1i --> - - -</div> -<!-- *** END Rankings *********************************** --> - -</div> <!-- div_rankings --> - -</body> - -</html> diff --git a/web/analysis/rankingtabledef.html b/web/analysis/rankingtabledef.html deleted file mode 100644 index 6eb662b..0000000 --- a/web/analysis/rankingtabledef.html +++ /dev/null @@ -1,14 +0,0 @@ -<thead> -<tr> - <th></th> <!-- check box --> - <th>Pos</th> - <th>Delta</th> - <th>Score</th> - <th>LCDA</th> - <th>Metric</th> - <th>Benchmark</th> - 
<th>Note</th> -</tr> -</thead> -<tbody> -</tbody> diff --git a/web/analysis/tsbm.js b/web/analysis/tsbm.js index 722c36d..497b281 100644 --- a/web/analysis/tsbm.js +++ b/web/analysis/tsbm.js @@ -1,6 +1,6 @@ function fetchSnapshots( - database, host, platform, branch, sha11, sha12, benchmark, metric, difftol, - durtolmin, durtolmax) { + database, host, platform, branch, sha11, sha12, sha1Sel, benchmark, + metric, difftol, durtolmin, durtolmax) { updateStatus("fetching snapshots ...", true); query = "?db=" + database + @@ -40,8 +40,8 @@ function fetchSnapshots( // Fetch and plot time series: fetchTimeSeries( database, host, platform, branch, sha11, sha12, - benchmark, metric, difftol, durtolmin, durtolmax, - false); + sha1Sel, benchmark, metric, difftol, durtolmin, + durtolmax, false); } } }, @@ -98,6 +98,7 @@ $(document).ready(function() { alert("ERROR: invalid query string (empty sha12)"); return; } + var sha1Sel = extractArg(args, "sha1_sel"); // optional var benchmark = extractArg(args, "benchmark"); if (benchmark == "") { alert("ERROR: invalid query string (empty benchmark)"); @@ -141,6 +142,6 @@ $(document).ready(function() { // Fetch snapshots: fetchSnapshots( - database, host, platform, branch, sha11, sha12, benchmark, metric, - difftol, durtolmin, durtolmax); + database, host, platform, branch, sha11, sha12, sha1Sel, benchmark, + metric, difftol, durtolmin, durtolmax); }); diff --git a/web/analysis/tsbmbody.html b/web/analysis/tsbmbody.html index 69fed8d..a441b50 100644 --- a/web/analysis/tsbmbody.html +++ b/web/analysis/tsbmbody.html @@ -273,9 +273,9 @@ <!-- *** END Time series benchmark and metric ********************** --> <span id="status4">no status</span> -<img alt="spinner4" id="spinner4" src="images/ajax-spinner.gif" +<img alt="spinner4" id="spinner4" src="../global/images/ajax-spinner.gif" style="display:none"/> -<img alt="nospinner4" id="nospinner4" src="images/nospinner.png" +<img alt="nospinner4" id="nospinner4" 
src="../global/images/nospinner.png" style="display:inline"/> <!-- *** BEGIN Time series plot *********************************** --> diff --git a/web/analysis/tsbmbody.js b/web/analysis/tsbmbody.js index 2901ebb..ee81045 100644 --- a/web/analysis/tsbmbody.js +++ b/web/analysis/tsbmbody.js @@ -51,7 +51,7 @@ function tooltipText_mdrse() { return "Median of the valid relative standard errors of all " + "snapshots.<br /><br />" + " " + - "<img src=\"images/rse.png\" />" + + "<img src=\"../global/images/rse.png\" />" + "<br /><br />A high value might indicate " + "unstable or fluctuating results."; } @@ -60,7 +60,7 @@ function tooltipText_rsemd() { return "Relative standard error of the valid median observations " + "of all snapshots.<br /><br />" + " " + - "<img src=\"images/rse.png\" />" + + "<img src=\"../global/images/rse.png\" />" + "<br /><br />A high value might indicate either 1)" + "unstable or fluctuating results<br />" + "or 2) stable changes of a high magnitude."; @@ -104,7 +104,7 @@ function tooltipText_lcms() { "indicates the strength of the last signicifant change as a value " + "ranging from 0 (weak) to 1 (strong):<br /><br />" + " " + - "<img src=\"images/lcms.png\" />"; + "<img src=\"../global/images/lcms.png\" />"; } function tooltipText_lcss() { @@ -184,7 +184,7 @@ function tooltipText_lcds2() { function tooltipText_rse_plot() { return "Relative standard error of the valid and positive observations " + "in this sample.<br /><br />" + - "<img src=\"images/rse.png\" /><br /><br />" + + "<img src=\"../global/images/rse.png\" /><br /><br />" + "<b>Note:</b> RSE is not defined for less than two values."; } @@ -536,7 +536,7 @@ function clearPlot() { function createPlot( timeSeries, changes, benchmark, metric, lowerIsBetter, ms, lsd, ni, nz, nc, mdrse, rsemd, qs, lc, lcda, lcd, lcms, lcss, lcss1, lcgss, lclss, lcds1, - lcds2) { + lcds2, sha1Sel) { clearPlot(); @@ -798,7 +798,7 @@ function createPlot( $("#plot_canvas").unbind("plothover"); 
$("#plot_canvas").bind("plothover", function (event, pos, item) { - snIndex = Math.floor(pos.x + 0.5); // Snapshot index + var snIndex = Math.floor(pos.x + 0.5); // Snapshot index if ((snIndex < 0) || (snIndex >= snapshots.length)) return; if (snIndex == prevHoverIndex) @@ -861,9 +861,7 @@ function createPlot( $("#sample2 tr:gt(0)").remove(); } - $("#plot_canvas").unbind("plotclick"); - $("#plot_canvas").bind("plotclick", function (event, pos, item) { - snIndex = Math.floor(pos.x + 0.5); // Snapshot index + function clickPlot(snIndex) { if ((snIndex < 0) || (snIndex >= snapshots.length)) return; if (snIndex == prevClickIndex) { @@ -950,6 +948,12 @@ function createPlot( fetchResultDetails2(benchmark, metric, "", sha1); } } + } + + $("#plot_canvas").unbind("plotclick"); + $("#plot_canvas").bind("plotclick", function (event, pos, item) { + var snIndex = Math.floor(pos.x + 0.5); // Snapshot index + clickPlot(snIndex); }); clearHoverHighlighting(); @@ -987,6 +991,16 @@ function createPlot( if (isNonNullNumber(lclss)) $("#bmstats_lclss").text(lclss); if (isNonNullNumber(lcds1)) $("#bmstats_lcds1").text(lcds1); if (isNonNullNumber(lcds2)) $("#bmstats_lcds2").text(lcds2); + + // Select an initial snapshot if requested: + if ((sha1Sel != null) && (sha1Sel != "")) { + for (i = 0; i < snapshots.length; ++i) + if (snapshots[i][0] == sha1Sel) { + clickPlot(i); + toggleSnapshotDetails(); + break; + } + } } // ### REFACTOR: Similar function in stats2.js! 2 B DONE! 
@@ -1106,8 +1120,8 @@ function fetchResultDetails2(benchmark, metric, sha11, sha12) { } function fetchTimeSeries( - database, host, platform, branch, sha11, sha12, benchmark, metric, difftol, - durtolmin, durtolmax, showTSBMURL) { + database, host, platform, branch, sha11, sha12, sha1Sel, benchmark, metric, + difftol, durtolmin, durtolmax, showTSBMURL) { hideTSBMURL(); @@ -1194,7 +1208,7 @@ function fetchTimeSeries( data.time_series, data.changes, benchmark, metric, parseInt(data.lib), ms, lsd, ni, nz, nc, mdrse, rsemd, qs, lc, lcda, lcd, lcms, lcss, lcss1, lcgss, lclss, - lcds1, lcds2); + lcds1, lcds2, sha1Sel); if (showTSBMURL) enableTSBMURL( @@ -1300,9 +1314,10 @@ function clickBMRadioButton( if (cb.checked) { // Fetch and plot time series: + var sha1Sel = ""; // no snapshot selected initially fetchTimeSeries( - database, host, platform, branch, sha11, sha12, benchmark, metric, - difftol, durtolmin, durtolmax, true); + database, host, platform, branch, sha11, sha12, sha1Sel, benchmark, + metric, difftol, durtolmin, durtolmax, true); } else { clearPlot(); } diff --git a/web/changesummary/index.shtml b/web/changesummary/index.shtml deleted file mode 100644 index 5d45217..0000000 --- a/web/changesummary/index.shtml +++ /dev/null @@ -1,234 +0,0 @@ -<html> - -<head> - <title>BM Change Summary</title> - <script type="text/javascript" src="../global/jquery-1.4.2.min.js"></script> - <script type="text/javascript" - src="../global/tablesorter/jquery.tablesorter.js"> - <script type="text/javascript" src="../global/boxover/boxover.js"></script> - <script type="text/javascript" src="../global/global.js"></script> - <script type="text/javascript" src="main.js"></script> - - <link rel="stylesheet" href="../global/style.css" type="text/css" /> - <link type="text/css" rel="stylesheet" - href="../global/tablesorter/docs/css/jq.css" /> - <link type="text/css" rel="stylesheet" - href="../global/tablesorter/themes/bm/style.css" /> -</head> - -<body> - -<span id="title" 
style="font-size:18; font-weight:bold">BM Change Summary</span> - - -<span style="white-space:nowrap"> -<span id="status">no status</span> -<img alt="spinner" id="spinner" src="images/ajax-spinner.gif" - style="display:none"/> -</span> - -<br /> - -<a id="mainPageLink" href="">main page</a> -<script type="text/javascript"> -$("#mainPageLink").attr("href", "http://" + location.host + "/bm2"); -</script> - -<br /> -<br /> - - -<!-- BEGIN Ranking target -------------------------------------------- --> - -<table style="border:0"> -<tr> -<td style="border:0"> -<fieldset> -<legend>Ranking Target</legend> - -<!-- Primary change type: --> -<select id="primary_change_type" onchange="updateRankingTarget()"> - <option singular_name="regression" tag="regr">Regressions</option> - <option singular_name="improvement" tag="impr">Improvements</option> -</select> - -<br /> - -<!-- Secondary change type: --> -<select id="secondary_change_type" onchange="updateMainTable()"> - <option id="last_change" tag="last"></option> - <option id="max_change_last_week" tag="7"></option> - <option id="max_change_last_month" tag="30"></option> - <option id="max_change_last_6_months" tag="180"></option> - <option id="max_change_ever" tag="all"></option> -</select> - -<br /> -<input id="incl_premature_changes" type="checkbox" - onchange="updateMainTable()"/> -<label id="incl_premature_changes_label">Include premature changes</label> - -</fieldset> -</td> -</tr> -</table> - -<!-- END Ranking target -------------------------------------------- --> - -<br /> -<br /> - -<!-- BEGIN Main tables (exactly one will be visible at a time) ----- --> - -<!-- ... 
regressions (excluding premature ones): ---------------- --> - -<div id="div_mt_regr_last" style="display:block"> -<table id="mt_regr_last" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_7" style="display:none"> -<table id="mt_regr_7" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_30" style="display:none"> -<table id="mt_regr_30" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_180" style="display:none"> -<table id="mt_regr_180" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_all" style="display:none"> -<table id="mt_regr_all" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<!-- ... 
regressions (including premature ones): ---------------- --> - -<div id="div_mt_regr_last_pmt" style="display:none"> -<table id="mt_regr_last_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_7_pmt" style="display:none"> -<table id="mt_regr_7_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_30_pmt" style="display:none"> -<table id="mt_regr_30_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_180_pmt" style="display:none"> -<table id="mt_regr_180_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_regr_all_pmt" style="display:none"> -<table id="mt_regr_all_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<!-- ... 
improvements (excluding premature ones): ---------------- --> - -<div id="div_mt_impr_last" style="display:none"> -<table id="mt_impr_last" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_7" style="display:none"> -<table id="mt_impr_7" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_30" style="display:none"> -<table id="mt_impr_30" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_180" style="display:none"> -<table id="mt_impr_180" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_all" style="display:none"> -<table id="mt_impr_all" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<!-- ... 
improvements (including premature ones): ---------------- --> - -<div id="div_mt_impr_last_pmt" style="display:none"> -<table id="mt_impr_last_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_7_pmt" style="display:none"> -<table id="mt_impr_7_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_30_pmt" style="display:none"> -<table id="mt_impr_30_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_180_pmt" style="display:none"> -<table id="mt_impr_180_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<div id="div_mt_impr_all_pmt" style="display:none"> -<table id="mt_impr_all_pmt" class="tablesorter" border="0" - cellpadding="0" cellspacing="1"> -<!--#include file="cstabledef.html" --> -</table> -</div> - -<!-- END Main table -------------------------------------------- --> - -</body> -</html> diff --git a/web/changesummary/main.js b/web/changesummary/main.js deleted file mode 100644 index 03f8c66..0000000 --- a/web/changesummary/main.js +++ /dev/null @@ -1,89 +0,0 @@ -function updateMainTable() { - var pctTag = $("#primary_change_type option:selected").attr("tag"); - var sctTag = $("#secondary_change_type option:selected").attr("tag"); - var pmtTag = - ($("#incl_premature_changes:checked").length == 1) ? 
"_pmt" : ""; - var divName = "div_mt_" + pctTag + "_" + sctTag + pmtTag; - - // Hide all main tables but the current one: - $('div[id^="div_mt_"]').css("display", "none"); - $("#" + divName).css("display", "block"); -} - -function updateRankingTarget() { - var pctName = - $("#primary_change_type option:selected").attr("singular_name"); - - $("#last_change").text("Most recent " + pctName); - $("#max_change_last_week").text("Strongest " + pctName + " last week"); - $("#max_change_last_month").text("Strongest " + pctName + " last month"); - $("#max_change_last_6_months").text( - "Strongest " + pctName + " last six months"); - $("#max_change_ever").text("Strongest " + pctName + " ever"); - - $("#incl_premature_changes_label").text( - "Include premature " + pctName + "s"); - - updateMainTable(); -} - -function initMainTable(tableSel) { - - $(tableSel).tablesorter({ - headers: { - 3: { sorter: "mixed_numeric_desc_before_missing" }, // rank pos 0 - 4: { sorter: false }, // 1 - 5: { sorter: false }, // 2 - 6: { sorter: false }, // 3 - 7: { sorter: false }, // 4 - 8: { sorter: false }, // 5 - 9: { sorter: false }, // 6 - 10: { sorter: false }, // 7 - 11: { sorter: false }, // 8 - 12: { sorter: false } // 9 - } - }); - - // // Note: The nth-child selector below uses 1-based indexing! 
- // setTooltip($(tableSel).find("th:nth-child(2)"), tooltipText_ms()); - // setTooltip($(tableSel).find("th:nth-child(3)"), tooltipText_lsd()); - // setTooltip($(tableSel).find("th:nth-child(4)"), tooltipText_ni()); - // setTooltip($(tableSel).find("th:nth-child(5)"), tooltipText_nz()); - // setTooltip($(tableSel).find("th:nth-child(6)"), tooltipText_nc()); - // setTooltip($(tableSel).find("th:nth-child(7)"), tooltipText_mdrse()); - // setTooltip($(tableSel).find("th:nth-child(8)"), tooltipText_rsemd()); - // setTooltip($(tableSel).find("th:nth-child(9)"), tooltipText_qs()); - // setTooltip($(tableSel).find("th:nth-child(10)"), tooltipText_lc()); - // setTooltip($(tableSel).find("th:nth-child(11)"), tooltipText_lcda()); - // setTooltip($(tableSel).find("th:nth-child(12)"), tooltipText_lcms()); - // setTooltip($(tableSel).find("th:nth-child(13)"), tooltipText_lcss()); - // setTooltip($(tableSel).find("th:nth-child(14)"), tooltipText_lcss1()); - // setTooltip($(tableSel).find("th:nth-child(15)"), tooltipText_lcgss()); - // setTooltip($(tableSel).find("th:nth-child(16)"), tooltipText_lclss()); - // setTooltip($(tableSel).find("th:nth-child(17)"), tooltipText_lcds1()); - // setTooltip($(tableSel).find("th:nth-child(18)"), tooltipText_lcds2()); - - // $(tableSel).bind("sortStart",function() { - // $("#pbmTable_sortInProgress").show(); - // }).bind("sortEnd",function() { - // $("#pbmTable_sortInProgress").hide(); - // }); -} - -$(document).ready(function() { - - initTablesorter(); - - // Initialize all main tables: - $('table[id^="mt_"]').each(function() { - initMainTable("#" + $(this).attr("id")); - }); - - // Initialize ranking target: - $("#primary_change_type option[singular_name='regression']").attr( - "selected", true); - $("#incl_premature_changes").attr("checked", false) - updateRankingTarget(); - - fetchContexts(); -}); diff --git a/web/global/global.js b/web/global/global.js index f8f1cc1..13c0917 100644 --- a/web/global/global.js +++ b/web/global/global.js @@ 
-129,6 +129,28 @@ function zeroPad2(s) { } // ### 2 B DOCUMENTED! +function interpolatedColor(r1, g1, b1, r2, g2, b2, fromValue, toValue, value) { + // assert fromValue <= toValue + var frac = Math.max(Math.min(value, toValue), fromValue); + var r = Math.round((1 - frac) * r1 + frac * r2); + var g = Math.round((1 - frac) * g1 + frac * g2); + var b = Math.round((1 - frac) * b1 + frac * b2); + var color = + "#" + zeroPad2(r.toString(16)) + zeroPad2(g.toString(16)) + + zeroPad2(b.toString(16)); + return color; +} + + +// ### 2 B DOCUMENTED! +function scoreColor(score, regressions) { + return regressions + ? interpolatedColor(255, 255, 255, 255, 0, 0, 0.0, 1.0, score) + : interpolatedColor(255, 255, 255, 0, 255, 0, 0.0, 1.0, score); +} + + +// ### 2 B DOCUMENTED! function ageColor(secsAgo) { var secsInDay = 86400; // 24 * 60 * 60 diff --git a/web/global/style.css b/web/global/style.css index c1bc2d5..fd21134 100644 --- a/web/global/style.css +++ b/web/global/style.css @@ -73,12 +73,14 @@ table, th, td { } .tooltipHeader1 { - background-color:#dd0; + background-color:#b1c9eb; /* width:400px; */ opacity: 0.95; } .tooltipBody1 { - background-color:#ffb; + background-color:#c0dbff; + border-style:solid; + border-width:1; /* width:400px; */ /* opacity: 0.95; */ } diff --git a/web/index.html b/web/index.html index d49cdc1..8f28a5f 100644 --- a/web/index.html +++ b/web/index.html @@ -1,22 +1,18 @@ <html> <head> + <title>BM Main Page</title> </head> <body> -<h1> -BM Main Page -</h1> +<span id="title" style="font-size:18; font-weight:bold">BM Main Page</span> <ul> <li> -<a href="changesummary">Change Summary</a> +<a href="topchanges">Top Changes</a> </li> <li> -<a href="analysis">Analysis (all types)</a> -</li> -<li> -<a href="analysis?rankedonly=1">Analysis (rankings only)</a> +<a href="analysis">Detailed Analysis</a> </li> </ul> diff --git a/web/topchanges/index.shtml b/web/topchanges/index.shtml new file mode 100644 index 0000000..03623b6 --- /dev/null +++ 
b/web/topchanges/index.shtml @@ -0,0 +1,193 @@ +<html> + +<head> + <title>BM Top Changes</title> + <script type="text/javascript" src="../global/jquery-1.4.2.min.js"></script> + <script type="text/javascript" + src="../global/tablesorter/jquery.tablesorter.js"></script> + <script type="text/javascript" src="../global/boxover/boxover.js"></script> + <script type="text/javascript" src="../global/global.js"></script> + <script type="text/javascript" src="main.js"></script> + + <link rel="stylesheet" href="../global/style.css" type="text/css" /> + <link type="text/css" rel="stylesheet" + href="../global/tablesorter/docs/css/jq.css" /> + <link type="text/css" rel="stylesheet" + href="../global/tablesorter/themes/bm/style.css" /> +</head> + +<body> + +<span id="title" style="font-size:18; font-weight:bold">BM Top Changes</span> + + +<span style="white-space:nowrap"> +<span id="status">no status</span> +<img alt="spinner" id="spinner" src="../global/images/ajax-spinner.gif" + style="display:none"/> +</span> + +<br /> + +<a id="mainPageLink" href="">main page</a> +<script type="text/javascript"> +$("#mainPageLink").attr("href", "http://" + location.host + "/bm2"); +</script> + +<br /> +<br /> + +<table style="border:0"> +<tr> +<td style="border:0; background-color:#eee"> +This page shows "top 10" change rankings for all host/platform/branch +combinations. 
+</td> +</tr> +</table> + +<br /> + +<table style="border:0"> + +<tr> +<td valign="top" style="border:0"> +<!-- BEGIN Change Type -------------------------------------------- --> + +<table style="border:0"> +<tr> +<td style="border:0"> +<fieldset> +<legend style="font-size:16px; font-weight:bold">Change Type</legend> + +<!-- Primary change type: --> +<select id="primary_change_type" style="margin-top:5px; margin-bottom:5px"> + <option singular_name="regression" tag="regr">Regressions</option> + <option singular_name="improvement" tag="impr">Improvements</option> +</select> + +<br /> + +<!-- Secondary change type: --> +<select id="secondary_change_type" style="margin-bottom:5px"> + <option id="last_change" tag="last">Latest</option> + <option id="max_change_last_week" tag="7">Biggest last week</option> + <option id="max_change_last_month" tag="30">Biggest last month</option> + <option id="max_change_last_6_months" tag="180">Biggest last six months + </option> + <option id="max_change_ever" tag="all">Biggest ever</option> +</select> +<script type="text/javascript"> + setTooltip( + $("#secondary_change_type"), + "Select <em>Latest</em> to consider only the most recent change " + + "for each benchmark, or select a time scope."); +</script> + +<br /> + +<span id="incl_premature_changes_span"> +<input id="incl_premature_changes" type="checkbox" /> +<label id="incl_premature_changes_label">Include premature</label> +</span> +<script type="text/javascript"> + setTooltip( + $("#incl_premature_changes_span"), + "Whether to include scores that are unaffected by " + + "post-change stability."); +</script> + + +</fieldset> +</td> +</tr> +</table> + +<!-- END Change type -------------------------------------------- --> +</td> + +<td valign="top" style="border:0"> +<!-- BEGIN Test case filter -------------------------------------------- --> + +<table style="border:0"> +<tr> +<td style="border:0"> +<fieldset> +<legend style="font-size:16px; font-weight:bold">Test Cases</legend> + 
+<table style="border:0"> +<tr> +<td style="border:0; background-color:#eee"> +Only benchmarks matching the selected test cases are considered (<b>note:</b> +selecting none is equivalent to selecting all). +</td> +</tr> +</table> + +<div id="div_testCaseFilter_hidden" style="display:block"> +<a id="link_testCaseFilter" href="javascript::void(0)" + style="text-decoration:none" onclick="toggleTestCaseFilter()">show</a> +</div> <!-- div_testCaseFilter_hidden --> + +<div id="div_testCaseFilter_shown" style="display:none"> +<a id="link_testCaseFilter" href="javascript::void(0)" + style="text-decoration:none" onclick="toggleTestCaseFilter()">hide</a> + +<a href="javascript::void(0)" style="text-decoration:none" + onclick="$('#test_cases input').attr('checked', true); return false;"> + select all</a> + +<a href="javascript::void(0)" style="text-decoration:none" + onclick="$('#test_cases input').attr('checked', false); return false;"> + clear all</a> + +<table id="test_cases"> +</table> + +</div> <!-- div_testCaseFilter_shown --> + +</fieldset> +</td> +</tr> +</table> + +<!-- END Test case filter -------------------------------------------- --> +</td> +</tr> +</table> + + +<br /> + +<!-- BEGIN Update button ----- --> + +<input type="button" value="Update" onclick="updateMainTable()" /> + +<!-- END Update button ----- --> + +<br /> + + +<!-- BEGIN Main table -------------------------------------------> + +<table style="border:0"> +<tr> +<td style="border:0; background-color:#eee"> +Changes in benchmark time series are ranked according to a score that is +a combined measure of the reliability and significance of the change. +The score ranges from 0 (low) to 1 (high). +<br /> +Clicking a score opens a page with details about the change itself and the +time series in which it occurs. 
+</td> +</tr> +</table> +<table id="main_table" class="tablesorter" border="0" + cellpadding="0" cellspacing="1"> +<!--#include file="tctabledef.html" --> +</table> + +<!-- END Main table -------------------------------------------- --> + +</body> +</html> diff --git a/web/topchanges/main.js b/web/topchanges/main.js new file mode 100644 index 0000000..cb9a208 --- /dev/null +++ b/web/topchanges/main.js @@ -0,0 +1,422 @@ +// --- BEGIN Global variables ----------------------------------- + +var limit = null; // Size of top score list (a "Top 10" list has limit=10 etc.) + +// Id-to-name mappings: +var hosts = null; +var platforms = null; +var branches = null; +var sha1s = null; +var benchmarks = null; +var metrics = null; + +// Test cases with changes: +var testCases = null; + +// --- END Global variables ------------------------------------- + +function toggleTestCaseFilter() { + var divObj_hidden = $("#div_testCaseFilter_hidden"); + var divObj_shown = $("#div_testCaseFilter_shown"); + if (divObj_shown.css("display") == "none") { + divObj_shown.css("display", "block"); + divObj_hidden.css("display", "none"); + } else { + divObj_shown.css("display", "none"); + divObj_hidden.css("display", "block"); + } +} + + +function updateTestCaseTable() { + + // Clear table: + $("#test_cases tr").remove(); + + // Populate table: + + var nc = 8; // # of columns + var nr = Math.ceil(testCases.length / nc); // # of rows + var c1 = testCases.length % nc; // lowest column index for empty bottom cell + + var html = ""; + for (r = 0; r < nr; ++r) { + if ((r < (nr - 1)) || (nc > 1)) + html += "<tr>"; + for (c = 0; c < nc; ++c) { + if ((r == (nr - 1)) && (c >= c1) && (!(nc == 1))) { + html += "<td></td>"; // Fill in empty bottom cell + } else { + var index = -1; + if (c <= c1) + index = r + c * nr; + else + index = r + c1 * nr + (c - c1) * (nr - 1); + + var name = testCases[index]; + html += "<td style=\"white-space: nowrap\">"; + html += "<input type=\"checkbox\" id=\"tc" + index + "\" " + 
+ "name=\"" + name + "\"/>"; + html += "<label for=\"tc" + index + "\">" + name + + "</label>"; + html += "</td>"; + } + } + if ((r < (nr - 1)) || (nc > 1)) + html += "</tr>"; + } + + $("#test_cases").append(html); +} + + +// ### 2 B DOCUMENTED! +function timeSeriesURL( + database, hostId, platformId, branchId, sha1Id, benchmarkId, metricId, + difftol, durtolmin, durtolmax) { + + query = "?db=" + database; + query += "&host=" + encodeURIComponent(hosts[hostId]); + query += "&platform=" + encodeURIComponent(platforms[platformId]); + query += "&branch=" + encodeURIComponent(branches[branchId]); + query += "&sha11=earliest"; // invalid SHA-1 specifies infinite startpoint + query += "&sha12=latest"; // invalid SHA-1 specifies infinite endpoint + query += "&sha1_sel=" + sha1s[sha1Id]; + query += "&benchmark=" + encodeURIComponent(benchmarks[benchmarkId]); + query += "&metric=" + encodeURIComponent(metrics[metricId]); + query += "&difftol=" + difftol; + query += "&durtolmin=" + durtolmin; + query += "&durtolmax=" + durtolmax; + + url = "http://" + location.host + "/bm2/analysis/tsbm.shtml" + query; + return url; +} + + +// Loads a main table from the database. +function loadMainTable(tableName, regressions, last, timescope, premature) { + + var tableSel = "#" + tableName; + + updateStatus("fetching top changes ...", true); + + //var database = $('#database').val(); + var database = "bm"; // ### Hardcoded for now! + // ### The following tolerances are hardcoded for now. They are assumed + // to match the tolerances used for computing the 'change' table + // (see updatechanges.py script)! + var difftol = 1.1; + var durtolmin = 3; + var durtolmax = 10; + + query = "?db=" + database; + query += "&cmd=topchanges"; + query += "®ressions=" + (regressions ? 1 : 0); + query += "&last=" + (last ? 1 : 0); + query += "×cope=" + timescope; + query += "&premature=" + (premature ? 
1 : 0); + query += "&limit=" + limit; + + // Add test case filter: + testCaseFilter = ""; + $("#test_cases input").each(function() { + if (this.checked) { + testCaseFilter += " " + this.name; + } + }); + query += "&testcasefilter=" + testCaseFilter; + + url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; + //alert("url: >" + url + "<"); + + $.ajax({ + url: url, + type: "GET", + dataType: "json", + + success: function(data, textStatus, request) { + if (request.readyState == 4) { + if (request.status == 200) { + + if (data.error != null) { + updateStatus( + "fetching top changes ... failed: " + + data.error, false); + return + } + + updateStatus("fetching top changes ... done", false); + updateStatus("", false); + + // Remove all rows below the header ... + $(tableSel + " tr:gt(0)").remove(); + + // Insert new rows ... + contexts = data.contexts; + html = ""; + for (i = 0; i < contexts.length; ++i) { + context = contexts[i]; + + var hostId = context.hostId; + var platformId = context.platformId; + var branchId = context.branchId; + + html += "<tr>"; + + html += "<td style=\"white-space: nowrap\">" + + hosts[hostId] + "</td>"; + html += "<td style=\"white-space: nowrap\">" + + platforms[platformId] + "</td>"; + html += "<td style=\"white-space: nowrap\">" + + branches[branchId] + "</td>"; + + // Fill in scores: + for (j = 0; j < context.topchanges.length; ++j) { + var tc = context.topchanges[j]; + var benchmarkId = tc[0]; + var metricId = tc[1]; + var sha1Id = tc[2]; + var timestamp = tc[3]; + var score = tc[4]; + var bgColor = scoreColor(score, data.regressions); + html += "<td style=\"background-color:" + + bgColor + "\">"; + html += "<a href=\"" + + timeSeriesURL( + database, hostId, platformId, branchId, + sha1Id, benchmarkId, metricId, difftol, + durtolmin, durtolmax) + + "\"" + + " target=\"_blank\" style=\"" + + "text-decoration:none; color:black; " + + "display:block\">"; + html += score; + html += "</a>"; + + html += "</td>"; + } + + // Fill 
in missing scores: + for (j = context.topchanges.length; j < limit; ++j) { + html += + "<td style=\"background-color:#bbb\"></td>"; + } + + html += "</tr>"; + } + + $(tableSel + " > tbody:last").append(html); + $(tableSel).trigger("update"); + if (html != "") // hm ... why is this test necessary? + $(tableSel).trigger("appendCache"); + } + } + }, + + error: function(request, textStatus, errorThrown) { + descr = errorThrown; + if (errorThrown == null) { + descr = "undefined error - is the server down?"; + } + updateStatus( + "fetching top changes ... error: " + descr, false); + } + + // complete: function(request, textStatus) { + // alert("complete; request.status: " + request.status) + // } + + }); + + return false; +} + + +// Updates the main table based on the current change type. +function updateMainTable() { + var pctTag = $("#primary_change_type option:selected").attr("tag"); + var sctTag = $("#secondary_change_type option:selected").attr("tag"); + var pmtTag = + ($("#incl_premature_changes:checked").length == 1) ? "_pmt" : ""; + var tableName = "mt_" + pctTag + "_" + sctTag + pmtTag; + var divName = "div_" + tableName; + // ### NOTE: tableName and divName unused for now. Later they may be used + // for caching based on multiple main tables (of which only one is shown + // at a time). + + // Hide all tables but the current one: + // ### NOTE: Unused for now (see above) + // $('div[id^="div_mt_"]').css("display", "none"); + // $("#" + divName).css("display", "block"); + + var regressions = (pctTag == "regr"); + var last = (sctTag == "last"); + var sctTag_int = parseInt(sctTag); + var timescope = + ((sctTag == "all") || (isNaN(sctTag_int))) ? 
-1 : sctTag_int; + var premature = (pmtTag == "_pmt"); + + // Load the table from the database if necessary: + // if ($("#" + tableName + " tr").length == 1) { + // // The table for this change type is empty + // // (except for header row), so populate it from the database: + // loadMainTable(tableName, regressions, last, timescope, premature) + // } + + // ### Just reload the only main table for now (later a caching scheme + // should be implemented): + loadMainTable("main_table", regressions, last, timescope, premature) +} + + +function fetchNameMappings() { + updateStatus("fetching name mappings ...", true); + + //database = $('#database').val(); + database = "bm"; // ### Hardcoded for now! + + query = "?db=" + database; + query += "&cmd=namemappings"; + + url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; + //alert("url: >" + url + "<"); + + $.ajax({ + url: url, + type: "GET", + dataType: "json", + + success: function(data, textStatus, request) { + if (request.readyState == 4) { + if (request.status == 200) { + + if (data.error != null) { + updateStatus( + "fetching name mappings ... failed: " + + data.error, false); + return + } + + updateStatus("fetching name mappings ... done", false); + updateStatus("", false); + + hosts = data.hosts; + platforms = data.platforms; + branches = data.branches; + sha1s = data.sha1s; + benchmarks = data.benchmarks; + metrics = data.metrics; + + updateTestCaseTable(); + updateMainTable(); + } + } + }, + + error: function(request, textStatus, errorThrown) { + descr = errorThrown; + if (errorThrown == null) { + descr = "undefined error - is the server down?"; + } + updateStatus( + "fetching name mappings ... 
error: " + descr, false); + } + + // complete: function(request, textStatus) { + // alert("complete; request.status: " + request.status) + // } + + }); + + return false; +} + + +function fetchTestCases() { + updateStatus("fetching test cases ...", true); + + //database = $('#database').val(); + database = "bm"; // ### Hardcoded for now! + + query = "?db=" + database; + query += "&cmd=testcaseswithchanges"; + + url = "http://" + location.host + "/cgi-bin/getstatswrapper" + query; + //alert("url: >" + url + "<"); + + $.ajax({ + url: url, + type: "GET", + dataType: "json", + + success: function(data, textStatus, request) { + if (request.readyState == 4) { + if (request.status == 200) { + + if (data.error != null) { + updateStatus( + "fetching test cases ... failed: " + + data.error, false); + return + } + + updateStatus("fetching test cases ... done", false); + updateStatus("", false); + + testCases = data.testCases; + + fetchNameMappings(); + } + } + }, + + error: function(request, textStatus, errorThrown) { + descr = errorThrown; + if (errorThrown == null) { + descr = "undefined error - is the server down?"; + } + updateStatus( + "fetching test cases ... 
error: " + descr, false); + } + + // complete: function(request, textStatus) { + // alert("complete; request.status: " + request.status) + // } + + }); + + return false; +} + + +$(document).ready(function() { + + initTablesorter(); + + // Initialize main table: + $("#main_table").tablesorter({ + headers: { + 3: { sorter: "mixed_numeric_desc_before_missing" }, // rank pos 0 + 4: { sorter: false }, // 1 + 5: { sorter: false }, // 2 + 6: { sorter: false }, // 3 + 7: { sorter: false }, // 4 + 8: { sorter: false }, // 5 + 9: { sorter: false }, // 6 + 10: { sorter: false }, // 7 + 11: { sorter: false }, // 8 + 12: { sorter: false } // 9 + } + }); + + // Initialize change type: + $("#primary_change_type option[singular_name='regression']").attr( + "selected", true); + $("#incl_premature_changes").attr("checked", false) + + // Deduce the limit directly from the static HTML of the main table: + // (subtract 3 for the Host, Platform, and Branch columns) + limit = $("#main_table th").length - 3; + + fetchTestCases(); +}); diff --git a/web/changesummary/cstabledef.html b/web/topchanges/tctabledef.html index 9c6fbf5..340a1d5 100644 --- a/web/changesummary/cstabledef.html +++ b/web/topchanges/tctabledef.html @@ -3,7 +3,6 @@ <th>Host</th> <th>Platform</th> <th>Branch</th> - <th>0</th> <th>1</th> <th>2</th> <th>3</th> @@ -13,6 +12,7 @@ <th>7</th> <th>8</th> <th>9</th> + <th>10</th> </tr> </thead> <tbody> |