Diffstat (limited to 'scripts/getrankings.py')
-rw-r--r--  scripts/getrankings.py  219
1 file changed, 0 insertions, 219 deletions
diff --git a/scripts/getrankings.py b/scripts/getrankings.py
deleted file mode 100644
index 45cc9a3..0000000
--- a/scripts/getrankings.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import sys
-import json
-from dbaccess import execQuery, database
-from misc import (
- textToId, idToText, getContext, getTimestampFromContext, getSnapshots,
- getRankingContexts, benchmarkToComponents, printJSONHeader)
-
-class GetRankings:
-
- def __init__(
- self, host, platform, branch, sha12, test_case_filter, maxsize):
- self.host = host
- self.host_id = textToId('host', self.host)
- self.platform = platform
- self.platform_id = textToId('platform', self.platform)
- self.branch = branch
- self.branch_id = textToId('branch', self.branch)
- self.context2_id = getContext(
- self.host_id, self.platform_id, self.branch_id,
- textToId('sha1', sha12))
- self.test_case_filter = test_case_filter
- self.maxsize = maxsize
-
-
-    # Returns -1, 0, or 1 if ranking position x is considered less than,
-    # equal to, or greater than ranking position y, respectively.
-    # Note: a negative ranking position is considered worse than any
-    # non-negative ranking position (i.e. it is effectively treated as an
-    # "infinite" ranking position).
- def cmp_rank_pos(self, x, y):
- if x < 0:
- return 1
- elif y < 0:
- return -1
- elif x < y:
- return -1
- elif x > y:
- return 1
- else:
- return 0
-
-
-    # Gets all rankings matching the context/metric combination.
- def getRankings(self):
-
- if self.context2_id < 0:
- print "error: invalid context"
- sys.exit(1)
-
- # Find the previous context (if any) for which rankings exist:
- ranking_contexts = getRankingContexts(
- self.host_id, self.platform_id, self.branch_id)
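-        # Each tuple's first element is a context ID; locate the current
-        # context in the list (assumed to be ordered newest first, so the
-        # entry after it, if any, is the previous ranking context).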
- curr_index = zip(*ranking_contexts)[0].index(self.context2_id)
- if curr_index < (len(ranking_contexts) - 1):
- context2_prev_id = ranking_contexts[curr_index + 1][0]
- else:
- context2_prev_id = -1 # No rankings before this context
-
- rankings = {}
- context_ids = set([self.context2_id]) # Affected context IDs
-
-
- # Get all time series notes:
- qres = execQuery(
- "SELECT benchmarkId, metricId, note FROM timeSeriesAnnotation"
- " WHERE hostId = %s AND platformId = %s AND branchId = %s",
- (self.host_id, self.platform_id, self.branch_id))
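-        # Index the notes by (benchmarkId, metricId) so they can be attached
-        # to the matching ranking rows below.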
- notes = {}
- for benchmark_id, metric_id, note in qres:
- notes[benchmark_id, metric_id] = note
-
-
- # Get rankings for each statistic:
- stat_infos = execQuery("SELECT id, value FROM rankingStat", ())
- for stat_id, stat_name in stat_infos:
-
- # Get the unsorted ranking information:
- ranking_all = execQuery(
- "SELECT benchmarkId, metricId, context1Id, pos, value,"
- " lastChangeTimestamp"
- " FROM ranking"
- " WHERE context2Id = %s"
- " AND statId = %s",
- (self.context2_id, stat_id))
-
- ranking = []
-
- # Apply test case filter and add notes:
- for row in ranking_all:
- benchmark_id = row[0]
- benchmark = idToText("benchmark", benchmark_id)
- test_case, test_function, data_tag = (
- benchmarkToComponents(benchmark))
- if ((self.test_case_filter == None)
- or (test_case in self.test_case_filter)):
-
- # Append note if any:
- metric_id = row[1]
- try:
- note = notes[benchmark_id, metric_id]
- except KeyError:
- note = ""
-
- ranking.append((
- benchmark_id, metric_id, row[2], row[3], row[4],
- row[5], note))
-
-
- for row in ranking:
- context_ids.add(row[2])
-
- # Sort the table in ascending order on the 'pos' column, but
- # so that negative positions are ranked below any other positions:
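-            # (Python 2: when both 'key' and 'cmp' are given, the comparison
-            # function is applied to the extracted key values, i.e. the 'pos'
-            # column.)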
- ranking.sort(key=lambda row: row[3], cmp=self.cmp_rank_pos)
-
- # Keep only the 'maxsize' highest ranked benchmarks:
- ranking = ranking if (self.maxsize < 0) else ranking[:self.maxsize]
-
- if context2_prev_id >= 0:
- # Compute deltas from previous ranking:
- ranking_prev_list = execQuery(
- "SELECT benchmarkId, metricId, pos"
- " FROM ranking"
- " WHERE context2Id = %s"
- " AND statId = %s",
- (context2_prev_id, stat_id))
- ranking_prev = {}
- for benchmark_id, metric_id, pos in ranking_prev_list:
- ranking_prev[benchmark_id, metric_id] = pos
-
- # Append deltas where applicable:
- ranking_without_deltas = ranking
- ranking = []
-
- for (benchmark_id, metric_id, context1_id, pos, value,
- lc_timestamp, note) in ranking_without_deltas:
- row = [benchmark_id, metric_id, context1_id, pos, value,
- lc_timestamp, note]
- if pos >= 0:
- try:
- pos_prev = ranking_prev[benchmark_id, metric_id]
- if pos_prev >= 0:
- delta = pos_prev - pos
- row.append(delta)
- except KeyError:
- pass
- ranking.append(row)
-
-
- # Add to main list:
-            rankings[stat_name.lower()] = ranking
-
-
- # Extract affected SHA-1s:
- assert len(context_ids) > 0
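-        # Build one '%s' placeholder per affected context ID for the
-        # IN (...) clause.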
- sha1_infos = execQuery(
- "SELECT context.id, sha1Id, sha1.value"
- " FROM context, sha1"
- " WHERE context.id IN"
- " (%s" + ", %s"*(len(context_ids) - 1) + ")" +
- " AND sha1Id = sha1.id",
- tuple(context_ids))
-
-
- return sha1_infos, rankings
-
-
- # Extracts the individual snapshots in the maximum range spanned by
- # the SHA-1s in sha1_infos:
- def getSnapshotsInMaxRange(self, sha1_infos):
-
- min_timestamp = max_timestamp = first_sha1_id = last_sha1_id = None
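-        # Track the earliest and latest timestamps seen, along with the
-        # SHA-1 IDs of the corresponding contexts.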
- for context_id, sha1_id, sha1 in sha1_infos:
- timestamp = getTimestampFromContext(context_id)
- if min_timestamp == None:
- min_timestamp = max_timestamp = timestamp
- first_sha1_id = last_sha1_id = sha1_id
- elif timestamp < min_timestamp:
- min_timestamp = timestamp
- first_sha1_id = sha1_id
- elif timestamp > max_timestamp:
- max_timestamp = timestamp
- last_sha1_id = sha1_id
-
- snapshots = getSnapshots(
- self.host_id, self.platform_id, self.branch_id, first_sha1_id,
- last_sha1_id)
-
- return snapshots
-
-
- def execute(self):
- self.sha1_infos, self.rankings = self.getRankings()
- self.snapshots = self.getSnapshotsInMaxRange(self.sha1_infos)
-
- self.benchmarks = execQuery("SELECT id, value FROM benchmark", ())
- self.metrics = execQuery("SELECT id, value FROM metric", ())
-
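-        # writeOutput() is supplied by a subclass (e.g. GetRankingsAsJSON).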
- self.writeOutput()
-
-
- def writeOutputAsJSON(self):
- printJSONHeader()
- json.dump({
- 'database': database(),
- 'host': self.host,
- 'platform': self.platform,
- 'branch': self.branch,
- 'benchmarks': self.benchmarks,
- 'metrics': self.metrics,
- 'snapshots': map(
- lambda s: (idToText("sha1", s[0]), s[1]), self.snapshots),
- 'rankings': self.rankings
- }, sys.stdout)
-
-
-class GetRankingsAsJSON(GetRankings):
- def writeOutput(self):
- self.writeOutputAsJSON()