Diffstat (limited to 'scripts/uploadresults.py')
-rwxr-xr-x  scripts/uploadresults.py | 130
1 file changed, 23 insertions(+), 107 deletions(-)
diff --git a/scripts/uploadresults.py b/scripts/uploadresults.py
index a50285c..89f97ab 100755
--- a/scripts/uploadresults.py
+++ b/scripts/uploadresults.py
@@ -5,7 +5,8 @@ from subprocess import Popen, PIPE
from xml.dom.minidom import parse, getDOMImplementation
from dbaccess import setDatabase, execQuery, commit
from misc import (
- getOptions, textToId, idToText, isValidSHA1, getContext, getAllSnapshots)
+ getOptions, textToId, idToText, findOrInsertId, isValidSHA1, getContext,
+ getAllSnapshots)
# --- BEGIN Global functions ----------------------------------------------
@@ -182,33 +183,6 @@ def extractResults(file):
return results
-# ### 2 B DOCUMENTED!
-def findOrInsertId(table, value, *args):
-
- query_result = execQuery(
- "SELECT id FROM " + table + " WHERE value = %s", (value,))
- if len(query_result) == 1:
- # Found, so return ID:
- return query_result[0][0]
-
- # Not found, so insert:
- query = "INSERT INTO " + table + " (value"
- for i in range(0, len(args), 2):
- query += ", " + args[i]
- query += ") VALUES (%s"
- values = [value]
- for i in range(0, len(args), 2):
- query += ", %s"
- values.append(args[i + 1])
-
- # ... and retrieve ID:
- query += ") RETURNING id"
- query_result = execQuery(query, values)
-
- assert len(query_result) == 1
- return query_result[0][0]
-
-
# Uploads a set of results to the database.
def uploadToDatabase(host, platform, branch, sha1, results):
@@ -237,7 +211,9 @@ def uploadToDatabase(host, platform, branch, sha1, results):
benchmark = (
result['testCase'] + ":" + result['testFunction'] + "(" +
str(result['dataTag']) + ")")
- benchmarkId = findOrInsertId("benchmark", benchmark)
+ testCaseId = findOrInsertId("testCase", result['testCase'])
+ benchmarkId = findOrInsertId(
+ "benchmark", benchmark, "testCaseId", testCaseId)
metricId = findOrInsertId(
"metric", result['metric'], "lowerIsBetter",
@@ -254,20 +230,6 @@ def uploadToDatabase(host, platform, branch, sha1, results):
commit()
-# Returns True iff rankings exist for the given context.
-def rankingsExist(options):
- context_id = getContext(
- textToId('host', options["host"]),
- textToId('platform', options["platform"]),
- textToId('branch', options["branch"]),
- textToId('sha1', options["sha1"]))
-
- matches = execQuery(
- "SELECT id FROM ranking WHERE context2Id = %s LIMIT 1", (context_id,))
-
- return len(matches) > 0
-
-
# Returns the context ID if found, otherwise -1:
def getContextIdFromNames(options):
host_id = textToId("host", options["host"])
@@ -297,35 +259,12 @@ def contextComplete(options):
return sample_size >= max_sample_size
-# Executes the external computerankings.py script with appropriate arguments.
-# If new_context is True, this snapshot is assumed to be the first one in
-# this time series (host/platform/branch combination), and no results have been
-# uploaded for this context yet. In that case, rankings will instead be
-# computed for the latest existing snapshot in this time series.
-def execComputeRankings(options, new_context):
-
- if new_context:
- # Attempt to use the latest available snapshot for this
- # host/platform/branch combination as the actual snapshot:
- host_id = textToId("host", options["host"])
- platform_id = textToId("platform", options["platform"])
- branch_id = textToId("branch", options["branch"])
- snapshots = getAllSnapshots(host_id, platform_id, branch_id)
- if len(snapshots) > 0:
- actual_snapshot = idToText("sha1", snapshots[-1][0])
- if actual_snapshot == options["sha1"]:
- sys.stderr.write(
- "error: context unexpectedly exists in database\n")
- sys.exit(1)
- else:
- return # special case when no snapshots exist yet
- else:
- actual_snapshot = options["sha1"]
-
+# Executes the external updatechanges.py script with appropriate arguments.
+def execUpdateChanges(options):
cmd = [
- "computerankings.py", "--db", options["db"], "--host", options["host"],
+ "updatechanges.py", "--db", options["db"], "--host", options["host"],
"--platform", options["platform"], "--branch", options["branch"],
- "--sha1", actual_snapshot, "--noprogress", "true"]
+ "--noprogress", "true"]
if "dbhost" in options:
cmd += ["--dbhost", options["dbhost"]]
if "dbport" in options:
@@ -339,7 +278,7 @@ def execComputeRankings(options, new_context):
sys.stdout.write(" stdout: >" + stdout.strip() + "<\n")
sys.stdout.write(" stderr: >" + stderr.strip() + "<\n")
else:
- sys.stdout.write("computerankings.py executed successfully:\n")
+ sys.stdout.write("updatechanges.py executed successfully:\n")
sys.stdout.write(" return code: " + str(p.returncode) + "\n")
sys.stdout.write(" stdout: >" + stdout.strip() + "<\n")
sys.stdout.write(" stderr: >" + stderr.strip() + "<\n")
@@ -494,42 +433,21 @@ sys.stdout.write("UPLOADING RESULTS, OPTIONS: " + str(options) + "\n")
sys.stdout.flush()
-# Reject uploading if this context is already complete (since it could
-# modify the input for (and thus invalidate) any rankings computed for
-# this snapshot):
+# Reject uploading if this context is already complete:
if contextComplete(options):
sys.stderr.write(
- "this snapshot is already complete for this time series -> uploading "
- "rejected!\n")
- sys.exit(1)
-
-
-# Reject uploading if rankings exist for this context, since that would
-# require a recomputation of those rankings.
-#
-# (Note that we only need to check the current SHA-1 as long as we assume that
-# results are uploaded in an order that is consistent with the order of the
-# corresponding SHA-1s in the branch in question: Rankings for earlier SHA-1s
-# will not be affected by results for this SHA-1, and rankings for later
-# SHA-1s cannot exist at this point (by assumption).)
-if rankingsExist(options):
- sys.stderr.write(
- "error: rankings have already been computed for this context\n")
+ "this snapshot is already complete -> uploading rejected!\n")
sys.exit(1)
-# If this is the first set of results for the current context, we compute
-# rankings for the same context, only one snapshot back:
+# If this is the first set of results for the current context, we update
+# changes for all time series in this host/platform/branch combination:
if not contextExists(options):
- sys.stdout.write(
- "new snapshot in this time series -> attempt to compute rankings "
- "for the previous snapshot ...\n")
+ sys.stdout.write("update changes (before registering new snapshot) ...\n")
sys.stdout.flush()
- execComputeRankings(options, True)
+ execUpdateChanges(options)
else:
- sys.stdout.write(
- "results for this snapshot have already been uploaded -> don't "
- "attempt to compute rankings at this point\n")
+ sys.stdout.write("skipping update changes (1)\n")
sys.stdout.flush()
@@ -549,22 +467,20 @@ sys.stdout.write("done\n")
sys.stdout.flush()
-# If no more results are expected in this context, we can compute rankings
+# If no more results are expected in this context, we can update changes
# already at this point. (Note: In the case that one or more uploads failed
# for this context, it will be regarded as incomplete forever. In that case,
-# computation of rankings for this context will instead be triggered right
+# update of changes for this context will instead be triggered right
# before uploading the first set of results for the next context (see code
# above).)
if contextComplete(options):
sys.stdout.write(
- "this snapshot is complete for this time series -> attempt to "
- "compute rankings for this snapshot ...\n")
+ "update changes (after completion of current snapshot) ...\n")
sys.stdout.flush()
- execComputeRankings(options, False)
+ execUpdateChanges(options)
else:
- sys.stdout.write(
- "this snapshot is incomplete for this time series -> "
- "don't attempt to compute rankings at this point\n")
+ sys.stdout.write("skipping update changes (2)\n")
+ sys.stdout.flush()
sys.stdout.write("UPLOADING RESULTS DONE\n")