summaryrefslogtreecommitdiffstats
path: root/scripts/uploadresults.py
blob: 460207f71f885467b5b522d878b7c590b960eec8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
#!/usr/bin/env python

import sys
from subprocess import Popen, PIPE
from xml.dom.minidom import parse
from dbaccess import setDatabase, execQuery, commit
from misc import (
    getOptions, textToId, idToText, isValidSHA1, getContext, getAllSnapshots)


# --- BEGIN Global functions ----------------------------------------------

def printUsage():
    # Print a one-line usage summary to standard output.
    # BUGFIX: added the missing space after "usage:" so the message does not
    # run into the script name.
    print (
        "usage: " + sys.argv[0] +
        " --help | [--dbhost H --dbport P] --db D --host H --platform P "
        "--branch B --sha1 S --file F")

def printVerboseUsage():
    # Print the usage summary followed by a description of each option.
    # CONSISTENCY FIX: the single Python-2-only 'print "..."' statement is
    # replaced with the parenthesized form used by every other print in this
    # function (identical output, and parseable by Python 3 as well).
    printUsage()
    print("\noptions:")
    print(
        "    --help: This help.")
    print(
        "  --dbhost: The database server host (overriding the default).")
    print(
        "  --dbport: The database server port (overriding the default).")
    print(
        "      --db: The database. One of 'bm' or 'bm-dev' (the latter "
        "intended for experimentation).")
    print(
        "    --host: The physical machine on which the results were "
        "produced (e.g. barbarella or 172.24.90.79).")
    print(
        "--platform: The OS/compiler/architecture combination "
        "(e.g. linux-g++-32).")
    print(
        "  --branch: The product branch (e.g. 'qt 4.6', 'qt 4.7', or "
        "'qt master').")
    print(
        "    --sha1: The tested revision within the branch. Can be "
        "extracted using 'git log -1 --pretty=format:%H' (assuming the "
        "tested revision is the current head revision).")
    print(
        "    --file: The results file in QTestLib XML output format.")

# Returns True iff a low value indicates better performance than a high
# value for the given metric. Raises KeyError for an unknown metric.
def lowerIsBetter(metric):
    metricPolarity = {
        # Time/cost metrics: lower is better.
        "walltimemilliseconds": True,
        "walltime": True,
        "cputicks": True,
        "instructionreads": True,
        "events": True,
        # Throughput metrics: higher is better.
        "bitspersecond": False,
        "bytespersecond": False,
        "framespersecond": False,
        "fps": False,
        # add more if necessary ...
    }
    return metricPolarity[metric.lower()]

# Returns the canonical (i.e. "unaliased") form of the metric name.
# Unknown metrics are passed through unchanged.
def canonicalMetric(metric):
    aliases = {
        "walltime": "WalltimeMilliseconds",
        "framespersecond": "fps",
    }
    return aliases.get(metric.lower(), metric)

# Returns True iff at least one of the given incidents indicates failure for
# the given data tag.
#
# BUGFIX: the original used a bare 'except:' to skip fail-incidents without a
# DataTag child; that also swallowed KeyboardInterrupt/SystemExit and any
# unrelated error. An explicit emptiness check makes the intent clear and
# only skips the genuinely tag-less incidents.
def matchesFailedIncident(dataTag, incidents):
    for incident in incidents:
        if incident.getAttribute("type") == "fail":
            dataTagElems = incident.getElementsByTagName("DataTag")
            if not dataTagElems:
                # A failed incident without a data tag cannot be matched
                # against a specific tag; skip it.
                continue
            if dataTagElems[0].childNodes[0].data == dataTag:
                return True
    return False

# Returns results extracted from a file in QTestLib XML format
# (note: multiple top-level TestCase elements are allowed).
#
# 'file' may be a file name or a file object (whatever minidom.parse()
# accepts). Each returned item is a dict with keys: testCase, testFunction,
# dataTag, metric, lowerIsBetter, value, and valid.
#
# FIXES: the two bare 'except:' clauses are narrowed to the specific
# exceptions they were meant to catch, and input validation no longer relies
# on 'assert' (which is stripped under 'python -O'). ValueError is raised on
# malformed input; the caller's 'except BaseException' still catches it.
def extractResults(file):

    def processBenchmarkResults(
        results, dom, testCase, testFuncElem, testFunction, incidents):
        # Append one result dict per BenchmarkResult element of this
        # test function ...
        bmResultElems = testFuncElem.getElementsByTagName("BenchmarkResult")
        for bmResultElem in bmResultElems:

            # Data tag (note that "" is a valid data tag):
            dataTag = bmResultElem.getAttribute("tag").strip()

            # Metric (results for unsupported metrics are skipped, not
            # treated as errors):
            metric = bmResultElem.getAttribute("metric").strip()
            try:
                lowerIsBetter_ = lowerIsBetter(metric)
            except KeyError:
                # Only the lookup failure for an unknown metric name is
                # caught here; anything else propagates.
                print(
                    "WARNING: skipping result for unsupported metric: >" +
                    metric + "<")
                continue
            metric = canonicalMetric(metric)

            # Value:
            value = float(bmResultElem.getAttribute("value"))

            # Iterations (optional); when present, normalize the value to a
            # per-iteration average:
            iterAttr = bmResultElem.getAttribute("iterations").strip()
            if iterAttr != "":
                try:
                    iterations = int(iterAttr)
                except ValueError:
                    iterations = -1
                if iterations <= 0:
                    raise ValueError(
                        "found 'iterations' attribute that is not a "
                        "positive integer: " + iterAttr)
                value = value / iterations

            # Valid (a result is invalid iff a failed incident matches its
            # data tag):
            valid = not matchesFailedIncident(dataTag, incidents)

            # Add item to array ...
            results.append(
                {'testCase': testCase,
                 'testFunction': testFunction,
                 'dataTag': dataTag,
                 'metric': metric,
                 'lowerIsBetter': lowerIsBetter_,
                 'value': value,
                 'valid': valid})

    def processTestFunctions(results, dom, testCaseElem, testCase):
        # Loop over test functions ...
        testFuncElems = testCaseElem.getElementsByTagName("TestFunction")
        for testFuncElem in testFuncElems:
            testFunction = testFuncElem.getAttribute("name").strip()
            if testFunction == "":
                raise ValueError("TestFunction element with empty name")
            incidents = testFuncElem.getElementsByTagName("Incident")
            processBenchmarkResults(
                results, dom, testCase, testFuncElem,
                testFunction, incidents)

    def processTestCases(results, dom):
        # Loop over test cases ...
        testCaseElems = dom.getElementsByTagName("TestCase")
        for testCaseElem in testCaseElems:
            testCase = testCaseElem.getAttribute("name").strip()
            if testCase == "":
                raise ValueError("TestCase element with empty name")
            processTestFunctions(
                results, dom, testCaseElem, testCase)

    # Load DOM structure from file:
    try:
        dom = parse(file)
    except Exception:
        # Re-raise as ValueError carrying the parse error as the message;
        # the caller prints e.args[0].
        raise ValueError(str(sys.exc_info()[1]))

    # Extract benchmark results from DOM structure:
    results = []
    processTestCases(results, dom)
    dom.unlink()

    return results

# Returns the ID of the row in 'table' whose 'value' column equals the given
# value. If no such row exists, a new row is inserted and its ID returned;
# 'args' holds alternating extra column-name / column-value pairs for the
# insert.
#
# NOTE(review): queries are assembled by string concatenation. Single quotes
# in 'value' are now doubled (minimal SQL string escaping, fixing queries
# that previously broke on quoted input), but table/column names and extra
# values are still interpolated raw — switching to parameterized queries
# would be safer; confirm whether dbaccess.execQuery supports parameters.
def findOrInsertId(table, value, *args):

    # Double single quotes so a quote inside the value cannot terminate the
    # SQL string literal early:
    escapedValue = str(value).replace("'", "''")

    query_result = execQuery(
        "SELECT id FROM " + table + " WHERE value = '" + escapedValue + "';")
    if len(query_result) == 1:
        # Found, so return the existing ID:
        return query_result[0][0]

    # Not found, so insert:
    query = "INSERT INTO " + table + " (value"
    for i in range(0, len(args), 2):
        query += ", " + str(args[i])
    query += ") VALUES ('" + escapedValue + "'"
    for i in range(0, len(args), 2):
        query += ", " + str(args[i + 1])

    # ... and retrieve the ID of the row we just inserted:
    query += ") RETURNING id;"
    query_result = execQuery(query)

    assert len(query_result) == 1
    return query_result[0][0]


# Uploads a set of results to the database.
#
# 'host', 'platform', 'branch', and 'sha1' identify the context; 'results'
# is the list of dicts produced by extractResults(). commit() is called once
# at the very end, after all rows have been queued.
def uploadToDatabase(host, platform, branch, sha1, results):

    # Append a row to the 'upload' table (to record this upload event) ...
    execQuery("INSERT INTO upload DEFAULT VALUES;", False)

    # Retrieve the ID of the row we just inserted (currval() only sees the
    # INSERT above if both run in the same database session) ...
    uploadId = execQuery("SELECT currval('upload_id_seq');")[0][0]

    # Map each context component name to its row ID, inserting new rows as
    # needed:
    hostId = findOrInsertId("host", host)
    platformId = findOrInsertId("platform", platform)
    branchId = findOrInsertId("branch", branch)
    sha1Id = findOrInsertId("sha1", sha1)

    # Find or create the context row itself (getContext() returns -1 when
    # the combination does not exist yet):
    contextId = getContext(hostId, platformId, branchId, sha1Id)
    if contextId == -1:
        contextId = execQuery(
            "INSERT INTO context"
            " (hostId, platformId, branchId, sha1Id)"
            " VALUES (%d, %d, %d, %d)"
            " RETURNING id;"
            % (hostId, platformId, branchId, sha1Id))[0][0]

    # Append rows to the 'result' table ...
    for result in results:
        # A benchmark is identified as "testCase:testFunction(dataTag)":
        benchmark = (
            result['testCase'] + ":" + result['testFunction'] + "(" +
            str(result['dataTag']) + ")")
        benchmarkId = findOrInsertId("benchmark", benchmark)

        metricId = findOrInsertId(
            "metric", result['metric'], "lowerIsBetter",
            result['lowerIsBetter'])

        # NOTE(review): result['valid'] is a bool interpolated via %s,
        # yielding the literals 'True'/'False' — presumably the 'valid'
        # column accepts these as boolean input; confirm against the schema.
        query = (
            "INSERT INTO result"
            " (contextId, benchmarkId, value, valid, metricId, uploadId)"
            " VALUES (%d, %d, %f, %s, %d, %d);"
            % (contextId, benchmarkId, result['value'], result['valid'],
               metricId, uploadId))

        execQuery(query, False)

    # Write to database:
    commit()


# Returns True iff rankings exist for the given context.
def rankingsExist(options):
    component_ids = [
        textToId(table, options[table])
        for table in ("host", "platform", "branch", "sha1")]
    context_id = getContext(*component_ids)

    # One matching row is enough to decide:
    matches = execQuery(
        "SELECT id FROM ranking WHERE context2Id = %d LIMIT 1;" % context_id)

    return len(matches) != 0


# Returns the context ID if found, otherwise -1:
def getContextIdFromNames(options):
    ids = tuple(
        textToId(table, options[table])
        for table in ("host", "platform", "branch", "sha1"))
    return getContext(*ids)


# Returns True iff this context exists:
def contextExists(options):
    context_id = getContextIdFromNames(options)
    return context_id != -1


# Returns True iff no more results are to be expected for this context
# (i.e. the number of distinct uploads has reached the expected sample size):
def contextComplete(options):

    # WARNING: This value must match the corresponding value in the script
    # that triggers benchmark execution.
    max_sample_size = 5

    # Count the distinct uploads recorded for this context so far:
    query = (
        "SELECT count(*) FROM"
        " (SELECT DISTINCT uploadId from result where contextId=%d) AS foo;"
        % getContextIdFromNames(options))
    sample_size = execQuery(query)[0][0]

    return sample_size >= max_sample_size


# Executes the external computerankings.py script with appropriate arguments.
# If new_context is True, this snapshot is assumed to be the first one in
# this time series (host/platform/branch combination), and no results have been
# uploaded for this context yet. In that case, rankings will instead be
# computed for the latest existing snapshot in this time series.
#
# BUGFIX: the failure branch concatenated the command *list* into the error
# message ('"..." + cmd'), which raised TypeError; the command is now joined
# into a string first. The Python-2-only print statements are replaced with
# sys.stdout.write() calls producing identical output (and matching the
# style used by the main program).
def execComputeRankings(options, new_context):

    if new_context:
        # Attempt to use the latest available snapshot for this
        # host/platform/branch combination as the actual snapshot:
        host_id = textToId("host", options["host"])
        platform_id = textToId("platform", options["platform"])
        branch_id = textToId("branch", options["branch"])
        snapshots = getAllSnapshots(host_id, platform_id, branch_id)
        if len(snapshots) > 0:
            actual_snapshot = idToText("sha1", snapshots[-1][0])
            if actual_snapshot == options["sha1"]:
                sys.stderr.write(
                    "error: context unexpectedly exists in database\n")
                sys.exit(1)
        else:
            return # special case when no snapshots exist yet
    else:
        actual_snapshot = options["sha1"]

    cmd = [
        "computerankings.py", "--db", options["db"], "--host", options["host"],
        "--platform", options["platform"], "--branch", options["branch"],
        "--sha1", actual_snapshot, "--noprogress", "true"]
    if "dbhost" in options:
        cmd += ["--dbhost", options["dbhost"]]
    if "dbport" in options:
        cmd += ["--dbport", options["dbport"]]

    p = Popen(cmd, stdout = PIPE, stderr = PIPE)
    stdout, stderr = p.communicate()

    # Report the outcome either way, including the child's output:
    if p.returncode != 0:
        sys.stdout.write(
            "failed to execute command '" + " ".join(cmd) + "':\n")
    else:
        sys.stdout.write("computerankings.py executed successfully:\n")
    sys.stdout.write("  return code: %s\n" % p.returncode)
    sys.stdout.write("  stdout: >%s<\n" % stdout.strip())
    sys.stdout.write("  stderr: >%s<\n" % stderr.strip())


# --- END Global functions ----------------------------------------------


# --- BEGIN Main program ----------------------------------------------

# Parse the invocation options into a dict. NOTE(review): 'http_get'
# presumably indicates whether the script was invoked via HTTP GET —
# confirm against misc.getOptions().
options, http_get = getOptions()

if "help" in options:
    printVerboseUsage()
    sys.exit(0)

# All of these options are mandatory; bail out with the short usage text
# if any is missing:
if (not ("db" in options and "host" in options and "platform" in options and
    "branch" in options and "sha1" in options and "file" in options)):
    printUsage()
    sys.exit(0)

if not isValidSHA1(options["sha1"]):
    sys.stderr.write("error: invalid SHA-1: " + options["sha1"] + "\n")
    sys.exit(1)

# Select the database server and database (host/port fall back to the
# defaults when not given):
setDatabase(
    options["dbhost"] if "dbhost" in options else None,
    options["dbport"] if "dbport" in options else None,
    options["db"])


sys.stdout.write("UPLOADING RESULTS, OPTIONS: " + str(options) + "\n")
sys.stdout.flush()


# Reject uploading if this context is already complete:
if contextComplete(options):
    sys.stderr.write(
        "this snapshot is already complete for this time series -> uploading "
        "rejected!\n")
    sys.exit(1)


# Reject uploading if rankings exist for this context, since that would
# require a recomputation of those rankings.
#
# (Note that we only need to check the current SHA-1 as long as we assume that
# results are uploaded in an order that is consistent with the order of the
# corresponding SHA-1s in the branch in question: Rankings for earlier SHA-1s
# will not be affected by results for this SHA-1, and rankings for later
# SHA-1s cannot exist at this point (by assumption).)
if rankingsExist(options):
    sys.stderr.write(
        "error: rankings have already been computed for this context\n")
    sys.exit(1)


# If this is the first set of results for the current context, we compute
# rankings for the same context, only one snapshot back:
if not contextExists(options):
    sys.stdout.write(
        "new snapshot in this time series -> attempt to compute rankings "
        "for the previous snapshot ...\n")
    sys.stdout.flush()
    execComputeRankings(options, True)
else:
    sys.stdout.write(
        "results for this snapshot have already been uploaded -> don't "
        "attempt to compute rankings at this point\n")
    sys.stdout.flush()


# Parse results and store them in the database:
sys.stdout.write("parsing results and uploading to database ... ")
sys.stdout.flush()
try:
    results = extractResults(options["file"])
except BaseException as e:
    sys.stderr.write(
        "error: failed to parse XML file: " + str(e.args[0]) + "\n")
    sys.exit(1)
uploadToDatabase(
    options["host"], options["platform"], options["branch"], options["sha1"],
    results)
sys.stdout.write("done\n")
sys.stdout.flush()


# If no more results are expected in this context, we can compute rankings
# already at this point. (Note: In the case that one or more uploads failed
# for this context, it will be regarded as incomplete forever. In that case,
# computation of rankings for this context will instead be triggered right
# before uploading the first set of results for the next context (see code
# above).)
if contextComplete(options):
    sys.stdout.write(
        "this snapshot is complete for this time series -> attempt to "
        "compute rankings for this snapshot ...\n")
    sys.stdout.flush()
    execComputeRankings(options, False)
else:
    sys.stdout.write(
        "this snapshot is incomplete for this time series -> "
        "don't attempt to compute rankings at this point\n")


sys.stdout.write("UPLOADING RESULTS DONE\n")

# --- END Main program ----------------------------------------------