comparison of mx/sanitycheck.py @ 7567:a8bc60aeacb8 (graal-jvmci-8)
fix bug in parsing of SPECjvm2008 output
author    Doug Simon <doug.simon@oracle.com>
date      Wed, 30 Jan 2013 18:19:01 +0100
parents   c420a487b10f
children  7cae58134ff7
--- mx/sanitycheck.py (7564:c420a487b10f)
+++ mx/sanitycheck.py (7567:a8bc60aeacb8)
@@ -209,12 +209,10 @@
         self.output = StringIO.StringIO()
     def eat(self, line):
         self.output.write(line)
         sys.stdout.write(line)
 
-_debugBenchParser = False
-
 """
 Encapsulates a single program that is a sanity test and/or a benchmark.
 """
 class Test:
     def __init__(self, name, cmd, successREs=[], failureREs=[], scoreMatchers=[], vmOpts=[], defaultCwd=None, ignoredVMs=[], benchmarkCompilationRate=True):
@@ -296,39 +294,49 @@
             bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
             ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
             parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
             parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
 
-        outputfile = self.name + '.output'
-        if _debugBenchParser and exists(outputfile):
+        startDelim = 'START: ' + self.name
+        endDelim = 'END: ' + self.name
+
+        outputfile = os.environ.get('BENCH_OUTPUT', None)
+        if outputfile:
+            # Used only to debug output parsing
             with open(outputfile) as fp:
                 output = fp.read()
+                start = output.find(startDelim)
+                end = output.find(endDelim, start)
+                if start == -1 and end == -1:
+                    return {}
+                output = output[start + len(startDelim + os.linesep): end]
+                mx.log(startDelim)
                 mx.log(output)
+                mx.log(endDelim)
         else:
             tee = Tee()
+            mx.log(startDelim)
             if commands.vm(self.vmOpts + opts + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild) != 0:
                 mx.abort("Benchmark failed (non-zero retcode)")
+            mx.log(endDelim)
             output = tee.output.getvalue()
-        if _debugBenchParser:
-            with open(outputfile, 'wb') as fp:
-                fp.write(output)
-
-        ret = {}
+
+        groups = {}
         passed = False
         for valueMap in parser.parse(output):
             assert (valueMap.has_key('name') and valueMap.has_key('score') and valueMap.has_key('group')) or valueMap.has_key('passed') or valueMap.has_key('failed'), valueMap
             if valueMap.get('failed') == '1':
                 mx.abort("Benchmark failed")
             if valueMap.get('passed') == '1':
                 passed = True
             groupName = valueMap.get('group')
             if groupName:
-                group = ret.setdefault(groupName, {})
+                group = groups.setdefault(groupName, {})
                 name = valueMap.get('name')
                 score = valueMap.get('score')
                 if name and score:
                     group[name] = score
 
         if not passed:
             mx.abort("Benchmark failed (not passed)")
 
-        return ret
+        return groups
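For readers checking the new parsing path in isolation, the following is a minimal, standalone sketch of the delimiter-based extraction the patched code performs. The helper name extract_benchmark_output and the sample log text are illustrative only and not part of the changeset; the START:/END: delimiters and the slicing mirror the lines added above.

import os

def extract_benchmark_output(full_output, bench_name):
    # Illustrative helper (hypothetical name): pull out the text between the
    # 'START: <name>' and 'END: <name>' delimiters that the patched code writes
    # around a benchmark run, mirroring the slicing in the new code path.
    startDelim = 'START: ' + bench_name
    endDelim = 'END: ' + bench_name
    start = full_output.find(startDelim)
    end = full_output.find(endDelim, start)
    if start == -1 and end == -1:
        # Delimiters not found: nothing to parse (the patch returns {} here).
        return None
    # Skip past the start delimiter line itself, then cut at the end delimiter.
    return full_output[start + len(startDelim + os.linesep): end]

if __name__ == '__main__':
    # Hypothetical captured log, e.g. a file passed via the BENCH_OUTPUT
    # environment variable introduced by this change.
    log = os.linesep.join([
        'START: specjvm2008',
        'ParsedBytecodesPerSecond@final: 12345',
        'END: specjvm2008',
    ])
    print(extract_benchmark_output(log, 'specjvm2008'))

Run against output saved from a previous benchmark run, this reproduces the slice that the parser's matchers are applied to; pointing BENCH_OUTPUT at such a file lets the output parsing be debugged without re-running the benchmark.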