comparison mx/sanitycheck.py @ 7685:7d66682cc901

Merge.

author:   Christian Haeubl <haeubl@ssw.jku.at>
date:     Fri, 01 Feb 2013 17:06:26 +0100
parents:  641a4c6ac1ce
children: 01aeaf194641
--- mx/sanitycheck.py  (7684:bbf97d6688d3)
+++ mx/sanitycheck.py  (7685:7d66682cc901)
@@ -21,14 +21,12 @@
 # or visit www.oracle.com if you need additional information or have any
 # questions.
 #
 # ----------------------------------------------------------------------------------------------------
 
-from outputparser import OutputParser, Matcher
-import re
-import mx
-import os
+from outputparser import OutputParser, ValuesMatcher
+import re, mx, commands, os, sys, StringIO, subprocess
 from os.path import isfile, join, exists
 
 dacapoSanityWarmup = {
     'avrora': [0, 0, 3, 6, 13],
     'batik': [0, 0, 5, 5, 20],
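
The central refactoring in this changeset is the move from Matcher, where literal values carried a 'const:' prefix and bare names referred to regex groups, to ValuesMatcher, where plain strings are literals and '<name>' templates pull named groups out of the match. outputparser.py itself is not part of this diff, so the following is only a minimal sketch of that convention, not the module's actual code:

    import re

    class ValuesMatcherSketch(object):
        """Illustrative stand-in for outputparser.ValuesMatcher (assumed semantics)."""
        def __init__(self, regex, valuesTemplate):
            self.regex = regex
            self.valuesTemplate = valuesTemplate

        def get_values(self, text):
            match = self.regex.search(text)
            if match is None:
                return None
            values = {}
            for key, template in self.valuesTemplate.items():
                if template.startswith('<') and template.endswith('>'):
                    values[key] = match.group(template[1:-1])  # named regex group
                else:
                    values[key] = template                     # literal value
            return values

    score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$", re.MULTILINE)
    matcher = ValuesMatcherSketch(score, {'group': 'SPECjbb2005', 'name': 'score', 'score': '<score>'})
    print(matcher.get_values("Valid run, Score is 12345"))
    # -> {'group': 'SPECjbb2005', 'name': 'score', 'score': '12345'}
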
@@ -101,28 +99,41 @@
 
     specjbb2005 = mx.get_env('SPECJBB2005')
     if specjbb2005 is None or not exists(join(specjbb2005, 'jbb.jar')):
         mx.abort('Please set the SPECJBB2005 environment variable to a SPECjbb2005 directory')
 
-    score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$")
+    score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$", re.MULTILINE)
     error = re.compile(r"VALIDATION ERROR")
-    success = re.compile(r"^Valid run, Score is [0-9]+$")
-    matcher = Matcher(score, {'const:group' : "const:SPECjbb2005", 'const:name' : 'const:score', 'const:score' : 'score'})
+    success = re.compile(r"^Valid run, Score is [0-9]+$", re.MULTILINE)
+    matcher = ValuesMatcher(score, {'group' : 'SPECjbb2005', 'name' : 'score', 'score' : '<score>'})
     classpath = ['jbb.jar', 'check.jar']
     return Test("SPECjbb2005", ['spec.jbb.JBBmain', '-propfile', 'SPECjbb.props'] + benchArgs, [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops', '-cp', os.pathsep.join(classpath)], defaultCwd=specjbb2005)
+
+def getSPECjbb2013(benchArgs = []):
+
+    specjbb2013 = mx.get_env('SPECJBB2013')
+    if specjbb2013 is None or not exists(join(specjbb2013, 'specjbb2013.jar')):
+        mx.abort('Please set the SPECJBB2013 environment variable to a SPECjbb2013 directory')
+
+    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
+    #error?
+    success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
+    matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'max', 'score' : '<max>'})
+    matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'critical', 'score' : '<critical>'})
+    return Test("SPECjbb2013", ['-jar', 'specjbb2013.jar', '-m', 'composite'] + benchArgs, [success], [], [matcherCritical, matcherMax], vmOpts=['-Xms7g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops', '-XX:CompileCommand=exclude,*.FastMath::slowLog'], defaultCwd=specjbb2013)
 
 def getSPECjvm2008(benchArgs = [], skipCheck=False, skipKitValidation=False, warmupTime=None, iterationTime=None):
 
     specjvm2008 = mx.get_env('SPECJVM2008')
     if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
         mx.abort('Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory')
 
-    score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$")
-    error = re.compile(r"^Errors in benchmark: ")
+    score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$", re.MULTILINE)
+    error = re.compile(r"^Errors in benchmark: ", re.MULTILINE)
     # The ' ops/m' at the end of the success string is important : it's how you can tell valid and invalid runs apart
-    success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$")
-    matcher = Matcher(score, {'const:group' : "const:SPECjvm2008", 'const:name' : 'benchmark', 'const:score' : 'score'}, startNewLine=True)
+    success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$", re.MULTILINE)
+    matcher = ValuesMatcher(score, {'group' : 'SPECjvm2008', 'name' : '<benchmark>', 'score' : '<score>'})
 
     opts = []
     if warmupTime is not None:
         opts += ['-wt', str(warmupTime)]
     if iterationTime is not None:
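
Many of the `^...$` patterns gain re.MULTILINE in this changeset because matching now happens once against the whole captured output (see the Tee-based runner further down) rather than line by line, which is also why the old Matcher's startNewLine flag disappears. A quick illustration of what the flag changes:

    import re

    output = "Some banner\nValid run, Score is 12345\n"
    plain = re.compile(r"^Valid run, Score is [0-9]+$")
    multi = re.compile(r"^Valid run, Score is [0-9]+$", re.MULTILINE)
    print(plain.search(output))               # None: ^ and $ anchor only at string start/end
    print(multi.search(output) is not None)   # True: ^ and $ also anchor at line breaks
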
@@ -154,17 +165,17 @@
         mx.abort('DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library')
 
     if not isfile(dacapo) or not dacapo.endswith('.jar'):
         mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo)
 
-    dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$")
-    dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====$")
+    dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$", re.MULTILINE)
+    dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====$", re.MULTILINE)
     dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
     dacapoTime1 = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec =====")
 
-    dacapoMatcher = Matcher(dacapoTime, {'const:group' : "const:DaCapo", 'const:name' : 'benchmark', 'const:score' : 'time'}, startNewLine=True)
-    dacapoMatcher1 = Matcher(dacapoTime1, {'const:group' : "const:DaCapo-1stRun", 'const:name' : 'benchmark', 'const:score' : 'time'})
+    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : 'DaCapo', 'name' : '<benchmark>', 'score' : '<time>'})
+    dacapoMatcher1 = ValuesMatcher(dacapoTime1, {'group' : 'DaCapo-1stRun', 'name' : '<benchmark>', 'score' : '<time>'})
 
     return Test("DaCapo-" + name, ['-jar', dacapo, name, '-n', str(n), ] + dacapoArgs, [dacapoSuccess], [dacapoFail], [dacapoMatcher, dacapoMatcher1], ['-Xms2g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops'])
 
 def getScalaDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=[]):
     checks = []
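
With ValuesMatcher, dacapoMatcher and dacapoMatcher1 file the same benchmark's timings under two separate groups: 'DaCapo' for the final passing run and 'DaCapo-1stRun' for the first warmup. A worked example against the PASSED pattern (the sample line is synthesized from the regex, not taken from a real run):

    import re

    dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
    line = "===== DaCapo 9.12 avrora PASSED in 4321 msec ====="
    match = dacapoTime.search(line)
    print(match.group('benchmark'), match.group('time'))  # avrora 4321
    # Under the mapping {'group': 'DaCapo', 'name': '<benchmark>', 'score': '<time>'}
    # this yields the value map {'group': 'DaCapo', 'name': 'avrora', 'score': '4321'}.
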
@@ -186,21 +197,21 @@
         mx.abort('Scala DaCapo 0.1.0 jar file must be specified with DACAPO_SCALA_CP environment variable or as DACAPO_SCALA library')
 
     if not isfile(dacapo) or not dacapo.endswith('.jar'):
         mx.abort('Specified Scala DaCapo jar file does not exist or is not a jar file: ' + dacapo)
 
-    dacapoSuccess = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$")
-    dacapoFail = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====$")
+    dacapoSuccess = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$", re.MULTILINE)
+    dacapoFail = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====$", re.MULTILINE)
     dacapoTime = re.compile(r"===== DaCapo 0\.1\.0(-SNAPSHOT)? (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
 
-    dacapoMatcher = Matcher(dacapoTime, {'const:group' : "const:Scala-DaCapo", 'const:name' : 'benchmark', 'const:score' : 'time'})
+    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : "Scala-DaCapo", 'name' : '<benchmark>', 'score' : '<time>'})
 
     return Test("Scala-DaCapo-" + name, ['-jar', dacapo, name, '-n', str(n), ] + dacapoArgs, [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms2g', '-XX:+UseSerialGC', '-XX:-UseCompressedOops'])
 
 def getBootstraps():
     time = re.compile(r"Bootstrapping Graal\.+ in (?P<time>[0-9]+) ms")
-    scoreMatcher = Matcher(time, {'const:group' : 'const:Bootstrap', 'const:name' : 'const:BootstrapTime', 'const:score' : 'time'})
-    scoreMatcherBig = Matcher(time, {'const:group' : 'const:Bootstrap-bigHeap', 'const:name' : 'const:BootstrapTime', 'const:score' : 'time'})
+    scoreMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapTime', 'score' : '<time>'})
+    scoreMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapTime', 'score' : '<time>'})
 
     tests = []
-    tests.append(Test("Bootstrap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcher], ingoreVms=['client', 'server']))
-    tests.append(Test("Bootstrap-bigHeap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcherBig], vmOpts=['-Xms2g'], ingoreVms=['client', 'server']))
+    tests.append(Test("Bootstrap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcher], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
+    tests.append(Test("Bootstrap-bigHeap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcherBig], vmOpts=['-Xms2g'], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
@@ -207,2 +218,9 @@
     return tests
+
+class Tee:
+    def __init__(self):
+        self.output = StringIO.StringIO()
+    def eat(self, line):
+        self.output.write(line)
+        sys.stdout.write(line)
 
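
The new Tee helper is the glue between commands.vm and OutputParser: it is handed to commands.vm as the out callback, so each line of VM output is echoed to the console as it arrives and also buffered so the full text can be parsed afterwards. Standalone usage looks like this:

    tee = Tee()
    tee.eat('Valid run, Score is 12345\n')    # echoed to stdout immediately
    captured = tee.output.getvalue()          # buffered copy for parser.parse(captured)
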
209 """ | 227 """ |
210 Encapsulates a single program that is a sanity test and/or a benchmark. | 228 Encapsulates a single program that is a sanity test and/or a benchmark. |
211 """ | 229 """ |
212 class Test: | 230 class Test: |
213 def __init__(self, name, cmd, successREs=[], failureREs=[], scoreMatchers=[], vmOpts=[], defaultCwd=None, ingoreVms=[]): | 231 def __init__(self, name, cmd, successREs=[], failureREs=[], scoreMatchers=[], vmOpts=[], defaultCwd=None, ignoredVMs=[], benchmarkCompilationRate=True): |
232 | |
214 self.name = name | 233 self.name = name |
215 self.successREs = successREs | 234 self.successREs = successREs |
216 self.failureREs = failureREs + [re.compile(r"Exception occured in scope: ")] | 235 self.failureREs = failureREs + [re.compile(r"Exception occurred in scope: ")] |
217 self.scoreMatchers = scoreMatchers | 236 self.scoreMatchers = scoreMatchers |
218 self.vmOpts = vmOpts | 237 self.vmOpts = vmOpts |
219 self.cmd = cmd | 238 self.cmd = cmd |
220 self.defaultCwd = defaultCwd | 239 self.defaultCwd = defaultCwd |
221 self.ingoreVms = ingoreVms; | 240 self.ignoredVMs = ignoredVMs |
222 | 241 self.benchmarkCompilationRate = benchmarkCompilationRate |
242 if benchmarkCompilationRate: | |
243 self.vmOpts = self.vmOpts + ['-XX:+CITime'] | |
223 | 244 |
224 def __str__(self): | 245 def __str__(self): |
225 return self.name | 246 return self.name |
226 | 247 |
227 def test(self, vm, cwd=None, opts=[], vmbuild=None): | 248 def test(self, vm, cwd=None, opts=[], vmbuild=None): |
228 """ | 249 """ |
229 Run this program as a sanity test. | 250 Run this program as a sanity test. |
230 """ | 251 """ |
231 if (vm in self.ingoreVms): | 252 if (vm in self.ignoredVMs): |
232 return True; | 253 return True |
233 if cwd is None: | 254 if cwd is None: |
234 cwd = self.defaultCwd | 255 cwd = self.defaultCwd |
235 parser = OutputParser(nonZeroIsFatal = False) | 256 parser = OutputParser() |
236 jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)") | 257 jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)") |
237 parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'})) | 258 parser.addMatcher(ValuesMatcher(jvmError, {'jvmError' : '<jvmerror>'})) |
238 | 259 |
239 for successRE in self.successREs: | 260 for successRE in self.successREs: |
240 parser.addMatcher(Matcher(successRE, {'const:passed' : 'const:1'})) | 261 parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'})) |
241 for failureRE in self.failureREs: | 262 for failureRE in self.failureREs: |
242 parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'})) | 263 parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'})) |
243 | 264 |
244 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd, vmbuild) | 265 tee = Tee() |
245 | 266 retcode = commands.vm(self.vmOpts + opts + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild) |
246 parsedLines = result['parsed'] | 267 output = tee.output.getvalue() |
247 if len(parsedLines) == 0: | 268 valueMaps = parser.parse(output) |
269 | |
270 if len(valueMaps) == 0: | |
248 return False | 271 return False |
249 | 272 |
250 assert len(parsedLines) == 1, 'Test matchers should not return more than one line' | 273 assert len(valueMaps) == 1, 'Test matchers should not return more than one record' |
251 | 274 |
252 parsed = parsedLines[0] | 275 record = valueMaps[0] |
253 | 276 |
254 if parsed.has_key('jvmError'): | 277 jvmErrorFile = record.get('jvmError') |
278 if jvmErrorFile: | |
255 mx.log('/!\\JVM Error : dumping error log...') | 279 mx.log('/!\\JVM Error : dumping error log...') |
256 f = open(parsed['jvmError'], 'rb'); | 280 with open(jvmErrorFile, 'rb') as fp: |
257 for line in iter(f.readline, ''): | 281 mx.log(fp.read()) |
258 mx.log(line.rstrip()) | 282 os.unlink(jvmErrorFile) |
259 f.close() | |
260 os.unlink(parsed['jvmError']) | |
261 return False | 283 return False |
262 | 284 |
263 if parsed.has_key('failed') and parsed['failed'] is '1': | 285 if record.get('failed') == '1': |
264 return False | 286 return False |
265 | 287 |
266 return result['retcode'] is 0 and parsed.has_key('passed') and parsed['passed'] is '1' | 288 return retcode == 0 and record.get('passed') == '1' |
267 | 289 |
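
Besides the mechanical switch to commands.vm plus Tee, this hunk fixes a latent bug: the old code compared strings with `is` (`parsed['passed'] is '1'`), which tests object identity rather than equality and only worked by accident of CPython's string caching. The new `record.get('passed') == '1'` is the correct test:

    value = str(123)       # a freshly built string, equal in value to the literal
    print(value == '123')  # True
    print(value is '123')  # typically False: same contents, different objects
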
@@ -268,17 +290,27 @@
     def bench(self, vm, cwd=None, opts=[], vmbuild=None):
         """
         Run this program as a benchmark.
         """
-        if (vm in self.ingoreVms):
-            return {};
+        if (vm in self.ignoredVMs):
+            return {}
         if cwd is None:
             cwd = self.defaultCwd
-        parser = OutputParser(nonZeroIsFatal = False)
+        parser = OutputParser()
 
         for successRE in self.successREs:
-            parser.addMatcher(Matcher(successRE, {'const:passed' : 'const:1'}))
+            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
         for failureRE in self.failureREs:
-            parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'}))
+            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
         for scoreMatcher in self.scoreMatchers:
             parser.addMatcher(scoreMatcher)
+
+        if self.benchmarkCompilationRate:
+            if vm == 'graal':
+                bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
+                ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
+                parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
+                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
+            else:
+                ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
+                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : '<compiler>:' + self.name, 'score' : '<rate>'}))
 
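
This is the consumer of the -XX:+CITime flag added in __init__: when benchmarkCompilationRate is set, bench() additionally scrapes compiler-throughput figures, from Graal's own @final counters when vm == 'graal', and otherwise from HotSpot's CITime summary. The sample line below is only an assumed shape reconstructed from the regex, not verbatim HotSpot output:

    import re

    ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
    line = "C2 compilation speed:   123456 bytes/s {standard"
    match = ibps.search(line)
    print(match.group('compiler'), match.group('rate'))  # C2 123456
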
@@ -285,25 +317,44 @@
-        result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd, vmbuild)
-        if result['retcode'] is not 0:
-            mx.abort("Benchmark failed (non-zero retcode)")
-
-        parsed = result['parsed']
-
-        ret = {}
-
-        passed = False;
-
-        for line in parsed:
-            assert (line.has_key('name') and line.has_key('score') and line.has_key('group')) or line.has_key('passed') or line.has_key('failed')
-            if line.has_key('failed') and line['failed'] is '1':
+        startDelim = 'START: ' + self.name
+        endDelim = 'END: ' + self.name
+
+        outputfile = os.environ.get('BENCH_OUTPUT', None)
+        if outputfile:
+            # Used only to debug output parsing
+            with open(outputfile) as fp:
+                output = fp.read()
+            start = output.find(startDelim)
+            end = output.find(endDelim, start)
+            if start == -1 and end == -1:
+                return {}
+            output = output[start + len(startDelim + os.linesep): end]
+            mx.log(startDelim)
+            mx.log(output)
+            mx.log(endDelim)
+        else:
+            tee = Tee()
+            mx.log(startDelim)
+            if commands.vm(self.vmOpts + opts + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild) != 0:
+                mx.abort("Benchmark failed (non-zero retcode)")
+            mx.log(endDelim)
+            output = tee.output.getvalue()
+
+        groups = {}
+        passed = False
+        for valueMap in parser.parse(output):
+            assert (valueMap.has_key('name') and valueMap.has_key('score') and valueMap.has_key('group')) or valueMap.has_key('passed') or valueMap.has_key('failed'), valueMap
+            if valueMap.get('failed') == '1':
                 mx.abort("Benchmark failed")
-            if line.has_key('passed') and line['passed'] is '1':
+            if valueMap.get('passed') == '1':
                 passed = True
-            if line.has_key('name') and line.has_key('score') and line.has_key('group'):
-                if not ret.has_key(line['group']):
-                    ret[line['group']] = {};
-                ret[line['group']][line['name']] = line['score']
+            groupName = valueMap.get('group')
+            if groupName:
+                group = groups.setdefault(groupName, {})
+                name = valueMap.get('name')
+                score = valueMap.get('score')
+                if name and score:
+                    group[name] = score
 
         if not passed:
             mx.abort("Benchmark failed (not passed)")
 
-        return ret
+        return groups
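
bench() now returns scores nested by group, and the START:/END: delimiters it logs make a captured run replayable: pointing the BENCH_OUTPUT environment variable at a saved log re-runs only the parsing step via the debug path above, without launching the VM. The returned structure, with illustrative numbers:

    result = {
        'DaCapo':        {'avrora': '4321'},   # group -> {benchmark name -> score}
        'DaCapo-1stRun': {'avrora': '6789'},
    }
    for group, scores in result.items():
        for name, score in scores.items():
            print('%s/%s = %s' % (group, name, score))
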