mx.graal/sanitycheck.py @ 22017:66dd063eb6a0

renamed mx/ to mx.graal/ in preparation for working with mxtool2
author Doug Simon <doug.simon@oracle.com>
date Wed, 17 Jun 2015 13:56:55 +0200
parents mx/sanitycheck.py@be896a1983c0
children d5a51a47eb1b
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------

from outputparser import OutputParser, ValuesMatcher
import re, mx, mx_graal, os, sys, StringIO, subprocess
from os.path import isfile, join, exists

gc = 'UseSerialGC'

dacapoSanityWarmup = {
    'avrora':     [0, 0, 3, 6, 13],
    'batik':      [0, 0, 5, 5, 20],
    'eclipse':    [0, 0, 0, 0, 0],
    'fop':        [4, 8, 10, 20, 30],
    'h2':         [0, 0, 5, 5, 8],
    'jython':     [0, 0, 5, 10, 13],
    'luindex':    [0, 0, 5, 10, 10],
    'lusearch':   [0, 4, 5, 5, 8],
    'pmd':        [0, 0, 5, 10, 13],
    'sunflow':    [0, 2, 5, 10, 15],
    'tomcat':     [0, 0, 5, 10, 15],
    'tradebeans': [0, 0, 5, 10, 13],
    'tradesoap':  [0, 0, 5, 10, 15],
    'xalan':      [0, 0, 5, 10, 18],
}

dacapoScalaSanityWarmup = {
    'actors':     [0, 0, 2, 5, 5],
    'apparat':    [0, 0, 2, 5, 5],
    'factorie':   [0, 0, 2, 5, 5],
    'kiama':      [0, 4, 3, 13, 15],
    'scalac':     [0, 0, 5, 15, 20],
    'scaladoc':   [0, 0, 5, 15, 15],
    'scalap':     [0, 0, 5, 15, 20],
    'scalariform':[0, 0, 6, 15, 20],
    'scalatest':  [0, 0, 2, 10, 12],
    'scalaxb':    [0, 0, 5, 15, 25],
    # (gdub) specs sometimes returns a non-zero value even though there is no apparent failure
    'specs':      [0, 0, 0, 0, 0],
    'tmt':        [0, 0, 3, 10, 12]
}

dacapoGateBuildLevels = {
    'avrora':     ['product', 'fastdebug', 'debug'],
    'batik':      ['product', 'fastdebug', 'debug'],
    # (lewurm): does not work with JDK8
    'eclipse':    [],
    'fop':        ['fastdebug', 'debug'],
    'h2':         ['product', 'fastdebug', 'debug'],
    'jython':     ['product', 'fastdebug', 'debug'],
    'luindex':    ['product', 'fastdebug', 'debug'],
    'lusearch':   ['product'],
    'pmd':        ['product', 'fastdebug', 'debug'],
    'sunflow':    ['fastdebug', 'debug'],
    'tomcat':     ['product', 'fastdebug', 'debug'],
    'tradebeans': ['product', 'fastdebug', 'debug'],
    # tradesoap is too unreliable for the gate, often crashing with concurrency problems:
    # http://sourceforge.net/p/dacapobench/bugs/99/
    'tradesoap':  [],
    'xalan':      ['product', 'fastdebug', 'debug'],
}

dacapoScalaGateBuildLevels = {
    'actors':     ['product', 'fastdebug', 'debug'],
    'apparat':    ['product', 'fastdebug', 'debug'],
    'factorie':   ['product', 'fastdebug', 'debug'],
    'kiama':      ['fastdebug', 'debug'],
    'scalac':     ['product', 'fastdebug', 'debug'],
    'scaladoc':   ['product', 'fastdebug', 'debug'],
    'scalap':     ['product', 'fastdebug', 'debug'],
    'scalariform':['product', 'fastdebug', 'debug'],
    'scalatest':  ['product', 'fastdebug', 'debug'],
    'scalaxb':    ['product', 'fastdebug', 'debug'],
    'specs':      ['product', 'fastdebug', 'debug'],
    'tmt':        ['product', 'fastdebug', 'debug'],
}

specjvm2008Names = [
    'startup.helloworld',
    'startup.compiler.compiler',
    'startup.compiler.sunflow',
    'startup.compress',
    'startup.crypto.aes',
    'startup.crypto.rsa',
    'startup.crypto.signverify',
    'startup.mpegaudio',
    'startup.scimark.fft',
    'startup.scimark.lu',
    'startup.scimark.monte_carlo',
    'startup.scimark.sor',
    'startup.scimark.sparse',
    'startup.serial',
    'startup.sunflow',
    'startup.xml.transform',
    'startup.xml.validation',
    'compiler.compiler',
    'compiler.sunflow',
    'compress',
    'crypto.aes',
    'crypto.rsa',
    'crypto.signverify',
    'derby',
    'mpegaudio',
    'scimark.fft.large',
    'scimark.lu.large',
    'scimark.sor.large',
    'scimark.sparse.large',
    'scimark.fft.small',
    'scimark.lu.small',
    'scimark.sor.small',
    'scimark.sparse.small',
    'scimark.monte_carlo',
    'serial',
    'sunflow',
    'xml.transform',
    'xml.validation'
]

def _noneAsEmptyList(a):
    if a is None:
        return []
    return a

class SanityCheckLevel:
    Fast, Gate, Normal, Extensive, Benchmark = range(5)
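
# The warmup tables above are indexed by these levels. For instance (values
# read from dacapoSanityWarmup):
#   dacapoSanityWarmup['fop'][SanityCheckLevel.Gate]       # == 8 iterations
#   dacapoSanityWarmup['fop'][SanityCheckLevel.Benchmark]  # == 30 iterations
# An entry of 0 (e.g. every level of 'eclipse') means the benchmark is
# skipped at that level, as enforced by the ns[level] > 0 check in
# getDacapos/getScalaDacapos below.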

def getSPECjbb2005(benchArgs=None):
    benchArgs = [] if benchArgs is None else benchArgs

    specjbb2005 = mx.get_env('SPECJBB2005')
    if specjbb2005 is None or not exists(join(specjbb2005, 'jbb.jar')):
        mx.abort('Please set the SPECJBB2005 environment variable to a SPECjbb2005 directory')

    score = re.compile(r"^Valid run, Score is (?P<score>[0-9]+)$", re.MULTILINE)
    error = re.compile(r"VALIDATION ERROR")
    success = re.compile(r"^Valid run, Score is [0-9]+$", re.MULTILINE)
    matcher = ValuesMatcher(score, {'group' : 'SPECjbb2005', 'name' : 'score', 'score' : '<score>'})
    classpath = ['jbb.jar', 'check.jar']
    return Test("SPECjbb2005", ['spec.jbb.JBBmain', '-propfile', 'SPECjbb.props'] + benchArgs, [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops', '-cp', os.pathsep.join(classpath)], defaultCwd=specjbb2005)
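
# For illustration: the 'score' regex above accepts a result line of the form
#   Valid run, Score is 52384
# and, judging by the '<...>' templates used throughout this file,
# ValuesMatcher fills the '<score>' placeholder from the regex's named group,
# yielding {'group': 'SPECjbb2005', 'name': 'score', 'score': '52384'}.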

def getSPECjbb2013(benchArgs=None):

    specjbb2013 = mx.get_env('SPECJBB2013')
    if specjbb2013 is None or not exists(join(specjbb2013, 'specjbb2013.jar')):
        mx.abort('Please set the SPECJBB2013 environment variable to a SPECjbb2013 directory')

    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
    # error?
    success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
    matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'max', 'score' : '<max>'})
    matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2013', 'name' : 'critical', 'score' : '<critical>'})
    return Test("SPECjbb2013", ['-jar', 'specjbb2013.jar', '-m', 'composite'] +
            _noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
            vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2013)
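
# For illustration, the 'jops' regex above matches a composite result line
# such as
#   RUN RESULT: hbIR (max attempted) = 20000, hbIR (settled) = 18000, max-jOPS = 17500, critical-jOPS = 5300
# from which both the max-jOPS and critical-jOPS scores are extracted.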

def getSPECjvm2008(benchArgs=None):

    specjvm2008 = mx.get_env('SPECJVM2008')
    if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
        mx.abort('Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory')

    score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\._]+)( result)?: (?P<score>[0-9]+((,|\.)[0-9]+)?)( SPECjvm2008 Base)? ops/m$", re.MULTILINE)
    error = re.compile(r"^Errors in benchmark: ", re.MULTILINE)
    # The ' ops/m' at the end of the success string is important: it is how valid and invalid runs are told apart
    success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+((,|\.)[0-9]+)?( SPECjvm2008 (Base|Peak))? ops/m$", re.MULTILINE)
    matcher = ValuesMatcher(score, {'group' : 'SPECjvm2008', 'name' : '<benchmark>', 'score' : '<score>'})

    return Test("SPECjvm2008", ['-jar', 'SPECjvm2008.jar'] + _noneAsEmptyList(benchArgs), [success], [error], [matcher], vmOpts=['-Xms3g', '-XX:+' + gc, '-XX:-UseCompressedOops'], defaultCwd=specjvm2008)
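
# For illustration, lines the regexes above accept:
#   Score on compress: 123.45 ops/m                 (per-benchmark score)
#   Composite result: 98.76 SPECjvm2008 Base ops/m  (valid-run marker)
# A composite line lacking the trailing ' ops/m' is not counted as a valid run.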

def getDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None):
    checks = []

    for (bench, ns) in dacapoSanityWarmup.items():
        if ns[level] > 0:
            if gateBuildLevel is None or gateBuildLevel in dacapoGateBuildLevels[bench]:
                checks.append(getDacapo(bench, ['-n', str(ns[level])] + _noneAsEmptyList(dacapoArgs)))

    return checks
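
# A minimal usage sketch; this helper is hypothetical and not called anywhere
# in this file. It logs the DaCapo checks that a 'fastdebug' gate run would
# execute (e.g. 'fop' qualifies: its Gate warmup count is 8 and 'fastdebug'
# is in its gate build levels).
def _exampleDacapoGateChecks():
    for check in getDacapos(level=SanityCheckLevel.Gate, gateBuildLevel='fastdebug'):
        mx.log(str(check))  # Test.__str__ returns the test name, e.g. 'DaCapo-fop'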

def getDacapo(name, dacapoArgs=None):
    dacapo = mx.get_env('DACAPO_CP')
    if dacapo is None:
        l = mx.library('DACAPO', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort('DaCapo 9.12 jar file must be specified with DACAPO_CP environment variable or as DACAPO library')

    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo)

    dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
    dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
    dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")
    dacapoTime1 = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) completed warmup 1 in (?P<time>[0-9]+) msec =====")

    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : 'DaCapo', 'name' : '<benchmark>', 'score' : '<time>'})
    dacapoMatcher1 = ValuesMatcher(dacapoTime1, {'group' : 'DaCapo-1stRun', 'name' : '<benchmark>', 'score' : '<time>'})

    # Use the IPv4 stack for DaCapo; tomcat+solaris+ipv6_interface fails (see also: JDK-8072384)
    return Test("DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail], [dacapoMatcher, dacapoMatcher1], ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops', "-Djava.net.preferIPv4Stack=true"])
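
# For illustration, output lines the matchers above recognize:
#   ===== DaCapo 9.12 fop PASSED in 4252 msec =====              (dacapoSuccess / dacapoTime)
#   ===== DaCapo 9.12 fop completed warmup 1 in 9312 msec =====  (dacapoTime1)
# The Scala DaCapo matchers below follow the same pattern with a 0.1.0 version string.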

def getScalaDacapos(level=SanityCheckLevel.Normal, gateBuildLevel=None, dacapoArgs=None):
    checks = []

    for (bench, ns) in dacapoScalaSanityWarmup.items():
        if ns[level] > 0:
            if gateBuildLevel is None or gateBuildLevel in dacapoScalaGateBuildLevels[bench]:
                checks.append(getScalaDacapo(bench, ['-n', str(ns[level])] + _noneAsEmptyList(dacapoArgs)))

    return checks

def getScalaDacapo(name, dacapoArgs=None):
    dacapo = mx.get_env('DACAPO_SCALA_CP')
    if dacapo is None:
        l = mx.library('DACAPO_SCALA', False)
        if l is not None:
            dacapo = l.get_path(True)
        else:
            mx.abort('Scala DaCapo 0.1.0 jar file must be specified with DACAPO_SCALA_CP environment variable or as DACAPO_SCALA library')

    if not isfile(dacapo) or not dacapo.endswith('.jar'):
        mx.abort('Specified Scala DaCapo jar file does not exist or is not a jar file: ' + dacapo)

    dacapoSuccess = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====", re.MULTILINE)
    dacapoFail = re.compile(r"^===== DaCapo 0\.1\.0(-SNAPSHOT)? ([a-zA-Z0-9_]+) FAILED (warmup|) =====", re.MULTILINE)
    dacapoTime = re.compile(r"===== DaCapo 0\.1\.0(-SNAPSHOT)? (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====")

    dacapoMatcher = ValuesMatcher(dacapoTime, {'group' : "Scala-DaCapo", 'name' : '<benchmark>', 'score' : '<time>'})

    return Test("Scala-DaCapo-" + name, ['-jar', mx._cygpathU2W(dacapo), name] + _noneAsEmptyList(dacapoArgs), [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms2g', '-XX:+' + gc, '-XX:-UseCompressedOops'])

def getBootstraps():
    time = re.compile(r"Bootstrapping Graal\.+ in (?P<time>[0-9]+) ms( \(compiled (?P<methods>[0-9]+) methods\))?")
    scoreMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapTime', 'score' : '<time>'})
    methodMatcher = ValuesMatcher(time, {'group' : 'Bootstrap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})
    scoreMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapTime', 'score' : '<time>'})
    methodMatcherBig = ValuesMatcher(time, {'group' : 'Bootstrap-bigHeap', 'name' : 'BootstrapMethods', 'score' : '<methods>'})

    tests = []
    tests.append(Test("Bootstrap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcher, methodMatcher], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
    tests.append(Test("Bootstrap-bigHeap", ['-version'], successREs=[time], scoreMatchers=[scoreMatcherBig, methodMatcherBig], vmOpts=['-Xms2g'], ignoredVMs=['client', 'server'], benchmarkCompilationRate=False))
    return tests
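
# For illustration, the 'time' regex above matches a bootstrap report line
# such as
#   Bootstrapping Graal........ in 23845 ms (compiled 3245 methods)
# where the '(compiled ... methods)' suffix is optional.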

class CTWMode:
    Full, NoInline = range(2)

def getCTW(vm, mode):
    time = re.compile(r"CompileTheWorld : Done \([0-9]+ classes, [0-9]+ methods, (?P<time>[0-9]+) ms\)")
    scoreMatcher = ValuesMatcher(time, {'group' : 'CompileTheWorld', 'name' : 'CompileTime', 'score' : '<time>'})

    jre = os.environ.get('JAVA_HOME')
    if jre is None:
        mx.abort('The JAVA_HOME environment variable must be set so rt.jar can be located')
    if exists(join(jre, 'jre')):
        jre = join(jre, 'jre')
    rtjar = join(jre, 'lib', 'rt.jar')

    args = ['-XX:+CompileTheWorld', '-Xbootclasspath/p:' + rtjar]
    if vm == 'jvmci':
        args += ['-XX:+BootstrapGraal']
    if mode >= CTWMode.NoInline:
        if not mx_graal.isJVMCIEnabled(vm):
            args.append('-XX:-Inline')
        else:
            args.append('-G:CompileTheWorldConfig=-Inline')

    return Test("CompileTheWorld", args, successREs=[time], scoreMatchers=[scoreMatcher], benchmarkCompilationRate=False)
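
# For illustration, the 'time' regex above matches a completion line such as
#   CompileTheWorld : Done (1400 classes, 24000 methods, 150000 ms)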


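# Tee duplicates the VM's output: every line handed to eat() is both captured
# in an in-memory buffer (later fed to an OutputParser) and echoed to stdout
# so the user still sees live output.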
class Tee:
    def __init__(self):
        self.output = StringIO.StringIO()
    def eat(self, line):
        self.output.write(line)
        sys.stdout.write(line)

class Test:
    """
    Encapsulates a single program that is a sanity test and/or a benchmark.
    """
    def __init__(self, name, cmd, successREs=None, failureREs=None, scoreMatchers=None, vmOpts=None, defaultCwd=None, ignoredVMs=None, benchmarkCompilationRate=False):
        self.name = name
        self.successREs = _noneAsEmptyList(successREs)
        self.failureREs = _noneAsEmptyList(failureREs) + [re.compile(r"Exception occurred in scope: ")]
        self.scoreMatchers = _noneAsEmptyList(scoreMatchers)
        self.vmOpts = _noneAsEmptyList(vmOpts)
        self.cmd = cmd
        self.defaultCwd = defaultCwd
        self.ignoredVMs = _noneAsEmptyList(ignoredVMs)
        self.benchmarkCompilationRate = benchmarkCompilationRate
        if benchmarkCompilationRate:
            self.vmOpts = self.vmOpts + ['-XX:+CITime']

    def __str__(self):
        return self.name

    def test(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a sanity test.
        """
        if vm in self.ignoredVMs:
            return True
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()
        jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")
        parser.addMatcher(ValuesMatcher(jvmError, {'jvmError' : '<jvmerror>'}))

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))

        tee = Tee()
        retcode = mx_graal.vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild)
        output = tee.output.getvalue()
        valueMaps = parser.parse(output)

        if len(valueMaps) == 0:
            return False

        record = {}
        for valueMap in valueMaps:
            for key, value in valueMap.items():
                if key in record and record[key] != value:
                    mx.abort('Inconsistent values returned by test matchers: ' + str(valueMaps))
                record[key] = value

        jvmErrorFile = record.get('jvmError')
        if jvmErrorFile:
            mx.log('/!\\ JVM error: dumping error log...')
            with open(jvmErrorFile, 'rb') as fp:
                mx.log(fp.read())
            os.unlink(jvmErrorFile)
            return False

        if record.get('failed') == '1':
            return False

        return retcode == 0 and record.get('passed') == '1'

    def bench(self, vm, cwd=None, extraVmOpts=None, vmbuild=None):
        """
        Run this program as a benchmark.
        """
        if vm in self.ignoredVMs:
            return {}
        if cwd is None:
            cwd = self.defaultCwd
        parser = OutputParser()

        for successRE in self.successREs:
            parser.addMatcher(ValuesMatcher(successRE, {'passed' : '1'}))
        for failureRE in self.failureREs:
            parser.addMatcher(ValuesMatcher(failureRE, {'failed' : '1'}))
        for scoreMatcher in self.scoreMatchers:
            parser.addMatcher(scoreMatcher)

        if self.benchmarkCompilationRate:
            if vm == 'jvmci':
                bps = re.compile(r"ParsedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                ibps = re.compile(r"InlinedBytecodesPerSecond@final: (?P<rate>[0-9]+)")
                parser.addMatcher(ValuesMatcher(bps, {'group' : 'ParsedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : self.name, 'score' : '<rate>'}))
            else:
                ibps = re.compile(r"(?P<compiler>[\w]+) compilation speed: +(?P<rate>[0-9]+) bytes/s {standard")
                parser.addMatcher(ValuesMatcher(ibps, {'group' : 'InlinedBytecodesPerSecond', 'name' : '<compiler>:' + self.name, 'score' : '<rate>'}))

        startDelim = 'START: ' + self.name
        endDelim = 'END: ' + self.name

        outputfile = os.environ.get('BENCH_OUTPUT', None)
        if outputfile:
            # Used only to debug output parsing
            with open(outputfile) as fp:
                output = fp.read()
                start = output.find(startDelim)
                end = output.find(endDelim, start)
                if start == -1 or end == -1:
                    return {}
                output = output[start + len(startDelim + os.linesep): end]
                mx.log(startDelim)
                mx.log(output)
                mx.log(endDelim)
        else:
            tee = Tee()
            mx.log(startDelim)
            if mx_graal.vm(self.vmOpts + _noneAsEmptyList(extraVmOpts) + self.cmd, vm, nonZeroIsFatal=False, out=tee.eat, err=subprocess.STDOUT, cwd=cwd, vmbuild=vmbuild) != 0:
                mx.abort("Benchmark failed (non-zero retcode)")
            mx.log(endDelim)
            output = tee.output.getvalue()

        groups = {}
        passed = False
        for valueMap in parser.parse(output):
            assert ('name' in valueMap and 'score' in valueMap and 'group' in valueMap) or 'passed' in valueMap or 'failed' in valueMap, valueMap
            if valueMap.get('failed') == '1':
                mx.abort("Benchmark failed")
            if valueMap.get('passed') == '1':
                passed = True
            groupName = valueMap.get('group')
            if groupName:
                group = groups.setdefault(groupName, {})
                name = valueMap.get('name')
                score = valueMap.get('score')
                if name and score:
                    group[name] = score

        if not passed:
            mx.abort("Benchmark failed (not passed)")

        return groups
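
# A minimal usage sketch (hypothetical, not invoked by this module): run the
# bootstrap sanity tests on the 'jvmci' VM, then benchmark each one that
# passes. Test.bench returns a dict mapping group names to {name: score}
# dicts, e.g. {'Bootstrap': {'BootstrapTime': '23845', 'BootstrapMethods': '3245'}}.
def _exampleBootstrapRun():
    for t in getBootstraps():
        if not t.test('jvmci'):
            mx.log('sanity check failed: ' + t.name)
        else:
            mx.log(str(t.bench('jvmci')))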