Mercurial > hg > truffle
comparison mx/sanitycheck.py @ 4215:a2caa019ba3a
Fix mx : commands' scripts mx_init hook should be called before parsing command line arguments.
Fix mx : call the mx_post_parse_cmd_line hook from commands' scripts
OutputParser : cosmetic changes to logged output, return the retcode along with the parsed output
Add a new Test class representing a sanity check and/or a benchmark
Port dacapo command to use this class, beginning work on benchmarks
author | Gilles Duboscq <gilles.m.duboscq@gmail.com> |
---|---|
date | Wed, 04 Jan 2012 13:52:46 +0100 |
parents | c78bace5086a |
children | f3271682fe5a |
comparison
equal
deleted
inserted
replaced
4213:e4cfa571d8c4 | 4215:a2caa019ba3a |
---|---|
1 from outputparser import OutputParser, Matcher | 1 from outputparser import OutputParser, Matcher |
2 import re | 2 import re |
3 import mx | 3 import mx |
4 import os | 4 import os |
5 import commands | |
5 from os.path import isfile | 6 from os.path import isfile |
6 | |
7 dacapoVMOpts = ['-Xms1g', '-Xmx2g'] | |
8 | 7 |
9 dacapoSanityWarmup = { | 8 dacapoSanityWarmup = { |
10 'avrora': [0, 0, 3, 6], | 9 'avrora': [0, 0, 3, 6], |
11 'batik': [0 , 0, 5, 5], | 10 'batik': [0 , 0, 5, 5], |
12 'eclipse': [1 , 4, 5, 10], | 11 'eclipse': [2 , 4, 5, 10], |
13 'fop': [4 , 8, 10, 20], | 12 'fop': [4 , 8, 10, 20], |
14 'h2': [0 , 0, 5, 5], | 13 'h2': [0 , 0, 5, 5], |
15 'jython': [0 , 0, 5, 10], | 14 'jython': [0 , 0, 5, 10], |
16 'luindex': [0 , 0, 5, 10], | 15 'luindex': [0 , 0, 5, 10], |
17 'lusearch': [0 , 4, 5, 10], | 16 'lusearch': [0 , 4, 5, 10], |
18 'pmd': [0 , 0, 5, 10], | 17 'pmd': [0 , 0, 5, 10], |
19 'sunflow': [0 , 0, 5, 10], | 18 'sunflow': [0 , 0, 5, 10], |
20 'tomcat': [0 , 0, 5, 10], | 19 'tomcat': [0 , 0, 5, 10], |
21 'tradebeans': [0 , 0, 5, 10], | 20 'tradebeans': [0 , 0, 5, 10], |
22 'tradesoap': [0 , 4, 5, 10], | 21 'tradesoap': [2 , 4, 5, 10], |
23 'xalan': [0 , 0, 5, 10], | 22 'xalan': [0 , 0, 5, 10], |
24 } | 23 } |
25 | |
26 def getDacapoCmd(bench, vmOpts=dacapoVMOpts,n=5): | |
27 dacapo = mx.check_get_env('DACAPO_CP') | |
28 if not isfile(dacapo) or not dacapo.endswith('.jar'): | |
29 mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo) | |
30 return vmOpts + ['-cp', dacapo, 'Harness', '-n', str(n), bench] | |
31 | 24 |
32 class SanityCheckLevel: | 25 class SanityCheckLevel: |
33 Fast, Gate, Normal, Extensive = range(4) | 26 Fast, Gate, Normal, Extensive = range(4) |
34 | 27 |
35 def getSanityChecks(level=SanityCheckLevel.Normal): | 28 def getDacapos(level=SanityCheckLevel.Normal, dacapoArgs=None): |
36 checks = [] | 29 checks = {} |
37 | 30 |
38 dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$") | |
39 for (bench, ns) in dacapoSanityWarmup.items(): | 31 for (bench, ns) in dacapoSanityWarmup.items(): |
40 if ns[level] > 0: | 32 if ns[level] > 0: |
41 checks.append({'cmd' : getDacapoCmd(bench, vmOpts=dacapoVMOpts + ['-esa'], n=ns[level]), 'success' : dacapoSuccess}) | 33 checks[bench] = getDacapo(bench, ns[level], dacapoArgs) |
42 | |
43 bootstrapSuccess = re.compile(r"in [0-9]+ ms$"); | |
44 checks.append({'cmd' : ['-esa', '-version'], 'success' : bootstrapSuccess}) | |
45 | 34 |
46 return checks | 35 return checks |
47 | 36 |
48 def runSanityCheck(cmd, successRE, cwd=None): | 37 def getDacapo(name, n, dacapoArgs=None): |
49 parser = OutputParser(nonZeroIsFatal=False) | 38 dacapo = mx.get_env('DACAPO_CP') |
50 jvmError = re.compile(r"\b(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)\b") | 39 if dacapo is None: |
51 parser.addMatcher(Matcher(successRE, {'const:passed' : 'const:1'})) | 40 dacapo = commands._graal_home + r'/lib/dacapo-9.12-bach.jar' |
52 parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'})) | |
53 | 41 |
54 result = parser.parse(cmd, cwd) | 42 if not isfile(dacapo) or not dacapo.endswith('.jar'): |
55 assert len(result) == 1, 'Sanity check matchers should not return more than one line' | 43 mx.abort('Specified DaCapo jar file does not exist or is not a jar file: ' + dacapo) |
56 parsed = result[0] | |
57 | 44 |
58 if parsed.has_key('jvmError'): | 45 dacapoSuccess = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) PASSED in ([0-9]+) msec =====$") |
59 mx.log('JVM Error : dumping error log...') | 46 dacapoFail = re.compile(r"^===== DaCapo 9\.12 ([a-zA-Z0-9_]+) FAILED (warmup|) =====$") |
60 f = open(parsed['jvmError'], 'rb'); | 47 dacapoTime = re.compile(r"===== DaCapo 9\.12 (?P<benchmark>[a-zA-Z0-9_]+) PASSED in (?P<time>[0-9]+) msec =====") |
61 for line in iter(f.readline(), ''): | |
62 mx.log(line) | |
63 f.close() | |
64 os.unlink(parsed['jvmError']) | |
65 return False | |
66 return parsed.has_key('passed') | |
67 | 48 |
49 dacapoMatcher = Matcher(dacapoTime, {'const:name' : 'benchmark', 'const:score' : 'time'}) | |
50 | |
51 return Test("DaCapo-" + name, "DaCapo", ['-jar', dacapo, name, '-n', str(n), ] + dacapoArgs, [dacapoSuccess], [dacapoFail], dacapoMatcher, ['-Xms1g', '-Xmx2g', '-XX:MaxPermSize=256m']) | |
52 | |
53 class Test: | |
54 def __init__(self, name, group, cmd, succesREs=None, failureREs=None, scoreMatchers=None, vmOpts=None): | |
55 self.name = name | |
56 self.group = group | |
57 self.succesREs = succesREs | |
58 self.failureREs = failureREs | |
59 self.scoreMatchers = scoreMatchers | |
60 self.vmOpts = vmOpts | |
61 self.cmd = cmd | |
62 | |
63 def test(self, vm, cwd=None, opts=None): | |
64 parser = OutputParser(nonZeroIsFatal = False) | |
65 jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)") | |
66 parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'})) | |
67 | |
68 for succesRE in self.succesREs: | |
69 parser.addMatcher(Matcher(succesRE, {'const:passed' : 'const:1'})) | |
70 for failureRE in self.failureREs: | |
71 parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'})) | |
72 | |
73 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd) | |
74 | |
75 parsedLines = result['parsed'] | |
76 assert len(parsedLines) == 1, 'Test matchers should not return more than one line' | |
77 | |
78 parsed = parsedLines[0] | |
79 | |
80 if parsed.has_key('jvmError'): | |
81 mx.log('/!\\JVM Error : dumping error log...') | |
82 f = open(parsed['jvmError'], 'rb'); | |
83 for line in iter(f.readline, ''): | |
84 mx.log(line.rstrip()) | |
85 f.close() | |
86 os.unlink(parsed['jvmError']) | |
87 return False | |
88 | |
89 if parsed.has_key('failed') and parsed['failed'] is 1: | |
90 return False | |
91 | |
92 return result['retcode'] is 0 and parsed.has_key('passed') and parsed['passed'] is '1' | |
93 | |
94 def bench(self, vm, cwd=None, opts=None): | |
95 parser = OutputParser(nonZeroIsFatal = False) | |
96 | |
97 for scoreMatcher in self.scoreMatchers: | |
98 parser.addMatcher(scoreMatcher) | |
99 | |
100 result = parser.parse(self.vmOpts + opts + self.cmd, vm, cwd) | |
101 | |
102 parsed = result['parsed'] | |
103 | |
104 ret = {} | |
105 | |
106 for line in parsed: | |
107 assert line.has_key('name') and line.has_key('score') | |
108 ret[line['name']] = line['score'] | |
109 | |
110 return ret | |
111 |