Mercurial > hg > graal-jvmci-8
comparison mx/sanitycheck.py @ 4219:47f7d91d34cf
Fix javac build issues with classpath
WIP for benchmarks
author | Gilles Duboscq <gilles.m.duboscq@gmail.com> |
---|---|
date | Wed, 04 Jan 2012 22:54:27 +0100 |
parents | f3271682fe5a |
children | 339cf8d4904d |
comparison
equal
deleted
inserted
replaced
4218:03eaec130ed1 | 4219:47f7d91d34cf |
---|---|
4 import os | 4 import os |
5 import commands | 5 import commands |
6 from os.path import isfile | 6 from os.path import isfile |
7 | 7 |
# Warm-up run counts for each DaCapo benchmark.  Each benchmark maps to one
# entry per SanityCheckLevel (Fast, Gate, Normal, Extensive, Benchmark) —
# presumably indexed by level inside getDacapos (middle of that function is
# not visible here; confirm).  A count of 0 means the benchmark is not run
# at that level.
dacapoSanityWarmup = {
    'avrora':     [0, 0,  3,  6, 10],
    'batik':      [0, 0,  5,  5, 20],
    'eclipse':    [2, 4,  5, 10, 13],
    'fop':        [4, 8, 10, 20, 30],
    'h2':         [0, 0,  5,  5,  5],
    'jython':     [0, 0,  5, 10, 10],
    'luindex':    [0, 0,  5, 10, 10],
    'lusearch':   [0, 4,  5,  5,  5],
    'pmd':        [0, 0,  5, 10, 10],
    'sunflow':    [0, 0,  5, 10, 15],
    'tomcat':     [0, 0,  5, 10, 10],
    'tradebeans': [0, 0,  5, 10, 10],
    'tradesoap':  [2, 4,  5, 10, 10],
    'xalan':      [0, 0,  5, 10, 15],
}
24 | 24 |
class SanityCheckLevel:
    """Intensity levels for sanity checks, ordered cheapest to most thorough.

    Plain integer constants (0..4) so levels can be compared and used to
    index the per-benchmark warm-up tables.
    """
    Fast = 0
    Gate = 1
    Normal = 2
    Extensive = 3
    Benchmark = 4
27 | |
28 def getSPECjvm2008(): | |
29 score = re.compile(r"^((Score on|Noncompliant) )?(?P<benchmark>[a-zA-Z0-9\.-]+)( result)?: (?P<score>[0-9]+,[0-9]+)( SPECjvm2008 Base)? ops/m$") | |
30 matcher = Matcher(score, {'const:name' : 'benchmark', 'const:score' : 'score'}) | |
27 | 31 |
28 def getDacapos(level=SanityCheckLevel.Normal, dacapoArgs=[]): | 32 def getDacapos(level=SanityCheckLevel.Normal, dacapoArgs=[]): |
29 checks = [] | 33 checks = [] |
30 | 34 |
31 for (bench, ns) in dacapoSanityWarmup.items(): | 35 for (bench, ns) in dacapoSanityWarmup.items(): |
48 | 52 |
49 dacapoMatcher = Matcher(dacapoTime, {'const:name' : 'benchmark', 'const:score' : 'time'}) | 53 dacapoMatcher = Matcher(dacapoTime, {'const:name' : 'benchmark', 'const:score' : 'time'}) |
50 | 54 |
51 return Test("DaCapo-" + name, "DaCapo", ['-jar', dacapo, name, '-n', str(n), ] + dacapoArgs, [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms1g', '-Xmx2g', '-XX:MaxPermSize=256m']) | 55 return Test("DaCapo-" + name, "DaCapo", ['-jar', dacapo, name, '-n', str(n), ] + dacapoArgs, [dacapoSuccess], [dacapoFail], [dacapoMatcher], ['-Xms1g', '-Xmx2g', '-XX:MaxPermSize=256m']) |
52 | 56 |
def getBootstraps():
    """Build the Graal bootstrap timing tests.

    Returns a list of two Test objects that bootstrap Graal via ``-version``:
    one with default heap settings and one with a 2g initial heap.  The
    bootstrap time printed by the VM is captured as the score under the
    constant name ``BootstrapTime``.
    """
    time = re.compile(r"Bootstrapping Graal............... in (?P<time>[0-9]+) ms")
    scoreMatcher = Matcher(time, {'const:name' : 'const:BootstrapTime', 'const:score' : 'time'})
    tests = []
    tests.append(Test("Bootstrap", "Bootstrap", ['-version'], succesREs=[time], scoreMatchers=[scoreMatcher]))
    # Fix: Test's signature is (name, group, cmd, ...); the big-heap variant
    # previously reused the name "Bootstrap" and passed "Bootstrap-bigHeap"
    # as the *group*, so the two results were indistinguishable by name.
    # Give it a distinct name and keep both in the shared "Bootstrap" group.
    tests.append(Test("Bootstrap-bigHeap", "Bootstrap", ['-version'], succesREs=[time], scoreMatchers=[scoreMatcher], vmOpts=['-Xms2g']))
    return tests
64 | |
53 class Test: | 65 class Test: |
54 def __init__(self, name, group, cmd, succesREs=[], failureREs=[], scoreMatchers=[], vmOpts=[]): | 66 def __init__(self, name, group, cmd, succesREs=[], failureREs=[], scoreMatchers=[], vmOpts=[]): |
55 self.name = name | 67 self.name = name |
56 self.group = group | 68 self.group = group |
57 self.succesREs = succesREs | 69 self.succesREs = succesREs |
58 self.failureREs = failureREs | 70 self.failureREs = failureREs |
59 self.scoreMatchers = scoreMatchers | 71 self.scoreMatchers = scoreMatchers |
60 self.vmOpts = vmOpts | 72 self.vmOpts = vmOpts |
61 self.cmd = cmd | 73 self.cmd = cmd |
62 | 74 |
63 def test(self, vm, cwd=None, opts=[]): | 75 def test(self, vm, cwd=None, opts=[], vmbuild=None): |
64 parser = OutputParser(nonZeroIsFatal = False) | 76 parser = OutputParser(nonZeroIsFatal = False) |
65 jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)") | 77 jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)") |
66 parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'})) | 78 parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'})) |
67 | 79 |
68 for succesRE in self.succesREs: | 80 for succesRE in self.succesREs: |
69 parser.addMatcher(Matcher(succesRE, {'const:passed' : 'const:1'})) | 81 parser.addMatcher(Matcher(succesRE, {'const:passed' : 'const:1'})) |
70 for failureRE in self.failureREs: | 82 for failureRE in self.failureREs: |
71 parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'})) | 83 parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'})) |
72 | 84 |
73 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd) | 85 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd, vmbuild) |
74 | 86 |
75 parsedLines = result['parsed'] | 87 parsedLines = result['parsed'] |
76 assert len(parsedLines) == 1, 'Test matchers should not return more than one line' | 88 assert len(parsedLines) == 1, 'Test matchers should not return more than one line' |
77 | 89 |
78 parsed = parsedLines[0] | 90 parsed = parsedLines[0] |
89 if parsed.has_key('failed') and parsed['failed'] is 1: | 101 if parsed.has_key('failed') and parsed['failed'] is 1: |
90 return False | 102 return False |
91 | 103 |
92 return result['retcode'] is 0 and parsed.has_key('passed') and parsed['passed'] is '1' | 104 return result['retcode'] is 0 and parsed.has_key('passed') and parsed['passed'] is '1' |
93 | 105 |
94 def bench(self, vm, cwd=None, opts=[]): | 106 def bench(self, vm, cwd=None, opts=[], vmbuild=None): |
95 parser = OutputParser(nonZeroIsFatal = False) | 107 parser = OutputParser(nonZeroIsFatal = False) |
96 | 108 |
97 for scoreMatcher in self.scoreMatchers: | 109 for scoreMatcher in self.scoreMatchers: |
98 parser.addMatcher(scoreMatcher) | 110 parser.addMatcher(scoreMatcher) |
99 | 111 |
100 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd) | 112 result = parser.parse(vm, self.vmOpts + opts + self.cmd, cwd, vmbuild) |
101 if result['retcode'] is not 0: | 113 if result['retcode'] is not 0: |
102 return {} | 114 return {} |
103 | 115 |
104 parsed = result['parsed'] | 116 parsed = result['parsed'] |
105 | 117 |