mx.graal/mx_graal_bench.py
comparison 22951:d74202a599fe -> 22952:1ba34f16d176

Factor out benchmarks from mx_graal_n.py and add specjbb2015

author:   Tom Rodriguez <tom.rodriguez@oracle.com>
date:     Wed, 04 Nov 2015 11:05:27 -0800
children: debc2385369b

#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------

import sanitycheck
import itertools
import json

import mx
from mx_jvmci import run_vm
from mx_graal import get_vm

def _run_benchmark(args, availableBenchmarks, runBenchmark):

    vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)

    if availableBenchmarks is None:
        harnessArgs = benchmarksAndOptions
        return runBenchmark(None, harnessArgs, vmOpts)

    if len(benchmarksAndOptions) == 0:
        mx.abort('at least one benchmark name or "all" must be specified')
    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
    harnessArgs = benchmarksAndOptions[len(benchmarks):]

    if 'all' in benchmarks:
        benchmarks = availableBenchmarks
    else:
        for bm in benchmarks:
            if bm not in availableBenchmarks:
                mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))

    failed = []
    for bm in benchmarks:
        if not runBenchmark(bm, harnessArgs, vmOpts):
            failed.append(bm)

    if len(failed) != 0:
        mx.abort('Benchmark failures: ' + str(failed))

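# A sketch of how _run_benchmark splits its input, with assumed example values
# (not from a real run): for args = ['-Xmx2g', 'fop', 'luindex', '-n', '5'],
# mx.extract_VM_args is expected to return vmOpts = ['-Xmx2g'] and
# benchmarksAndOptions = ['fop', 'luindex', '-n', '5']; the takewhile above
# then selects benchmarks = ['fop', 'luindex'] and leaves harnessArgs = ['-n', '5'].
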
def deoptalot(args):
    """bootstrap a VM with DeoptimizeALot and VerifyOops on

    If the first argument is a number, the process will be repeated
    this number of times. All other arguments are passed to the VM."""
    count = 1
    if len(args) > 0 and args[0].isdigit():
        count = int(args[0])
        del args[0]

    for _ in range(count):
        if run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) != 0:
            mx.abort("Failed")

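# e.g. deoptalot(['15', '-Xmx48m']) bootstraps the VM 15 times with a 48 MB
# heap, as longtests() below does.
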
def longtests(args):
    """run a longer set of tests: deoptalot and a DaCapo eclipse run"""

    deoptalot(['15', '-Xmx48m'])

    dacapo(['100', 'eclipse', '-esa'])

def dacapo(args):
    """run one or more DaCapo benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)

def scaladacapo(args):
    """run one or more Scala DaCapo benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getScalaDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)


100 """ | |
101 Extra benchmarks to run from 'bench()'. | |
102 """ | |
103 extraBenchmarks = [] | |
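# Other suites can extend bench() by appending a callback here; each callback
# is invoked as f(args, vm, benchmarks) and may append benchmark objects.
# A hypothetical registration, for illustration only:
#
#   def _my_suite_benchmarks(args, vm, benchmarks):
#       if 'mybench' in args:
#           benchmarks.append(sanitycheck.getDacapo('fop', ['-n', '10']))
#
#   extraBenchmarks.append(_my_suite_benchmarks)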

def bench(args):
    """run benchmarks and parse their output for results

    Results are JSON formatted: {group : {benchmark : score}}."""
    resultFile = None
    if '-resultfile' in args:
        index = args.index('-resultfile')
        if index + 1 < len(args):
            resultFile = args[index + 1]
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfile must be followed by a file name')
    resultFileCSV = None
    if '-resultfilecsv' in args:
        index = args.index('-resultfilecsv')
        if index + 1 < len(args):
            resultFileCSV = args[index + 1]
            del args[index]
            del args[index]
        else:
            mx.abort('-resultfilecsv must be followed by a file name')
    vm = get_vm()
    if len(args) == 0:
        args = ['all']

    vmArgs = [arg for arg in args if arg.startswith('-')]

    def benchmarks_in_group(group):
        prefix = group + ':'
        return [a[len(prefix):] for a in args if a.startswith(prefix)]
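    # e.g. for args == ['dacapo:fop', 'dacapo:luindex'],
    # benchmarks_in_group('dacapo') returns ['fop', 'luindex'].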

    results = {}
    benchmarks = []
    # DaCapo
    if 'dacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        dacapos = benchmarks_in_group('dacapo')
        for dacapo in dacapos:
            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
                mx.abort('Unknown DaCapo: ' + dacapo)
            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]

    # Scala DaCapo
    if 'scaladacapo' in args or 'all' in args:
        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
    else:
        scaladacapos = benchmarks_in_group('scaladacapo')
        for scaladacapo in scaladacapos:
            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
                mx.abort('Unknown Scala DaCapo: ' + scaladacapo)
            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
            if iterations > 0:
                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]

    # Bootstrap
    if 'bootstrap' in args or 'all' in args:
        benchmarks += sanitycheck.getBootstraps()
    # SPECjvm2008
    if 'specjvm2008' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
    else:
        specjvms = benchmarks_in_group('specjvm2008')
        for specjvm in specjvms:
            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]

    if 'specjbb2005' in args or 'all' in args:
        benchmarks += [sanitycheck.getSPECjbb2005()]

    if 'specjbb2013' in args:  # or 'all' in args -- currently not in the default set
        benchmarks += [sanitycheck.getSPECjbb2013()]

    if 'ctw-full' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
    if 'ctw-noinline' in args:
        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))

    for f in extraBenchmarks:
        f(args, vm, benchmarks)

    for test in benchmarks:
        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
            group = results.setdefault(groupName, {})
            group.update(res)
    mx.log(json.dumps(results))
    if resultFile:
        with open(resultFile, 'w') as f:
            f.write(json.dumps(results))
    if resultFileCSV:
        with open(resultFileCSV, 'w') as f:
            for key1, value1 in results.iteritems():
                f.write('%s;\n' % (str(key1)))
                for key2, value2 in sorted(value1.iteritems()):
                    f.write('%s; %s;\n' % (str(key2), str(value2)))
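    # A sketch of the resulting semicolon-separated layout, with placeholder
    # scores: one header line per group, then one 'name; score;' line per
    # benchmark in that group:
    #   dacapo;
    #   fop; 4321;
    #   luindex; 1234;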

def specjvm2008(args):
    """run one or more SPECjvm2008 benchmarks"""

    def launcher(bm, harnessArgs, extraVmOpts):
        return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(get_vm(), extraVmOpts=extraVmOpts)

    availableBenchmarks = set(sanitycheck.specjvm2008Names)
    if "all" not in args:
        # only add benchmark groups if we are not running "all"
        for name in sanitycheck.specjvm2008Names:
            parts = name.rsplit('.', 1)
            if len(parts) > 1:
                assert len(parts) == 2
                group = parts[0]
                availableBenchmarks.add(group)
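    # e.g. a name like 'scimark.fft.large' yields the group 'scimark.fft',
    # which then becomes selectable alongside the full benchmark names.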

    _run_benchmark(args, sorted(availableBenchmarks), launcher)

def specjbb2013(args):
    """run the composite SPECjbb2013 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2013(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

def specjbb2015(args):
    """run the composite SPECjbb2015 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2015(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

def specjbb2005(args):
    """run the composite SPECjbb2005 benchmark"""

    def launcher(bm, harnessArgs, extraVmOpts):
        assert bm is None
        return sanitycheck.getSPECjbb2005(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)

    _run_benchmark(args, None, launcher)

mx.update_commands(mx.suite('graal'), {
    'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
    'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
    'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
    'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
    'specjbb2015': [specjbb2015, '[VM options] [-- [SPECjbb2015 options]]'],
    'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
    'bench': [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
    'deoptalot': [deoptalot, '[n]'],
    'longtests': [longtests, ''],
})
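
# Illustrative invocations (assumed, matching the usage strings above):
#   mx dacapo -Xmx2g fop luindex -n 5
#   mx bench -resultfile results.json dacapo specjvm2008
#   mx specjbb2015 -Xmx8g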