changeset 22952:1ba34f16d176

Factor out benchmarks from mx_graal_n.py and add specjbb2015
author Tom Rodriguez <tom.rodriguez@oracle.com>
date Wed, 04 Nov 2015 11:05:27 -0800
parents d74202a599fe
children 81fbb33e5d4b
files mx.graal/mx_graal.py mx.graal/mx_graal_8.py mx.graal/mx_graal_9.py mx.graal/mx_graal_bench.py mx.graal/sanitycheck.py
diffstat 5 files changed, 282 insertions(+), 425 deletions(-)
--- a/mx.graal/mx_graal.py	Thu Nov 05 18:26:45 2015 +0100
+++ b/mx.graal/mx_graal.py	Wed Nov 04 11:05:27 2015 -0800
@@ -27,10 +27,17 @@
 import mx
 JDK9 = mx.get_jdk(tag='default').javaCompliance >= "1.9"
 
+def get_vm():
+    return _get_vm()
+
 if JDK9:
     import mx_graal_9 # pylint: disable=unused-import
     from mx_graal_9 import mx_post_parse_cmd_line, run_vm, isJVMCIEnabled # pylint: disable=unused-import
+    from mx_graal_9 import get_vm as _get_vm # pylint: disable=unused-import
 
 else:
     import mx_graal_8 # pylint: disable=unused-import
     from mx_graal_8 import mx_post_parse_cmd_line, run_vm, isJVMCIEnabled # pylint: disable=unused-import
+    from mx_graal_8 import get_vm as _get_vm # pylint: disable=unused-import
+
+import mx_graal_bench # pylint: disable=unused-import
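
The get_vm() wrapper added here gives the new mx_graal_bench.py a single, stable import point: the conditional import above binds _get_vm to whichever JDK-specific module is in use, and the wrapper simply forwards to it. A minimal, self-contained sketch of the same forwarding pattern (the stand-in functions and the hard-coded JDK9 flag are illustrative, not the real mx modules):

    # sketch only: stand-ins for mx_graal_8.get_vm / mx_graal_9.get_vm
    def _get_vm_jdk8():
        return 'jvmci-jdk8'

    def _get_vm_jdk9():
        return 'jvmci-jdk9'

    JDK9 = True                        # stands in for the javaCompliance >= "1.9" check
    _get_vm = _get_vm_jdk9 if JDK9 else _get_vm_jdk8

    def get_vm():                      # stable entry point other modules import
        return _get_vm()

    print(get_vm())                    # 'jvmci-jdk9'
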
--- a/mx.graal/mx_graal_8.py	Thu Nov 05 18:26:45 2015 +0100
+++ b/mx.graal/mx_graal_8.py	Wed Nov 04 11:05:27 2015 -0800
@@ -28,8 +28,6 @@
 from os.path import join, exists, basename
 from argparse import ArgumentParser
 import sanitycheck
-import itertools
-import json
 import re
 
 import mx
@@ -39,6 +37,7 @@
 from sanitycheck import _noneAsEmptyList
 
 from mx_unittest import unittest
+from mx_graal_bench import dacapo
 import mx_gate
 
 _suite = mx.suite('graal')
@@ -98,50 +97,6 @@
 mx_gate.add_jacoco_includes(['com.oracle.graal.*'])
 mx_gate.add_jacoco_excluded_annotations(['@Snippet', '@ClassSubstitution'])
 
-def _run_benchmark(args, availableBenchmarks, runBenchmark):
-
-    vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
-
-    if availableBenchmarks is None:
-        harnessArgs = benchmarksAndOptions
-        return runBenchmark(None, harnessArgs, vmOpts)
-
-    if len(benchmarksAndOptions) == 0:
-        mx.abort('at least one benchmark name or "all" must be specified')
-    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
-    harnessArgs = benchmarksAndOptions[len(benchmarks):]
-
-    if 'all' in benchmarks:
-        benchmarks = availableBenchmarks
-    else:
-        for bm in benchmarks:
-            if bm not in availableBenchmarks:
-                mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
-
-    failed = []
-    for bm in benchmarks:
-        if not runBenchmark(bm, harnessArgs, vmOpts):
-            failed.append(bm)
-
-    if len(failed) != 0:
-        mx.abort('Benchmark failures: ' + str(failed))
-
-def dacapo(args):
-    """run one or more DaCapo benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
-
-def scaladacapo(args):
-    """run one or more Scala DaCapo benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getScalaDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
-
 # This is different from the 'jmh' command in that it
 # looks for internal JMH benchmarks (i.e. those that
 # depend on the JMH library).
@@ -349,164 +304,6 @@
 mx_gate.add_gate_runner(_suite, _graal_gate_runner)
 mx_gate.add_gate_argument('--extra-vm-argument', action='append', help='add extra vm argument to gate tasks if applicable (multiple occurrences allowed)')
 
-def deoptalot(args):
-    """bootstrap a VM with DeoptimizeALot and VerifyOops on
-
-    If the first argument is a number, the process will be repeated
-    this number of times. All other arguments are passed to the VM."""
-    count = 1
-    if len(args) > 0 and args[0].isdigit():
-        count = int(args[0])
-        del args[0]
-
-    for _ in range(count):
-        if not run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) == 0:
-            mx.abort("Failed")
-
-def longtests(args):
-
-    deoptalot(['15', '-Xmx48m'])
-
-    dacapo(['100', 'eclipse', '-esa'])
-
-"""
-Extra benchmarks to run from 'bench()'.
-"""
-extraBenchmarks = []
-
-def bench(args):
-    """run benchmarks and parse their output for results
-
-    Results are JSON formated : {group : {benchmark : score}}."""
-    resultFile = None
-    if '-resultfile' in args:
-        index = args.index('-resultfile')
-        if index + 1 < len(args):
-            resultFile = args[index + 1]
-            del args[index]
-            del args[index]
-        else:
-            mx.abort('-resultfile must be followed by a file name')
-    resultFileCSV = None
-    if '-resultfilecsv' in args:
-        index = args.index('-resultfilecsv')
-        if index + 1 < len(args):
-            resultFileCSV = args[index + 1]
-            del args[index]
-            del args[index]
-        else:
-            mx.abort('-resultfilecsv must be followed by a file name')
-    vm = get_vm()
-    if len(args) is 0:
-        args = ['all']
-
-    vmArgs = [arg for arg in args if arg.startswith('-')]
-
-    def benchmarks_in_group(group):
-        prefix = group + ':'
-        return [a[len(prefix):] for a in args if a.startswith(prefix)]
-
-    results = {}
-    benchmarks = []
-    # DaCapo
-    if 'dacapo' in args or 'all' in args:
-        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
-    else:
-        dacapos = benchmarks_in_group('dacapo')
-        for dacapo in dacapos:
-            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
-                mx.abort('Unknown DaCapo : ' + dacapo)
-            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
-            if iterations > 0:
-                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]
-
-    if 'scaladacapo' in args or 'all' in args:
-        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
-    else:
-        scaladacapos = benchmarks_in_group('scaladacapo')
-        for scaladacapo in scaladacapos:
-            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
-                mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
-            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
-            if iterations > 0:
-                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
-
-    # Bootstrap
-    if 'bootstrap' in args or 'all' in args:
-        benchmarks += sanitycheck.getBootstraps()
-    # SPECjvm2008
-    if 'specjvm2008' in args or 'all' in args:
-        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
-    else:
-        specjvms = benchmarks_in_group('specjvm2008')
-        for specjvm in specjvms:
-            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
-
-    if 'specjbb2005' in args or 'all' in args:
-        benchmarks += [sanitycheck.getSPECjbb2005()]
-
-    if 'specjbb2013' in args:  # or 'all' in args //currently not in default set
-        benchmarks += [sanitycheck.getSPECjbb2013()]
-
-    if 'ctw-full' in args:
-        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
-    if 'ctw-noinline' in args:
-        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
-
-    for f in extraBenchmarks:
-        f(args, vm, benchmarks)
-
-    for test in benchmarks:
-        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
-            group = results.setdefault(groupName, {})
-            group.update(res)
-    mx.log(json.dumps(results))
-    if resultFile:
-        with open(resultFile, 'w') as f:
-            f.write(json.dumps(results))
-    if resultFileCSV:
-        with open(resultFileCSV, 'w') as f:
-            for key1, value1 in results.iteritems():
-                f.write('%s;\n' % (str(key1)))
-                for key2, value2 in sorted(value1.iteritems()):
-                    f.write('%s; %s;\n' % (str(key2), str(value2)))
-
-def specjvm2008(args):
-    """run one or more SPECjvm2008 benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    availableBenchmarks = set(sanitycheck.specjvm2008Names)
-    if "all" not in args:
-        # only add benchmark groups if we are not running "all"
-        for name in sanitycheck.specjvm2008Names:
-            parts = name.rsplit('.', 1)
-            if len(parts) > 1:
-                assert len(parts) == 2
-                group = parts[0]
-                availableBenchmarks.add(group)
-
-    _run_benchmark(args, sorted(availableBenchmarks), launcher)
-
-def specjbb2013(args):
-    """run the composite SPECjbb2013 benchmark"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        assert bm is None
-        return sanitycheck.getSPECjbb2013(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, None, launcher)
-
-def specjbb2005(args):
-    """run the composite SPECjbb2005 benchmark"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        assert bm is None
-        return sanitycheck.getSPECjbb2005(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, None, launcher)
-
 def jdkartifactstats(args):
     """show stats about JDK deployed Graal artifacts"""
     artifacts = {}
@@ -567,17 +364,9 @@
         print '{:>10}  {}'.format('<missing>', jvmLib)
 
 mx.update_commands(_suite, {
+    'jdkartifactstats' : [jdkartifactstats, ''],
     'ctw': [ctw, '[-vmoptions|noinline|nocomplex|full]'],
-    'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
-    'jdkartifactstats' : [jdkartifactstats, ''],
-    'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
-    'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
-    'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
-    'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
-    'bench' : [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
     'microbench' : [microbench, '[VM options] [-- [JMH options]]'],
-    'deoptalot' : [deoptalot, '[n]'],
-    'longtests' : [longtests, ''],
 })
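
For reference, the bench command removed above (and re-added in mx_graal_bench.py below) reports results as {group : {benchmark : score}}, logging them as JSON and optionally writing a JSON file (-resultfile) or a semicolon-separated file (-resultfilecsv). A small sketch with made-up scores showing the two output shapes:

    import json

    results = {'DaCapo': {'fop': 1423.0, 'luindex': 987.5}}   # hypothetical scores

    print(json.dumps(results))             # what 'mx bench' logs / writes for -resultfile

    with open('results.csv', 'w') as f:    # layout produced for -resultfilecsv
        for group, scores in results.items():
            f.write('%s;\n' % group)
            for name, score in sorted(scores.items()):
                f.write('%s; %s;\n' % (name, score))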
 
 
--- a/mx.graal/mx_graal_9.py	Thu Nov 05 18:26:45 2015 +0100
+++ b/mx.graal/mx_graal_9.py	Wed Nov 04 11:05:27 2015 -0800
@@ -28,8 +28,6 @@
 from os.path import join
 from argparse import ArgumentParser
 import sanitycheck
-import itertools
-import json
 import re
 
 import mx
@@ -37,6 +35,7 @@
 from sanitycheck import _noneAsEmptyList
 
 from mx_unittest import unittest
+from mx_graal_bench import dacapo
 import mx_gate
 import mx_unittest
 
@@ -115,50 +114,6 @@
 mx_gate.add_jacoco_includes(['com.oracle.graal.*'])
 mx_gate.add_jacoco_excluded_annotations(['@Snippet', '@ClassSubstitution'])
 
-def _run_benchmark(args, availableBenchmarks, runBenchmark):
-
-    vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
-
-    if availableBenchmarks is None:
-        harnessArgs = benchmarksAndOptions
-        return runBenchmark(None, harnessArgs, vmOpts)
-
-    if len(benchmarksAndOptions) == 0:
-        mx.abort('at least one benchmark name or "all" must be specified')
-    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
-    harnessArgs = benchmarksAndOptions[len(benchmarks):]
-
-    if 'all' in benchmarks:
-        benchmarks = availableBenchmarks
-    else:
-        for bm in benchmarks:
-            if bm not in availableBenchmarks:
-                mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
-
-    failed = []
-    for bm in benchmarks:
-        if not runBenchmark(bm, harnessArgs, vmOpts):
-            failed.append(bm)
-
-    if len(failed) != 0:
-        mx.abort('Benchmark failures: ' + str(failed))
-
-def dacapo(args):
-    """run one or more DaCapo benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
-
-def scaladacapo(args):
-    """run one or more Scala DaCapo benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getScalaDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
-
 # This is different from the 'jmh' command in that it
 # looks for internal JMH benchmarks (i.e. those that
 # depend on the JMH library).
@@ -343,164 +298,6 @@
 
 mx_unittest.set_vm_launcher('JDK9 VM launcher', _unittest_vm_launcher)
 
-def deoptalot(args):
-    """bootstrap a VM with DeoptimizeALot and VerifyOops on
-
-    If the first argument is a number, the process will be repeated
-    this number of times. All other arguments are passed to the VM."""
-    count = 1
-    if len(args) > 0 and args[0].isdigit():
-        count = int(args[0])
-        del args[0]
-
-    for _ in range(count):
-        if not run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) == 0:
-            mx.abort("Failed")
-
-def longtests(args):
-
-    deoptalot(['15', '-Xmx48m'])
-
-    dacapo(['100', 'eclipse', '-esa'])
-
-"""
-Extra benchmarks to run from 'bench()'.
-"""
-extraBenchmarks = []
-
-def bench(args):
-    """run benchmarks and parse their output for results
-
-    Results are JSON formated : {group : {benchmark : score}}."""
-    resultFile = None
-    if '-resultfile' in args:
-        index = args.index('-resultfile')
-        if index + 1 < len(args):
-            resultFile = args[index + 1]
-            del args[index]
-            del args[index]
-        else:
-            mx.abort('-resultfile must be followed by a file name')
-    resultFileCSV = None
-    if '-resultfilecsv' in args:
-        index = args.index('-resultfilecsv')
-        if index + 1 < len(args):
-            resultFileCSV = args[index + 1]
-            del args[index]
-            del args[index]
-        else:
-            mx.abort('-resultfilecsv must be followed by a file name')
-    vm = get_vm()
-    if len(args) is 0:
-        args = ['all']
-
-    vmArgs = [arg for arg in args if arg.startswith('-')]
-
-    def benchmarks_in_group(group):
-        prefix = group + ':'
-        return [a[len(prefix):] for a in args if a.startswith(prefix)]
-
-    results = {}
-    benchmarks = []
-    # DaCapo
-    if 'dacapo' in args or 'all' in args:
-        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
-    else:
-        dacapos = benchmarks_in_group('dacapo')
-        for dacapo in dacapos:
-            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
-                mx.abort('Unknown DaCapo : ' + dacapo)
-            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
-            if iterations > 0:
-                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]
-
-    if 'scaladacapo' in args or 'all' in args:
-        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
-    else:
-        scaladacapos = benchmarks_in_group('scaladacapo')
-        for scaladacapo in scaladacapos:
-            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
-                mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
-            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
-            if iterations > 0:
-                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
-
-    # Bootstrap
-    if 'bootstrap' in args or 'all' in args:
-        benchmarks += sanitycheck.getBootstraps()
-    # SPECjvm2008
-    if 'specjvm2008' in args or 'all' in args:
-        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
-    else:
-        specjvms = benchmarks_in_group('specjvm2008')
-        for specjvm in specjvms:
-            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
-
-    if 'specjbb2005' in args or 'all' in args:
-        benchmarks += [sanitycheck.getSPECjbb2005()]
-
-    if 'specjbb2013' in args:  # or 'all' in args //currently not in default set
-        benchmarks += [sanitycheck.getSPECjbb2013()]
-
-    if 'ctw-full' in args:
-        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
-    if 'ctw-noinline' in args:
-        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
-
-    for f in extraBenchmarks:
-        f(args, vm, benchmarks)
-
-    for test in benchmarks:
-        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
-            group = results.setdefault(groupName, {})
-            group.update(res)
-    mx.log(json.dumps(results))
-    if resultFile:
-        with open(resultFile, 'w') as f:
-            f.write(json.dumps(results))
-    if resultFileCSV:
-        with open(resultFileCSV, 'w') as f:
-            for key1, value1 in results.iteritems():
-                f.write('%s;\n' % (str(key1)))
-                for key2, value2 in sorted(value1.iteritems()):
-                    f.write('%s; %s;\n' % (str(key2), str(value2)))
-
-def specjvm2008(args):
-    """run one or more SPECjvm2008 benchmarks"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    availableBenchmarks = set(sanitycheck.specjvm2008Names)
-    if "all" not in args:
-        # only add benchmark groups if we are not running "all"
-        for name in sanitycheck.specjvm2008Names:
-            parts = name.rsplit('.', 1)
-            if len(parts) > 1:
-                assert len(parts) == 2
-                group = parts[0]
-                availableBenchmarks.add(group)
-
-    _run_benchmark(args, sorted(availableBenchmarks), launcher)
-
-def specjbb2013(args):
-    """run the composite SPECjbb2013 benchmark"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        assert bm is None
-        return sanitycheck.getSPECjbb2013(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, None, launcher)
-
-def specjbb2005(args):
-    """run the composite SPECjbb2005 benchmark"""
-
-    def launcher(bm, harnessArgs, extraVmOpts):
-        assert bm is None
-        return sanitycheck.getSPECjbb2005(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
-
-    _run_benchmark(args, None, launcher)
-
 def _parseVmArgs(jdk, args, addDefaultArgs=True):
     args = mx.expand_project_in_args(args, insitu=False)
     jacocoArgs = mx_gate.get_jacoco_agent_args()
@@ -602,15 +399,7 @@
 mx.update_commands(_suite, {
     'vm': [run_vm, '[-options] class [args...]'],
     'ctw': [ctw, '[-vmoptions|noinline|nocomplex|full]'],
-    'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
-    'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
-    'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
-    'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
-    'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
-    'bench' : [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
     'microbench' : [microbench, '[VM options] [-- [JMH options]]'],
-    'deoptalot' : [deoptalot, '[n]'],
-    'longtests' : [longtests, ''],
 })
 
 mx.add_argument('-M', '--jvmci-mode', action='store', choices=sorted(_jvmciModes.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmciMode + ')')
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mx.graal/mx_graal_bench.py	Wed Nov 04 11:05:27 2015 -0800
@@ -0,0 +1,257 @@
+#
+# ----------------------------------------------------------------------------------------------------
+#
+# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+# ----------------------------------------------------------------------------------------------------
+
+import sanitycheck
+import itertools
+import json
+
+import mx
+from mx_jvmci import run_vm
+from mx_graal import get_vm
+
+def _run_benchmark(args, availableBenchmarks, runBenchmark):
+
+    vmOpts, benchmarksAndOptions = mx.extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
+
+    if availableBenchmarks is None:
+        harnessArgs = benchmarksAndOptions
+        return runBenchmark(None, harnessArgs, vmOpts)
+
+    if len(benchmarksAndOptions) == 0:
+        mx.abort('at least one benchmark name or "all" must be specified')
+    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
+    harnessArgs = benchmarksAndOptions[len(benchmarks):]
+
+    if 'all' in benchmarks:
+        benchmarks = availableBenchmarks
+    else:
+        for bm in benchmarks:
+            if bm not in availableBenchmarks:
+                mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
+
+    failed = []
+    for bm in benchmarks:
+        if not runBenchmark(bm, harnessArgs, vmOpts):
+            failed.append(bm)
+
+    if len(failed) != 0:
+        mx.abort('Benchmark failures: ' + str(failed))
+
+def deoptalot(args):
+    """bootstrap a VM with DeoptimizeALot and VerifyOops on
+
+    If the first argument is a number, the process will be repeated
+    this number of times. All other arguments are passed to the VM."""
+    count = 1
+    if len(args) > 0 and args[0].isdigit():
+        count = int(args[0])
+        del args[0]
+
+    for _ in range(count):
+        if not run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) == 0:
+            mx.abort("Failed")
+
+def longtests(args):
+
+    deoptalot(['15', '-Xmx48m'])
+
+    dacapo(['100', 'eclipse', '-esa'])
+
+def dacapo(args):
+    """run one or more DaCapo benchmarks"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        return sanitycheck.getDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
+
+    _run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
+
+def scaladacapo(args):
+    """run one or more Scala DaCapo benchmarks"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        return sanitycheck.getScalaDacapo(bm, harnessArgs).test(get_vm(), extraVmOpts=extraVmOpts)
+
+    _run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
+
+
+"""
+Extra benchmarks to run from 'bench()'.
+"""
+extraBenchmarks = []
+
+def bench(args):
+    """run benchmarks and parse their output for results
+
+    Results are JSON formatted: {group : {benchmark : score}}."""
+    resultFile = None
+    if '-resultfile' in args:
+        index = args.index('-resultfile')
+        if index + 1 < len(args):
+            resultFile = args[index + 1]
+            del args[index]
+            del args[index]
+        else:
+            mx.abort('-resultfile must be followed by a file name')
+    resultFileCSV = None
+    if '-resultfilecsv' in args:
+        index = args.index('-resultfilecsv')
+        if index + 1 < len(args):
+            resultFileCSV = args[index + 1]
+            del args[index]
+            del args[index]
+        else:
+            mx.abort('-resultfilecsv must be followed by a file name')
+    vm = get_vm()
+    if len(args) == 0:
+        args = ['all']
+
+    vmArgs = [arg for arg in args if arg.startswith('-')]
+
+    def benchmarks_in_group(group):
+        prefix = group + ':'
+        return [a[len(prefix):] for a in args if a.startswith(prefix)]
+
+    results = {}
+    benchmarks = []
+    # DaCapo
+    if 'dacapo' in args or 'all' in args:
+        benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
+    else:
+        dacapos = benchmarks_in_group('dacapo')
+        for dacapo in dacapos:
+            if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
+                mx.abort('Unknown DaCapo : ' + dacapo)
+            iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
+            if iterations > 0:
+                benchmarks += [sanitycheck.getDacapo(dacapo, ['-n', str(iterations)])]
+
+    if 'scaladacapo' in args or 'all' in args:
+        benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
+    else:
+        scaladacapos = benchmarks_in_group('scaladacapo')
+        for scaladacapo in scaladacapos:
+            if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
+                mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
+            iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
+            if iterations > 0:
+                benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
+
+    # Bootstrap
+    if 'bootstrap' in args or 'all' in args:
+        benchmarks += sanitycheck.getBootstraps()
+    # SPECjvm2008
+    if 'specjvm2008' in args or 'all' in args:
+        benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
+    else:
+        specjvms = benchmarks_in_group('specjvm2008')
+        for specjvm in specjvms:
+            benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
+
+    if 'specjbb2005' in args or 'all' in args:
+        benchmarks += [sanitycheck.getSPECjbb2005()]
+
+    if 'specjbb2013' in args:  # or 'all' in args //currently not in default set
+        benchmarks += [sanitycheck.getSPECjbb2013()]
+
+    if 'ctw-full' in args:
+        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
+    if 'ctw-noinline' in args:
+        benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
+
+    for f in extraBenchmarks:
+        f(args, vm, benchmarks)
+
+    for test in benchmarks:
+        for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
+            group = results.setdefault(groupName, {})
+            group.update(res)
+    mx.log(json.dumps(results))
+    if resultFile:
+        with open(resultFile, 'w') as f:
+            f.write(json.dumps(results))
+    if resultFileCSV:
+        with open(resultFileCSV, 'w') as f:
+            for key1, value1 in results.iteritems():
+                f.write('%s;\n' % (str(key1)))
+                for key2, value2 in sorted(value1.iteritems()):
+                    f.write('%s; %s;\n' % (str(key2), str(value2)))
+
+def specjvm2008(args):
+    """run one or more SPECjvm2008 benchmarks"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(get_vm(), extraVmOpts=extraVmOpts)
+
+    availableBenchmarks = set(sanitycheck.specjvm2008Names)
+    if "all" not in args:
+        # only add benchmark groups if we are not running "all"
+        for name in sanitycheck.specjvm2008Names:
+            parts = name.rsplit('.', 1)
+            if len(parts) > 1:
+                assert len(parts) == 2
+                group = parts[0]
+                availableBenchmarks.add(group)
+
+    _run_benchmark(args, sorted(availableBenchmarks), launcher)
+
+def specjbb2013(args):
+    """run the composite SPECjbb2013 benchmark"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        assert bm is None
+        return sanitycheck.getSPECjbb2013(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
+
+    _run_benchmark(args, None, launcher)
+
+def specjbb2015(args):
+    """run the composite SPECjbb2015 benchmark"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        assert bm is None
+        return sanitycheck.getSPECjbb2015(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
+
+    _run_benchmark(args, None, launcher)
+
+def specjbb2005(args):
+    """run the composite SPECjbb2005 benchmark"""
+
+    def launcher(bm, harnessArgs, extraVmOpts):
+        assert bm is None
+        return sanitycheck.getSPECjbb2005(harnessArgs).bench(get_vm(), extraVmOpts=extraVmOpts)
+
+    _run_benchmark(args, None, launcher)
+
+mx.update_commands(mx.suite('graal'), {
+    'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
+    'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
+    'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
+    'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
+    'specjbb2015': [specjbb2015, '[VM options] [-- [SPECjbb2015 options]]'],
+    'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
+    'bench' : [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
+    'deoptalot' : [deoptalot, '[n]'],
+    'longtests' : [longtests, ''],
+})
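
The shared _run_benchmark helper above first lets mx.extract_VM_args peel off the VM options (using '--' as the separator when availableBenchmarks is None, i.e. for the composite SPECjbb commands) and then splits the remainder into leading benchmark names and trailing harness arguments. A standalone sketch of that second split, using a hypothetical 'mx dacapo' argument list:

    import itertools

    # what is left after mx.extract_VM_args has removed the VM options
    benchmarksAndOptions = ['fop', 'luindex', '-n', '5']      # hypothetical input

    benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
    harnessArgs = benchmarksAndOptions[len(benchmarks):]

    print(benchmarks)      # ['fop', 'luindex']
    print(harnessArgs)     # ['-n', '5']

Once this module is loaded via mx_graal.py, the new composite run is invoked as 'mx specjbb2015 [VM options] [-- [SPECjbb2015 options]]', per the help string registered above.
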
--- a/mx.graal/sanitycheck.py	Thu Nov 05 18:26:45 2015 +0100
+++ b/mx.graal/sanitycheck.py	Wed Nov 04 11:05:27 2015 -0800
@@ -175,6 +175,21 @@
                 _noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
                 vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2013)
 
+def getSPECjbb2015(benchArgs=None):
+
+    specjbb2015 = mx.get_env('SPECJBB2015')
+    if specjbb2015 is None or not exists(join(specjbb2015, 'specjbb2015.jar')):
+        mx.abort('Please set the SPECJBB2015 environment variable to a SPECjbb2015 directory')
+
+    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)
+    # TODO: should an error/failure pattern also be matched?
+    success = re.compile(r"org.spec.jbb.controller: Run finished", re.MULTILINE)
+    matcherMax = ValuesMatcher(jops, {'group' : 'SPECjbb2015', 'name' : 'max', 'score' : '<max>'})
+    matcherCritical = ValuesMatcher(jops, {'group' : 'SPECjbb2015', 'name' : 'critical', 'score' : '<critical>'})
+    return Test("SPECjbb2015", ['-jar', 'specjbb2015.jar', '-m', 'composite'] +
+                _noneAsEmptyList(benchArgs), [success], [], [matcherCritical, matcherMax],
+                vmOpts=['-Xmx6g', '-Xms6g', '-Xmn3g', '-XX:+UseParallelOldGC', '-XX:-UseAdaptiveSizePolicy', '-XX:-UseBiasedLocking', '-XX:-UseCompressedOops'], defaultCwd=specjbb2015)
+
 def getSPECjvm2008(benchArgs=None):
 
     specjvm2008 = mx.get_env('SPECJVM2008')
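
The new getSPECjbb2015 matcher above pulls max-jOPS and critical-jOPS out of the controller's final summary line. A small standalone check of that regular expression against a hypothetical result line (the numbers are made up):

    import re

    jops = re.compile(r"^RUN RESULT: hbIR \(max attempted\) = [0-9]+, hbIR \(settled\) = [0-9]+, "
                      r"max-jOPS = (?P<max>[0-9]+), critical-jOPS = (?P<critical>[0-9]+)$", re.MULTILINE)

    sample = "RUN RESULT: hbIR (max attempted) = 12000, hbIR (settled) = 11000, max-jOPS = 10234, critical-jOPS = 4567"
    m = jops.search(sample)
    print(m.group('max'))       # '10234'
    print(m.group('critical'))  # '4567'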