changeset 4228:e872562f95f8

add a simple specjvm2008 command, fail a benchmark if one of the failureRE matches
author Gilles Duboscq <gilles.m.duboscq@gmail.com>
date Thu, 05 Jan 2012 14:53:37 +0100
parents 1fe200db8c30
children 8074251d1e05
files mx/commands.py mx/sanitycheck.py
diffstat 2 files changed, 52 insertions(+), 22 deletions(-) [+]
line wrap: on
line diff
--- a/mx/commands.py	Thu Jan 05 14:25:58 2012 +0100
+++ b/mx/commands.py	Thu Jan 05 14:53:37 2012 +0100
@@ -588,13 +588,17 @@
         results[test.group].update(test.bench('-graal'))
     print results
     
+def specjvm2008(args):
+    sanitycheck.getSPECjvm2008().bench('-graal')
+    
 def mx_init():
     _vmbuild = 'product'
     commands = {
         'build': [build, '[-options]'],
         'clean': [clean, ''],
         'copyrightcheck': [copyrightcheck, ''],
-        'dacapo': [dacapo, '[benchmark] [VM options|DaCapo options]'],
+        'dacapo': [dacapo, '[[n] benchmark] [VM options|@DaCapo options]'],
+        'specjvm2008': [specjvm2008, ''],
         'example': [example, '[-v] example names...'],
         'gate' : [gate, ''],
         'bench' : [bench, ''],
--- a/mx/sanitycheck.py	Thu Jan 05 14:25:58 2012 +0100
+++ b/mx/sanitycheck.py	Thu Jan 05 14:53:37 2012 +0100
@@ -28,31 +28,49 @@
 import mx
 import os
 import commands
-from os.path import isfile
+from os.path import isfile, join, exists
 
 dacapoSanityWarmup = {
-    'avrora': [0, 0, 3, 6, 10],
-    'batik': [0 , 0, 5, 5, 20],
-    'eclipse': [2 , 4, 5, 10, 13],
-    'fop': [4 , 8, 10, 20, 30],
-    'h2': [0 , 0, 5, 5, 5],
-    'jython': [0 , 0, 5, 10, 10],
-    'luindex': [0 , 0, 5, 10, 10],
-    'lusearch': [0 , 4, 5, 5, 5],
-    'pmd': [0 , 0, 5, 10, 10],
-    'sunflow': [0 , 0, 5, 10, 15],
-    'tomcat': [0 , 0, 5, 10, 10],
-    'tradebeans': [0 , 0, 5, 10, 10],
-    'tradesoap': [2 , 4, 5, 10, 10],
-    'xalan': [0 , 0, 5, 10, 15],
+    'avrora':     [0, 0,  3,  6, 10],
+    'batik':      [0, 0,  5,  5, 20],
+    'eclipse':    [2, 4,  5, 10, 13],
+    'fop':        [4, 8, 10, 20, 30],
+    'h2':         [0, 0,  5,  5,  5],
+    'jython':     [0, 0,  5, 10, 10],
+    'luindex':    [0, 0,  5, 10, 10],
+    'lusearch':   [0, 4,  5,  5,  5],
+    'pmd':        [0, 0,  5, 10, 10],
+    'sunflow':    [0, 0,  5, 10, 15],
+    'tomcat':     [0, 0,  5, 10, 10],
+    'tradebeans': [0, 0,  5, 10, 10],
+    'tradesoap':  [2, 4,  5, 10, 10],
+    'xalan':      [0, 0,  5, 10, 15],
 }
 
 class SanityCheckLevel:
     Fast, Gate, Normal, Extensive, Benchmark = range(5)
     
-def getSPECjvm2008():
-    score = re.compile(r"^((Score on|Noncompliant) )?(?P<benchmark>[a-zA-Z0-9\.-]+)( result)?: (?P<score>[0-9]+,[0-9]+)( SPECjvm2008 Base)? ops/m$")
-    matcher = Matcher(score, {'const:name' : 'benchmark', 'const:score' : 'score'})
+def getSPECjvm2008(skipKitValidation=False, warmupTime=None, iterationTime=None):
+    
+    specjvm2008 = mx.get_env('SPECJVM2008')
+    if specjvm2008 is None or not exists(join(specjvm2008, 'SPECjvm2008.jar')):
+        mx.abort('Please set the SPECJVM2008 environment variable to a SPECjvm2008 directory')
+    
+    score = re.compile(r"^(Score on|Noncompliant) (?P<benchmark>[a-zA-Z0-9\.\-]+)( result)?: (?P<score>[0-9]+(,|\.)[0-9]+)( SPECjvm2008 Base)? ops/m$")
+    error = re.compile(r"^Errors in benchmark: ")
+    # The ' ops/m' at the end of the success string is important: it's how you can tell valid and invalid runs apart
+    success = re.compile(r"^(Noncompliant c|C)omposite result: [0-9]+(,|\.)[0-9]+( SPECjvm2008 (Base|Peak))? ops/m$")
+    matcher = Matcher(score, {'const:name' : 'benchmark', 'const:score' : 'score'}, startNewLine=True)
+    
+    opts = []
+    if warmupTime is not None:
+        opts += ['-wt', str(warmupTime)]
+    if iterationTime is not None:
+        opts += ['-it', str(iterationTime)]
+    if skipKitValidation:
+        opts += ['-ikv']
+    
+    return Test("SPECjvm2008", "SPECjvm2008", ['-jar', 'SPECjvm2008.jar'] + opts, [success], [error], [matcher], vmOpts=['-Xms2g'], defaultCwd=specjvm2008)
 
 def getDacapos(level=SanityCheckLevel.Normal, dacapoArgs=[]):
     checks = []
@@ -84,14 +102,14 @@
     scoreMatcher = Matcher(time, {'const:name' : 'const:BootstrapTime', 'const:score' : 'time'})
     tests = []
     tests.append(Test("Bootstrap", "Bootstrap", ['-version'], succesREs=[time], scoreMatchers=[scoreMatcher]))
-    tests.append(Test("Bootstrap", "Bootstrap-bigHeap", ['-version'], succesREs=[time], scoreMatchers=[scoreMatcher], vmOpts=['-Xms2g']))
+    tests.append(Test("Bootstrap-bigHeap", "Bootstrap-bigHeap", ['-version'], succesREs=[time], scoreMatchers=[scoreMatcher], vmOpts=['-Xms2g']))
     return tests
 
 """
 Encapsulates a single program that is a sanity test and/or a benchmark.
 """
 class Test:
-    def __init__(self, name, group, cmd, successREs=[], failureREs=[], scoreMatchers=[], vmOpts=[]):
+    def __init__(self, name, group, cmd, successREs=[], failureREs=[], scoreMatchers=[], vmOpts=[], defaultCwd=None):
         self.name = name
         self.group = group
         self.successREs = successREs
@@ -99,11 +117,14 @@
         self.scoreMatchers = scoreMatchers
         self.vmOpts = vmOpts
         self.cmd = cmd
+        self.defaultCwd = defaultCwd
     
     def test(self, vm, cwd=None, opts=[], vmbuild=None):
         """
         Run this program as a sanity test.
         """
+        if cwd is None:
+            cwd = self.defaultCwd
         parser = OutputParser(nonZeroIsFatal = False)
         jvmError = re.compile(r"(?P<jvmerror>([A-Z]:|/).*[/\\]hs_err_pid[0-9]+\.log)")
         parser.addMatcher(Matcher(jvmError, {'const:jvmError' : 'jvmerror'}))
@@ -138,8 +159,12 @@
         """
         Run this program as a benchmark.
         """
+        if cwd is None:
+            cwd = self.defaultCwd
         parser = OutputParser(nonZeroIsFatal = False)
         
+        for failureRE in self.failureREs:
+            parser.addMatcher(Matcher(failureRE, {'const:failed' : 'const:1'}))
         for scoreMatcher in self.scoreMatchers:
             parser.addMatcher(scoreMatcher)
             
@@ -153,7 +178,8 @@
         
         for line in parsed:
             assert line.has_key('name') and line.has_key('score')
+            if line.has_key('failed') and line['failed'] == 1:
+                return {}
             ret[line['name']] = line['score']
         
         return ret
-        
\ No newline at end of file