changeset 14521:29ccc4cbabca

Merge
author Gilles Duboscq <duboscq@ssw.jku.at>
date Wed, 12 Mar 2014 13:30:08 +0100
parents f84115370178 (current diff) d8041d695d19 (diff)
children 7c36ec150036
files agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java make/bsd/makefiles/gcc.make src/os/bsd/dtrace/hotspot.d src/os/bsd/dtrace/hotspot_jni.d src/os/bsd/dtrace/hs_private.d src/os/solaris/dtrace/hotspot.d src/os/solaris/dtrace/hotspot_jni.d src/os/solaris/dtrace/hs_private.d src/share/vm/code/nmethod.cpp src/share/vm/code/nmethod.hpp src/share/vm/opto/node.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/utilities/dtrace_usdt2_disabled.hpp
diffstat 1206 files changed, 76212 insertions(+), 14738 deletions(-)
--- a/.hgtags	Tue Mar 11 15:34:06 2014 +0100
+++ b/.hgtags	Wed Mar 12 13:30:08 2014 +0100
@@ -404,3 +404,7 @@
 fca262db9c4309f99d2f5542ab0780e45c2f1578 jdk8-b120
 41f4cad94c581034d4c427d2aaabcc20f26342d0 hs25-b63
 b124e22eb772806c13d942cc110de38da0108147 graal-0.1
+ce2d7e46f3c7e41241f3b407705a4071323a11ab jdk9-b00
+050a626a88951140df874f7b163e304d07b6c296 jdk9-b01
+b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
+b2fee789d23f3cdabb3db4e51af43038e5692d3a jdk9-b03
--- a/THIRD_PARTY_README	Tue Mar 11 15:34:06 2014 +0100
+++ b/THIRD_PARTY_README	Wed Mar 12 13:30:08 2014 +0100
@@ -2,11 +2,12 @@
 -----------------------------
 
 %% This notice is provided with respect to ASM Bytecode Manipulation 
-Framework v3.1, which is included with JRE 7, JDK 7, and OpenJDK 7.
+Framework v5.0, which may be included with JRE 8, JDK 8, and 
+OpenJDK 8.
 
 --- begin of LICENSE ---
 
-Copyright (c) 2000-2005 INRIA, France Telecom
+Copyright (c) 2000-2011 France Télécom
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -40,8 +41,41 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to CodeViewer 1.0, which is included 
-with JDK 7.
+%% This notice is provided with respect to BSDiff v4.3, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright 2003-2005 Colin Percival
+All rights reserved
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted providing that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to CodeViewer 1.0, which may be
+included with JDK 8.
 
 --- begin of LICENSE ---
 
@@ -81,8 +115,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Cryptix AES 3.2.0, which is
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Cryptix AES 3.2.0, which may be
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -121,7 +155,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to CUP Parser Generator for 
-Java 0.10k, which is included with JRE 7, JDK 7, and OpenJDK 7.
+Java 0.10k, which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -148,7 +182,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Document Object Model (DOM) Level 2
-& 3, which is included with JRE 7, JDK 7, and OpenJDK 7.
+& 3, which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -212,19 +246,52 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to Dynalink v0.5, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2009-2013, Attila Szegedi
+
+All rights reserved. Redistribution and use in source and binary forms, with or
+without modification, are permitted provided that the following conditions are
+met: * Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer. * Redistributions in
+binary form must reproduce the above copyright notice, this list of
+conditions and the following disclaimer in the documentation and/or other
+materials provided with the distribution. * Neither the name of Attila
+Szegedi nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Elliptic Curve Cryptography, which 
-is included with JRE 7, JDK 7, and OpenJDK 7.
+may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 You are receiving a copy of the Elliptic Curve Cryptography library in source
-form with the JDK 7 source distribution and object code in the JRE 7 & JDK 7
-runtime.
-
-The terms of the Oracle license do NOT apply to the Elliptic Curve
-Cryptography library program; it is licensed under the following license,
-separately from the Oracle programs you receive. If you do not wish to install
-this program, you may delete the library named libsunec.so (on Solaris and
-Linux systems) or sunec.dll (on Windows systems) from the JRE bin directory
-reserved for native libraries.
+form with the JDK 8 and OpenJDK 8 source distributions, and as object code in
+the JRE 8 & JDK 8 runtimes.
+
+In the case of the JRE 8 & JDK 8 runtimes, the terms of the Oracle license do
+NOT apply to the Elliptic Curve Cryptography library; it is licensed under the
+following license, separately from Oracle's JDK & JRE.  If you do not wish to
+install the Elliptic Curve Cryptography library, you may delete the library
+named libsunec.so (on Solaris and Linux systems) or sunec.dll (on Windows
+systems) from the JRE bin directory reserved for native libraries.
+
 
 --- begin of LICENSE ---
 
@@ -735,13 +802,138 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to FontConfig 2.5, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 source distributions on
+%% This notice is provided with respect to ECMAScript Language
+Specification ECMA-262 Edition 5.1, which may be included with 
+JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright notice
+Copyright © 2011 Ecma International
+Ecma International
+Rue du Rhone 114
+CH-1204 Geneva
+Tel: +41 22 849 6000
+Fax: +41 22 849 6001
+Web: http://www.ecma-international.org
+
+This document and possible translations of it may be copied and furnished to
+others, and derivative works that comment on or otherwise explain it or assist
+in its implementation may be prepared, copied, published, and distributed, in
+whole or in part, without restriction of any kind, provided that the above
+copyright notice and this section are included on all such copies and derivative
+works. However, this document itself may not be modified in any way, including
+by removing the copyright notice or references to Ecma International, except as
+needed for the purpose of developing any document or deliverable produced by
+Ecma International (in which case the rules applied to copyrights must be
+followed) or as required to translate it into languages other than English. The
+limited permissions granted above are perpetual and will not be revoked by Ecma
+International or its successors or assigns. This document and the information
+contained herein is provided on an "AS IS" basis and ECMA INTERNATIONAL
+DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY
+WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY OWNERSHIP
+RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR
+PURPOSE." Software License
+
+All Software contained in this document ("Software)" is protected by copyright
+and is being made available under the "BSD License", included below. This
+Software may be subject to third party rights (rights from parties other than
+Ecma International), including patent rights, and no licenses under such third
+party rights are granted under this license even if the third party concerned is
+a member of Ecma International. SEE THE ECMA CODE OF CONDUCT IN PATENT MATTERS
+AVAILABLE AT http://www.ecma-international.org/memento/codeofconduct.htm FOR
+INFORMATION REGARDING THE LICENSING OF PATENT CLAIMS THAT ARE REQUIRED TO
+IMPLEMENT ECMA INTERNATIONAL STANDARDS*. Redistribution and use in source and
+binary forms, with or without modification, are permitted provided that the
+following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+3. Neither the name of the authors nor Ecma International may be used to endorse
+or promote products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE ECMA INTERNATIONAL "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL ECMA INTERNATIONAL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+OF SUCH DAMAGE.
+--- end of LICENSE ---
+
+%% This notice is provided with respect to Dynalink library which is included
+with the Nashorn technology.
+
+--- begin of LICENSE ---
+Copyright (c) 2009-2013, Attila Szegedi
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the copyright holder nor the names of
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--- end of LICENSE ---
+
+%% This notice is provided with respect to Joni library which is included
+with the Nashorn technology.
+
+--- begin of LICENSE ---
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to FontConfig 2.5, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 source distributions on
 Linux and Solaris.
 
 --- begin of LICENSE ---
 
-Copyright © 2001,2003 Keith Packard
+Copyright © 2001,2003 Keith Packard
 
 Permission to use, copy, modify, distribute, and sell this software and its
 documentation for any purpose is hereby granted without fee, provided that the
@@ -765,7 +957,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to IAIK PKCS#11 Wrapper, 
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -816,7 +1008,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to ICU4C 4.0.1 and ICU4J 4.4, which 
-is included with JRE 7, JDK 7, and OpenJDK 7.
+may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -852,8 +1044,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to IJG JPEG 6b, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to IJG JPEG 6b, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -891,8 +1083,35 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to JOpt-Simple v3.0,  which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Joni v1.1.9, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to JOpt-Simple v3.0, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -921,8 +1140,39 @@
 
 --------------------------------------------------------------------------------
 
+%% This notice is provided with respect to JSON, which may be included 
+with JRE 8 & JDK 8.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2002 JSON.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+The Software shall be used for Good, not Evil.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Kerberos functionality, which 
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -934,7 +1184,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Kerberos functionality from 
-FundsXpress, INC., which is included with JRE 7, JDK 7, and OpenJDK 7.
+FundsXpress, INC., which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -967,8 +1217,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Kronos OpenGL headers, which is 
-included with JDK 7 and OpenJDK 7 source distributions.
+%% This notice is provided with respect to Khronos OpenGL headers, which may be 
+included with JDK 8 and OpenJDK 8 source distributions.
 
 --- begin of LICENSE ---
 
@@ -1000,8 +1250,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to libpng 1.2.18, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to libpng 1.5.4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1014,8 +1264,10 @@
 If you modify libpng you may insert additional notices immediately following
 this sentence.
 
-libpng versions 1.2.6, August 15, 2004, through 1.2.18, May 15, 2007, are
-Copyright (c) 2004, 2006-2007 Glenn Randers-Pehrson, and are
+This code is released under the libpng license.
+
+libpng versions 1.2.6, August 15, 2004, through 1.5.4, July 7, 2011, are
+Copyright (c) 2004, 2006-2011 Glenn Randers-Pehrson, and are
 distributed according to the same disclaimer and license as libpng-1.2.5
 with the following individual added to the list of Contributing Authors
 
@@ -1112,14 +1364,14 @@
 
 Glenn Randers-Pehrson
 glennrp at users.sourceforge.net
-May 15, 2007
+July 7, 2011
 
 --- end of LICENSE ---
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to libungif 4.1.3, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to libungif 4.1.3, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1147,8 +1399,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Little CMS 2.0, which is 
-included with OpenJDK 7.
+%% This notice is provided with respect to Little CMS 2.4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1183,7 +1435,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Mesa 3D Graphics Library v4.1,
-which is included with JRE 7, JDK 7, and OpenJDK 7 source distributions.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 source distributions.
 
 --- begin of LICENSE ---
 
@@ -1213,8 +1465,402 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to Mozilla Network Security
+Services (NSS), which is supplied with the JDK test suite in the OpenJDK
+source code repository. It is licensed under the Mozilla Public License
+(MPL), version 2.0.
+
+The NSS libraries are supplied in executable form, built from unmodified
+NSS source code labeled with the "NSS_3.13.1_RTM" release tag.
+
+The NSS source code is available in the OpenJDK source code repository at:
+    jdk/test/sun/security/pkcs11/nss/src
+
+The NSS libraries are available in the OpenJDK source code repository at:
+    jdk/test/sun/security/pkcs11/nss/lib
+
+--- begin of LICENSE ---
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in 
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11. "Patent Claims" of a Contributor
+    means any patent claim(s), including without limitation, method,
+    process, and apparatus claims, in any patent Licensable by such
+    Contributor that would be infringed, but for the grant of the
+    License, by the making, using, selling, offering for sale, having
+    made, import, or transfer of either its Contributions or its
+    Contributor Version.
+
+1.12. "Secondary License"
+    means either the GNU General Public License, Version 2.0, the GNU
+    Lesser General Public License, Version 2.1, the GNU Affero General
+    Public License, Version 3.0, or any later versions of those
+    licenses.
+
+1.13. "Source Code Form"
+    means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+    means an individual or a legal entity exercising rights under this
+    License. For legal entities, "You" includes any entity that
+    controls, is controlled by, or is under common control with You. For
+    purposes of this definition, "control" means (a) the power, direct
+    or indirect, to cause the direction or management of such entity,
+    whether by contract or otherwise, or (b) ownership of more than
+    fifty percent (50%) of the outstanding shares or beneficial
+    ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+    Licensable by such Contributor to use, reproduce, make available,
+    modify, display, perform, distribute, and otherwise exploit its
+    Contributions, either on an unmodified basis, with Modifications, or
+    as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+    for sale, have made, import, and otherwise transfer either its
+    Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+    or
+
+(b) for infringements caused by: (i) Your and any other third party's
+    modifications of Covered Software, or (ii) the combination of its
+    Contributions with other software (except as part of its Contributor
+    Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+    its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+    Form, as described in Section 3.1, and You must inform recipients of
+    the Executable Form how they can obtain a copy of such Source Code
+    Form by reasonable means in a timely manner, at a charge no more
+    than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+    License, or sublicense it under different terms, provided that the
+    license for the Executable Form does not attempt to limit or alter
+    the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+*                                                                      *
+*  6. Disclaimer of Warranty                                           *
+*  -------------------------                                           *
+*                                                                      *
+*  Covered Software is provided under this License on an "as is"       *
+*  basis, without warranty of any kind, either expressed, implied, or  *
+*  statutory, including, without limitation, warranties that the       *
+*  Covered Software is free of defects, merchantable, fit for a        *
+*  particular purpose or non-infringing. The entire risk as to the     *
+*  quality and performance of the Covered Software is with You.        *
+*  Should any Covered Software prove defective in any respect, You     *
+*  (not any Contributor) assume the cost of any necessary servicing,   *
+*  repair, or correction. This disclaimer of warranty constitutes an   *
+*  essential part of this License. No use of any Covered Software is   *
+*  authorized under this License except under this disclaimer.         *
+*                                                                      *
+************************************************************************
+
+************************************************************************
+*                                                                      *
+*  7. Limitation of Liability                                          *
+*  --------------------------                                          *
+*                                                                      *
+*  Under no circumstances and under no legal theory, whether tort      *
+*  (including negligence), contract, or otherwise, shall any           *
+*  Contributor, or anyone who distributes Covered Software as          *
+*  permitted above, be liable to You for any direct, indirect,         *
+*  special, incidental, or consequential damages of any character      *
+*  including, without limitation, damages for lost profits, loss of    *
+*  goodwill, work stoppage, computer failure or malfunction, or any    *
+*  and all other commercial damages or losses, even if such party      *
+*  shall have been informed of the possibility of such damages. This   *
+*  limitation of liability shall not apply to liability for death or   *
+*  personal injury resulting from such party's negligence to the       *
+*  extent applicable law prohibits such limitation. Some               *
+*  jurisdictions do not allow the exclusion or limitation of           *
+*  incidental or consequential damages, so this exclusion and          *
+*  limitation may not apply to You.                                    *
+*                                                                      *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to PC/SC Lite for Suse Linux v.1.1.1,
-which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -1257,8 +1903,30 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to PorterStemmer v4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+See: http://tartarus.org/~martin/PorterStemmer
+
+The software is completely free for any purpose, unless notes at the head of
+the program text indicate otherwise (which is rare). In any case, the notes
+about licensing are never more restrictive than the BSD License.
+
+In every case where the software is not written by me (Martin Porter), this
+licensing arrangement has been endorsed by the contributor, and it is
+therefore unnecessary to ask the contributor again to confirm it.
+
+I have not asked any contributors (or their employers, if they have them) for
+proofs that they have the right to distribute their software in this way.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Relax NG Object/Parser v.20050510,
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1285,8 +1953,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to RelaxNGCC v1.12, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to RelaxNGCC v1.12, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1335,487 +2003,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Mozilla Rhino v1.7R3, which 
-is included with JRE 7, JDK 7, and OpenJDK 7
-
---- begin of LICENSE ---
-
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
-
-                              ---------------
-
-1. Definitions.
-
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the NPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A - Mozilla Public License.
-
-     ``The contents of this file are subject to the Mozilla Public License
-     Version 1.1 (the "License"); you may not use this file except in
-     compliance with the License. You may obtain a copy of the License at
-     http://www.mozilla.org/MPL/
-
-     Software distributed under the License is distributed on an "AS IS"
-     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-     License for the specific language governing rights and limitations
-     under the License.
-
-     The Original Code is ______________________________________.
-
-     The Initial Developer of the Original Code is ________________________.
-     Portions created by ______________________ are Copyright (C) ______
-     _______________________. All Rights Reserved.
-
-     Contributor(s): ______________________________________.
-
-     Alternatively, the contents of this file may be used under the terms
-     of the _____ license (the  "[___] License"), in which case the
-     provisions of [______] License are applicable instead of those
-     above.  If you wish to allow use of your version of this file only
-     under the terms of the [____] License and not to allow others to use
-     your version of this file under the MPL, indicate your decision by
-     deleting  the provisions above and replace  them with the notice and
-     other provisions required by the [___] License.  If you do not delete
-     the provisions above, a recipient may use your version of this file
-     under either the MPL or the [___] License."
-
-     [NOTE: The text of this Exhibit A may differ slightly from the text of
-     the notices in the Source Code files of the Original Code. You should
-     use the text of this Exhibit A rather than the text found in the
-     Original Code Source Code for Your Modifications.]
-
---- end of LICENSE ---
-
--------------------------------------------------------------------------------
-
-%% This notice is provided with respect to SAX 2.0.1, which is included 
-with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to SAX 2.0.1, which may be included 
+with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1876,8 +2065,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to SoftFloat version 2b, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 on Linux/ARM.
+%% This notice is provided with respect to SoftFloat version 2b, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 on Linux/ARM.
 
 --- begin of LICENSE ---
 
@@ -1909,12 +2098,41 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to Sparkle 1.5,
+which may be included with JRE 8 on Mac OS X.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2012 Sparkle.org and Andy Matuschak
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% Portions licensed from Taligent, Inc.
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Thai Dictionary, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Thai Dictionary, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1947,8 +2165,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Unicode 6.0.0, CLDR v1.4.1, & CLDR
-v1.9, which is included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Unicode 6.2.0 & CLDR 21.0.1,
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1959,7 +2177,7 @@
 Trademark Usage Policy.
 
 A. Unicode Copyright.
-   1. Copyright © 1991-2011 Unicode, Inc. All rights reserved.
+   1. Copyright © 1991-2013 Unicode, Inc. All rights reserved.
 
    2. Certain documents and files on this website contain a legend indicating
       that "Modification is permitted." Any person is hereby authorized,
@@ -2094,7 +2312,7 @@
 
 COPYRIGHT AND PERMISSION NOTICE
 
-Copyright © 1991-2011 Unicode, Inc. All rights reserved. Distributed under the
+Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the
 Terms of Use in http://www.unicode.org/copyright.html.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -2134,8 +2352,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to UPX v3.01, which is included 
-with JRE 7 on Windows.
+%% This notice is provided with respect to UPX v3.01, which may be included 
+with JRE 8 on Windows.
 
 --- begin of LICENSE ---
 
@@ -2274,7 +2492,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Xfree86-VidMode Extension 1.0,
-which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -2326,8 +2544,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to X Window System 6.8.2, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+%% This notice is provided with respect to X Window System 6.8.2, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -3131,12 +3349,12 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to zlib v1.2.3, which is included 
-with JRE 7, JDK 7, and OpenJDK 7
+%% This notice is provided with respect to zlib v1.2.5, which may be included 
+with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
-  version 1.2.3, July 18th, 2005
+  version 1.2.5, July 18th, 2005
 
   Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
 
@@ -3163,16 +3381,18 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to the following which is 
-included with JRE 7, JDK 7, and OpenJDK 7, except where noted:
-
-  Apache Derby 10.8.1.2        [included with JDK 7 only]
+%% This notice is provided with respect to the following which may be 
+included with JRE 8, JDK 8, and OpenJDK 8, except where noted:
+
+  Apache Commons Math 2.2
+  Apache Derby 10.10.1.2        [included with JDK 8]
   Apache Jakarta BCEL 5.2 
   Apache Jakarta Regexp 1.4 
-  Apache Santuario XMLSec-Java 1.4.2
+  Apache Santuario XML Security for Java 1.5.4
   Apache Xalan-Java 2.7.1 
-  Apache Xerces2 Java 2.10.0 
+  Apache Xerces Java 2.10.0 
   Apache XML Resolver 1.1 
+  Dynalink 0.5
 
 
 --- begin of LICENSE ---
--- a/agent/make/Makefile	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/make/Makefile	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/agent/make/mkinstall	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/make/mkinstall	Wed Mar 12 13:30:08 2014 +0100
@@ -27,7 +27,9 @@
 
 cp ../src/os/solaris/proc/amd64/libsaproc.so $SA_NAME/solaris/amd64
 cp ../src/os/solaris/proc/sparc/libsaproc.so $SA_NAME/solaris/sparc
+cp ../src/os/solaris/proc/sparc/libsaproc_audit.so $SA_NAME/solaris/sparc
 cp ../src/os/solaris/proc/sparcv9/libsaproc.so $SA_NAME/solaris/sparcv9
+cp ../src/os/solaris/proc/sparcv9/libsaproc_audit.so $SA_NAME/solaris/sparcv9
 cp ../src/os/solaris/proc/i386/libsaproc.so $SA_NAME/solaris/i386
 cp ../src/os/linux/i386/libsaproc.so $SA_NAME/linux/i386
 cp ../src/os/linux/ia64/libsaproc.so $SA_NAME/linux/ia64
--- a/agent/make/saenv.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/make/saenv.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -48,16 +48,17 @@
      CPU=i386
    fi
 else
-   # configure audit helper library if SA_ALTROOT is set
-   if [ -n "$SA_ALTROOT" ]; then
-     LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
-     export LD_AUDIT_32
-     if [ ! -f $LD_AUDIT_32 ]; then
-       echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-       echo "Make sure to build it with 'make natives'."
-       exit 1
-     fi
+   # configure audit helper library for solaris
+   LD_AUDIT_32=$STARTDIR/../src/os/solaris/proc/`uname -p`/libsaproc_audit.so
+   if [ ! -f $LD_AUDIT_32 ]; then
+     LD_AUDIT_32=$STARTDIR/solaris/`uname -p`/libsaproc_audit.so
+   fi
+   if [ ! -f $LD_AUDIT_32 ]; then
+      echo "Can't find libsaproc_audit.so."
+      echo "Make sure to build it with 'make natives'."
+      exit 1
    fi
+   export LD_AUDIT_32
    SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/`uname -p`:$STARTDIR/solaris/`uname -p`
    OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
    CPU=sparc
--- a/agent/make/saenv64.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/make/saenv64.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -43,16 +43,19 @@
   fi
 fi
 
-# configure audit helper library if SA_ALTROOT is set
-if [ -n "$SA_ALTROOT" ]; then
-  LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
-  export LD_AUDIT_64
-  if [ ! -f $LD_AUDIT_64 ]; then
-      echo "SA_ALTROOT is set and can't find libsaproc_audit.so."
-      echo "Make sure to build it with 'make natives'."
-      exit 1
-  fi
+# configure audit helper library
+LD_AUDIT_64=$STARTDIR/../src/os/solaris/proc/$CPU/libsaproc_audit.so
+if [ ! -f $LD_AUDIT_64 ]; then
+  LD_AUDIT_64=$STARTDIR/solaris/$CPU/libsaproc_audit.so
 fi
+
+if [ ! -f $LD_AUDIT_64 ]; then
+   echo "Can't find libsaproc_audit.so."
+   echo "Make sure to build it with 'make natives'."
+   exit 1
+fi
+
+export LD_AUDIT_64
 SA_LIBPATH=$STARTDIR/../src/os/solaris/proc/$CPU:$STARTDIR/solaris/$CPU
 
 OPTIONS="-Dsa.library.path=$SA_LIBPATH -Dsun.jvm.hotspot.debugger.useProcDebugger"
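
Both saenv.sh and saenv64.sh now use the same pattern: probe an ordered list of
candidate locations for libsaproc_audit.so and fail with a build hint only when
none of them exists. A minimal Java sketch of that first-existing-candidate
search (the class and method names are illustrative, not part of the scripts):

    import java.nio.file.Files;
    import java.nio.file.Path;

    class AuditLibLocator {
        // Return the first candidate that exists, mirroring the shell fallback.
        static Path firstExisting(Path... candidates) {
            for (Path p : candidates) {
                if (Files.isRegularFile(p)) {
                    return p;
                }
            }
            throw new IllegalStateException(
                "Can't find libsaproc_audit.so. Make sure to build it with 'make natives'.");
        }
    }
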
--- a/agent/src/os/linux/libproc.h	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/linux/libproc.h	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@
 *************************************************************************************/
 
 
-#if defined(sparc)  || defined(sparcv9)
+#if defined(sparc) || defined(sparcv9) || defined(ppc64)
 #define user_regs_struct  pt_regs
 #endif
 
--- a/agent/src/os/linux/libproc_impl.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/linux/libproc_impl.c	Wed Mar 12 13:30:08 2014 +0100
@@ -29,54 +29,51 @@
 #include <thread_db.h>
 #include "libproc_impl.h"
 
-static const char* alt_root = NULL;
-static int alt_root_len = -1;
-
 #define SA_ALTROOT "SA_ALTROOT"
 
-static void init_alt_root() {
-   if (alt_root_len == -1) {
-      alt_root = getenv(SA_ALTROOT);
-      if (alt_root) {
-         alt_root_len = strlen(alt_root);
-      } else {
-         alt_root_len = 0;
-      }
-   }
-}
-
 int pathmap_open(const char* name) {
-   int fd;
-   char alt_path[PATH_MAX + 1];
+  static const char *alt_root = NULL;
+  static int alt_root_initialized = 0;
 
-   init_alt_root();
+  int fd;
+  char alt_path[PATH_MAX + 1], *alt_path_end;
+  const char *s;
 
-   if (alt_root_len > 0) {
-      strcpy(alt_path, alt_root);
-      strcat(alt_path, name);
-      fd = open(alt_path, O_RDONLY);
-      if (fd >= 0) {
-         print_debug("path %s substituted for %s\n", alt_path, name);
-         return fd;
-      }
+  if (!alt_root_initialized) {
+    alt_root_initialized = -1;
+    alt_root = getenv(SA_ALTROOT);
+  }
+
+  if (alt_root == NULL) {
+    return open(name, O_RDONLY);
+  }
+
+  strcpy(alt_path, alt_root);
+  alt_path_end = alt_path + strlen(alt_path);
 
-      if (strrchr(name, '/')) {
-         strcpy(alt_path, alt_root);
-         strcat(alt_path, strrchr(name, '/'));
-         fd = open(alt_path, O_RDONLY);
-         if (fd >= 0) {
-            print_debug("path %s substituted for %s\n", alt_path, name);
-            return fd;
-         }
-      }
-   } else {
-      fd = open(name, O_RDONLY);
-      if (fd >= 0) {
-         return fd;
-      }
-   }
+  // Strip leading path components one by one and try to open the file with alt_root prepended
+  s = name;
+  while (1) {
+    strcat(alt_path, s);
+    s += 1;
+
+    fd = open(alt_path, O_RDONLY);
+    if (fd >= 0) {
+      print_debug("path %s substituted for %s\n", alt_path, name);
+      return fd;
+    }
 
-   return -1;
+    // The linker always puts the full path of a solib into the process, so we
+    // can rely on the presence of '/'. If no slash is present, the solib does not
+    // physically exist (e.g. linux-gate.so) and opening it would fail anyway.
+    if ((s = strchr(s, '/')) == NULL) {
+      break;
+    }
+
+    *alt_path_end = 0;
+  }
+
+  return -1;
 }
 
 static bool _libsaproc_debug;
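
The rewritten pathmap_open prepends SA_ALTROOT to the full library path and, on
each failed open, strips the leading path component and retries, covering deeper
fallbacks than the old "full path or basename" pair. A hedged Java rendering of
the same lookup order (resolve is an illustrative name; the C code above is the
real implementation):

    import java.io.File;

    class PathMap {
        // Try altRoot + name, then keep stripping the leading component:
        // /alt + /usr/lib/libfoo.so, /alt + /lib/libfoo.so, /alt + /libfoo.so.
        static String resolve(String altRoot, String name) {
            String s = name;
            while (true) {
                String candidate = altRoot + s;
                if (new File(candidate).canRead()) {
                    return candidate;
                }
                int slash = s.indexOf('/', 1);   // next component boundary
                if (slash < 0) {
                    return null;                 // nothing left to strip
                }
                s = s.substring(slash);
            }
        }
    }
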
--- a/agent/src/os/linux/salibelf.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/linux/salibelf.c	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/linux/symtab.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/linux/symtab.c	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -214,8 +214,10 @@
                                 + 2);
   strcpy(debug_pathname, name);
   char *last_slash = strrchr(debug_pathname, '/');
-  if (last_slash == NULL)
+  if (last_slash == NULL) {
+    free(debug_pathname);
     return -1;
+  }
 
   /* Look in the same directory as the object.  */
   strcpy(last_slash+1, debug_filename);
--- a/agent/src/os/solaris/proc/saproc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/solaris/proc/saproc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/os/win32/windbg/sawindbg.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/os/win32/windbg/sawindbg.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/LinuxVtblAccess.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciEnv.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -95,9 +95,15 @@
     int entryBci = task.osrBci();
     int compLevel = task.compLevel();
     Klass holder = method.getMethodHolder();
-    out.println("compile " + holder.getName().asString() + " " +
-                OopUtilities.escapeString(method.getName().asString()) + " " +
-                method.getSignature().asString() + " " +
-                entryBci + " " + compLevel);
+    out.print("compile " + holder.getName().asString() + " " +
+              OopUtilities.escapeString(method.getName().asString()) + " " +
+              method.getSignature().asString() + " " +
+              entryBci + " " + compLevel);
+    Compile compiler = compilerData();
+    if (compiler != null) {
+      // Dump inlining data.
+      compiler.dumpInlineData(out);
+    }
+    out.println();
   }
 }
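
The ciEnv change replaces println with print so that, when compiler data is
available, the inlining records are appended to the same replay line before it
is terminated. A minimal sketch of that print/append/println shape, with
hypothetical names standing in for the SA types:

    import java.io.PrintStream;

    class ReplayLine {
        interface InlineDumper { void dumpInlineData(PrintStream out); }

        // Emit one "compile" record; optional inlining data extends the line.
        static void emit(PrintStream out, String klass, String method, String sig,
                         int entryBci, int compLevel, InlineDumper compiler) {
            out.print("compile " + klass + " " + method + " " + sig + " "
                      + entryBci + " " + compLevel);
            if (compiler != null) {
                compiler.dumpInlineData(out);   // stays on the current line
            }
            out.println();                      // one record per line
        }
    }
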
--- a/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ci/ciMethod.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/compiler/CompileTask.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebugger.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdDebugger.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Wed Mar 12 13:30:08 2014 +0100
@@ -55,31 +55,21 @@
     if (pc == null) {
       return null;
     }
+
+    /* Typically there are only about ten loaded objects here, so sorting and
+       binary search are not worthwhile; a linear search gives acceptable
+       performance. */
+
     List objs = getLoadObjectList();
-    Object[] arr = objs.toArray();
-    // load objects are sorted by base address, do binary search
-    int mid  = -1;
-    int low  = 0;
-    int high = arr.length - 1;
 
-    while (low <= high) {
-       mid = (low + high) >> 1;
-       LoadObject midVal = (LoadObject) arr[mid];
-       long cmp = pc.minus(midVal.getBase());
-       if (cmp < 0) {
-          high = mid - 1;
-       } else if (cmp > 0) {
-          long size = midVal.getSize();
-          if (cmp >= size) {
-             low = mid + 1;
-          } else {
-             return (LoadObject) arr[mid];
-          }
-       } else { // match found
-          return (LoadObject) arr[mid];
-       }
+    for (int i = 0; i < objs.size(); i++) {
+      LoadObject ob = (LoadObject) objs.get(i);
+      Address base = ob.getBase();
+      long size = ob.getSize();
+      if (pc.greaterThanOrEqual(base) && pc.lessThan(base.addOffsetTo(size))) {
+        return ob;
+      }
     }
-    // no match found.
+
     return null;
   }
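
With only about ten load objects, the containment test itself is the whole
algorithm. A standalone sketch of the linear lookup, using the Address
operations visible in the diff (the import paths are assumed from the SA
sources):

    import java.util.List;

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.debugger.cdbg.LoadObject;

    class LinearLookup {
        // Return the load object whose [base, base + size) range contains pc.
        static LoadObject find(List<LoadObject> objs, Address pc) {
            for (LoadObject ob : objs) {
                Address base = ob.getBase();
                long size = ob.getSize();
                if (pc.greaterThanOrEqual(base)
                        && pc.lessThan(base.addOffsetTo(size))) {
                    return ob;
                }
            }
            return null;   // pc is not inside any known mapping
        }
    }
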
 
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxDebuggerLocal.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/amd64/LinuxAMD64CFrame.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/x86/LinuxX86CFrame.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/x86/LinuxX86CFrame.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgCDebugger.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgCDebugger.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/amd64/WindowsAMD64CFrame.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/amd64/WindowsAMD64CFrame.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/x86/WindowsX86CFrame.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windows/x86/WindowsX86CFrame.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ArrayTypeImpl.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ArrayTypeImpl.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,19 +24,29 @@
 
 package sun.jvm.hotspot.jdi;
 
-import com.sun.jdi.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import sun.jvm.hotspot.oops.ArrayKlass;
+import sun.jvm.hotspot.oops.Instance;
 import sun.jvm.hotspot.oops.InstanceKlass;
-import sun.jvm.hotspot.oops.ObjArrayKlass;
-import sun.jvm.hotspot.oops.TypeArrayKlass;
 import sun.jvm.hotspot.oops.Klass;
-import sun.jvm.hotspot.oops.Instance;
+import sun.jvm.hotspot.oops.ObjArrayKlass;
 import sun.jvm.hotspot.oops.Symbol;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map;
+import sun.jvm.hotspot.oops.TypeArrayKlass;
+
+import com.sun.jdi.ArrayReference;
+import com.sun.jdi.ArrayType;
+import com.sun.jdi.ClassLoaderReference;
+import com.sun.jdi.ClassNotLoadedException;
+import com.sun.jdi.InterfaceType;
+import com.sun.jdi.Method;
+import com.sun.jdi.PrimitiveType;
+import com.sun.jdi.ReferenceType;
+import com.sun.jdi.Type;
+import com.sun.jdi.VirtualMachine;
 
 public class ArrayTypeImpl extends ReferenceTypeImpl implements ArrayType {
   protected ArrayTypeImpl(VirtualMachine aVm, ArrayKlass aRef) {
@@ -75,7 +85,8 @@
         }
     }
 
-    void addVisibleMethods(Map methodMap) {
+    @Override
+    void addVisibleMethods(Map<String, Method> methodMap, Set<InterfaceType> handledInterfaces) {
         // arrays don't have methods
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ClassTypeImpl.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ClassTypeImpl.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,12 +24,30 @@
 
 package sun.jvm.hotspot.jdi;
 
-import com.sun.jdi.*;
-import sun.jvm.hotspot.oops.Klass;
+import java.lang.ref.SoftReference;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import sun.jvm.hotspot.oops.InstanceKlass;
 
-import java.util.*;
-import java.lang.ref.SoftReference;
+import com.sun.jdi.ClassNotLoadedException;
+import com.sun.jdi.ClassType;
+import com.sun.jdi.Field;
+import com.sun.jdi.IncompatibleThreadStateException;
+import com.sun.jdi.InterfaceType;
+import com.sun.jdi.InvalidTypeException;
+import com.sun.jdi.InvocationException;
+import com.sun.jdi.Method;
+import com.sun.jdi.ObjectReference;
+import com.sun.jdi.ReferenceType;
+import com.sun.jdi.ThreadReference;
+import com.sun.jdi.Value;
+import com.sun.jdi.VirtualMachine;
 
 public class ClassTypeImpl extends ReferenceTypeImpl
     implements ClassType
@@ -195,22 +213,26 @@
         return null;
     }
 
-    void addVisibleMethods(Map methodMap) {
+    @Override
+    void addVisibleMethods(Map<String, Method> methodMap, Set<InterfaceType> seenInterfaces) {
         /*
          * Add methods from
          * parent types first, so that the methods in this class will
          * overwrite them in the hash table
          */
 
-        Iterator iter = interfaces().iterator();
+        Iterator<InterfaceType> iter = interfaces().iterator();
         while (iter.hasNext()) {
             InterfaceTypeImpl interfaze = (InterfaceTypeImpl)iter.next();
-            interfaze.addVisibleMethods(methodMap);
+            if (!seenInterfaces.contains(interfaze)) {
+                interfaze.addVisibleMethods(methodMap, seenInterfaces);
+                seenInterfaces.add(interfaze);
+            }
         }
 
         ClassTypeImpl clazz = (ClassTypeImpl)superclass();
         if (clazz != null) {
-            clazz.addVisibleMethods(methodMap);
+            clazz.addVisibleMethods(methodMap, seenInterfaces);
         }
 
         addToMethodMap(methodMap, methods());
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/InterfaceTypeImpl.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/InterfaceTypeImpl.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,15 +24,22 @@
 
 package sun.jvm.hotspot.jdi;
 
-import com.sun.jdi.*;
+import java.lang.ref.SoftReference;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import sun.jvm.hotspot.oops.InstanceKlass;
 
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Iterator;
-import java.util.Collections;
-import java.lang.ref.SoftReference;
+import com.sun.jdi.ClassNotPreparedException;
+import com.sun.jdi.ClassType;
+import com.sun.jdi.InterfaceType;
+import com.sun.jdi.Method;
+import com.sun.jdi.ReferenceType;
+import com.sun.jdi.VirtualMachine;
 
 public class InterfaceTypeImpl extends ReferenceTypeImpl
                                implements InterfaceType {
@@ -96,16 +103,20 @@
         return implementors;
     }
 
-    void addVisibleMethods(Map methodMap) {
+    @Override
+    void addVisibleMethods(Map<String, Method> methodMap, Set<InterfaceType> seenInterfaces) {
         /*
          * Add methods from
          * parent types first, so that the methods in this class will
          * overwrite them in the hash table
          */
-        Iterator iter = superinterfaces().iterator();
+        Iterator<InterfaceType> iter = superinterfaces().iterator();
         while (iter.hasNext()) {
             InterfaceTypeImpl interfaze = (InterfaceTypeImpl)iter.next();
-            interfaze.addVisibleMethods(methodMap);
+            if (!seenInterfaces.contains(interfaze)) {
+                interfaze.addVisibleMethods(methodMap, seenInterfaces);
+                seenInterfaces.add(interfaze);
+            }
         }
 
         addToMethodMap(methodMap, methods());
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/JVMTIThreadState.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/JVMTIThreadState.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,24 +24,45 @@
 
 package sun.jvm.hotspot.jdi;
 
-import java.io.*;
-
-import com.sun.jdi.*;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.lang.ref.SoftReference;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import sun.jvm.hotspot.memory.SystemDictionary;
+import sun.jvm.hotspot.oops.ArrayKlass;
+import sun.jvm.hotspot.oops.DefaultHeapVisitor;
 import sun.jvm.hotspot.oops.Instance;
 import sun.jvm.hotspot.oops.InstanceKlass;
-import sun.jvm.hotspot.oops.ArrayKlass;
 import sun.jvm.hotspot.oops.JVMDIClassStatus;
 import sun.jvm.hotspot.oops.Klass;
-import sun.jvm.hotspot.oops.ObjArray;
 import sun.jvm.hotspot.oops.Oop;
 import sun.jvm.hotspot.oops.Symbol;
-import sun.jvm.hotspot.oops.DefaultHeapVisitor;
 import sun.jvm.hotspot.utilities.Assert;
 
-import java.util.*;
-import java.lang.ref.SoftReference;
+import com.sun.jdi.AbsentInformationException;
+import com.sun.jdi.ArrayType;
+import com.sun.jdi.ClassLoaderReference;
+import com.sun.jdi.ClassNotLoadedException;
+import com.sun.jdi.ClassNotPreparedException;
+import com.sun.jdi.ClassObjectReference;
+import com.sun.jdi.Field;
+import com.sun.jdi.InterfaceType;
+import com.sun.jdi.Method;
+import com.sun.jdi.ObjectReference;
+import com.sun.jdi.PrimitiveType;
+import com.sun.jdi.ReferenceType;
+import com.sun.jdi.Type;
+import com.sun.jdi.Value;
+import com.sun.jdi.VirtualMachine;
 
 public abstract class ReferenceTypeImpl extends TypeImpl
 implements ReferenceType {
@@ -421,7 +442,8 @@
         }
     }
 
-    abstract void addVisibleMethods(Map methodMap);
+    abstract void addVisibleMethods(Map<String, Method> methodMap, Set<InterfaceType> seenInterfaces);
+
     public final List visibleMethods() throws ClassNotPreparedException {
         checkPrepared();
         /*
@@ -430,8 +452,8 @@
          * concatenation of name and signature.
          */
         //System.out.println("jj: RTI: Calling addVisibleMethods for:" + this);
-        Map map = new HashMap();
-        addVisibleMethods(map);
+        Map<String, Method> map = new HashMap<String, Method>();
+        addVisibleMethods(map, new HashSet<InterfaceType>());
 
         /*
          * ... but the hash map destroys order. Methods should be
@@ -441,7 +463,7 @@
          */
         //System.out.println("jj: RTI: Calling allMethods for:" + this);
 
-        List list = new ArrayList(allMethods());
+        List<Method> list = new ArrayList<Method>(allMethods());
         //System.out.println("jj: allMethods = " + jjstr(list));
         //System.out.println("jj: map = " + map.toString());
         //System.out.println("jj: map = " + jjstr(map.values()));
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,77 @@
+/*
+ * @(#)AdaptiveFreeList.java
+ *
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class AdaptiveFreeList extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
+    sizeField = type.getCIntegerField("_size");
+    countField = type.getCIntegerField("_count");
+    headerSize = type.getSize();
+  }
+
+  // Fields
+  private static CIntegerField sizeField;
+  private static CIntegerField countField;
+  private static long          headerSize;
+
+  // Constructor
+  public AdaptiveFreeList(Address address) {
+    super(address);
+  }
+
+  // Accessors
+  public long size() {
+    return sizeField.getValue(addr);
+  }
+
+  public long count() {
+    return countField.getValue(addr);
+  }
+
+  public static long sizeOf() {
+    return headerSize;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/CMSCollector.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CMSCollector.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,25 +24,29 @@
 
 package sun.jvm.hotspot.memory;
 
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.oops.ObjectHeap;
+import sun.jvm.hotspot.oops.Oop;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.Assert;
 
 public class CompactibleFreeListSpace extends CompactibleSpace {
    private static AddressField collectorField;
-
-   // for free size, three fields
-   //       FreeBlockDictionary* _dictionary;        // ptr to dictionary for large size blocks
-   //       FreeList _indexedFreeList[IndexSetSize]; // indexed array for small size blocks
-   //       LinearAllocBlock _smallLinearAllocBlock; // small linear alloc in TLAB
    private static AddressField indexedFreeListField;
    private static AddressField dictionaryField;
    private static long         smallLinearAllocBlockFieldOffset;
-   private static long indexedFreeListSizeOf;
 
    private int    heapWordSize;     // 4 for 32bit, 8 for 64 bits
    private int    IndexSetStart;    // for small indexed list
@@ -109,11 +113,11 @@
       // small chunks
       long size = 0;
       Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
-      cur = cur.addOffsetTo(IndexSetStart*FreeList.sizeOf());
+      cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
       for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
-         FreeList freeList = (FreeList) VMObjectFactory.newObject(FreeList.class, cur);
+         AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
          size += i*freeList.count();
-         cur= cur.addOffsetTo(IndexSetStride*FreeList.sizeOf());
+         cur = cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
       }
 
       // large block
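
The loop above is the indexed-free-list half of the free-size computation: the
list at index i holds chunks of exactly i heap words, so it contributes
i * count(i) words to the total. A hedged sketch of just that summation,
reusing the AdaptiveFreeList accessors introduced earlier (indexedFreeWords is
an illustrative name):

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.memory.AdaptiveFreeList;
    import sun.jvm.hotspot.runtime.VMObjectFactory;

    class FreeListSize {
        // Sum i * count(i) over the indexed free lists laid out back to back.
        static long indexedFreeWords(Address firstList, int start, int stride,
                                     int setSize) {
            long words = 0;
            Address cur = firstList.addOffsetTo(start * AdaptiveFreeList.sizeOf());
            for (int i = start; i < setSize; i += stride) {
                AdaptiveFreeList list = (AdaptiveFreeList)
                    VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
                words += i * list.count();      // i words per chunk on list i
                cur = cur.addOffsetTo(stride * AdaptiveFreeList.sizeOf());
            }
            return words;
        }
    }
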
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/DictionaryEntry.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-/*
- * @(#)FreeList.java
- *
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class FreeList extends VMObject {
-   static {
-      VM.registerVMInitializedObserver(new Observer() {
-         public void update(Observable o, Object data) {
-            initialize(VM.getVM().getTypeDataBase());
-         }
-      });
-   }
-
-   private static synchronized void initialize(TypeDataBase db) {
-      Type type = db.lookupType("FreeList<FreeChunk>");
-      sizeField = type.getCIntegerField("_size");
-      countField = type.getCIntegerField("_count");
-      headerSize = type.getSize();
-   }
-
-   // Fields
-   private static CIntegerField sizeField;
-   private static CIntegerField countField;
-   private static long          headerSize;
-
-   //Constructor
-   public FreeList(Address address) {
-     super(address);
-   }
-
-   // Accessors
-   public long size() {
-      return sizeField.getValue(addr);
-   }
-
-   public long count() {
-      return  countField.getValue(addr);
-   }
-
-   public static long sizeOf() {
-     return headerSize;
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,7 +152,7 @@
 
   private long indexOffset(long index) {
     if (Assert.ASSERTS_ENABLED) {
-      Assert.that(index > 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
+      Assert.that(index >= 0 && index < getLength(),  "invalid cp index " + index + " " + getLength());
     }
     return (index * getElementSize()) + headerSize;
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Klass.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MethodCounters.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/MethodData.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/Block.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Block.java	Wed Mar 12 13:30:08 2014 +0100
@@ -48,7 +48,7 @@
     preOrderField = new CIntField(type.getCIntegerField("_pre_order"), 0);
     domDepthField = new CIntField(type.getCIntegerField("_dom_depth"), 0);
     idomField = type.getAddressField("_idom");
-    freqField = type.getJFloatField("_freq");
+    freqField = type.getJDoubleField("_freq");
   }
 
   private static AddressField nodesField;
@@ -57,7 +57,7 @@
   private static CIntField preOrderField;
   private static CIntField domDepthField;
   private static AddressField idomField;
-  private static JFloatField freqField;
+  private static JDoubleField freqField;
 
   public Block(Address addr) {
     super(addr);
@@ -67,8 +67,8 @@
     return (int)preOrderField.getValue(getAddress());
   }
 
-  public float freq() {
-    return (float)freqField.getValue(getAddress());
+  public double freq() {
+    return (double)freqField.getValue(getAddress());
   }
 
   public Node_List nodes() {
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/Compile.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Compile.java	Wed Mar 12 13:30:08 2014 +0100
@@ -25,6 +25,7 @@
 package sun.jvm.hotspot.opto;
 
 import java.util.*;
+import java.io.PrintStream;
 import sun.jvm.hotspot.ci.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.runtime.*;
@@ -92,4 +93,13 @@
     }
     return null;
   }
+
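+  // Emits this compile's inline tree as replay data: the node count
+  // after an " inline" tag, then one record per inlined call site.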
+  public void dumpInlineData(PrintStream out) {
+    InlineTree inlTree = ilt();
+    if (inlTree != null) {
+      out.print(" inline " + inlTree.count());
+      inlTree.dumpReplayData(out);
+    }
+  }
+
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/InlineTree.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/InlineTree.java	Wed Mar 12 13:30:08 2014 +0100
@@ -87,6 +87,11 @@
     return GrowableArray.create(addr, inlineTreeConstructor);
   }
 
+  public int inlineLevel() {
+    JVMState jvms = callerJvms();
+    return (jvms != null) ? jvms.depth() : 0;
+  }
+
   public void printImpl(PrintStream st, int indent) {
     for (int i = 0; i < indent; i++) st.print(" ");
     st.printf(" @ %d ", callerBci());
@@ -101,4 +106,28 @@
   public void print(PrintStream st) {
     printImpl(st, 2);
   }
+
+  // Count number of nodes in this subtree
+  public int count() {
+    int result = 1;
+    GrowableArray<InlineTree> subt = subtrees();
+    for (int i = 0 ; i < subt.length(); i++) {
+      result += subt.at(i).count();
+    }
+    return result;
+  }
+
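+  // Replay-data record: inline level and caller bci, then the holder
+  // class, method name and signature, followed by each subtree.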
+  public void dumpReplayData(PrintStream out) {
+    out.printf(" %d %d ", inlineLevel(), callerBci());
+    Method method = (Method)method().getMetadata();
+    Klass holder = method.getMethodHolder();
+    out.print(holder.getName().asString() + " " +
+              OopUtilities.escapeString(method.getName().asString()) + " " +
+              method.getSignature().asString());
+
+    GrowableArray<InlineTree> subt = subtrees();
+    for (int i = 0 ; i < subt.length(); i++) {
+      subt.at(i).dumpReplayData(out);
+    }
+  }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/JVMState.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/JVMState.java	Wed Mar 12 13:30:08 2014 +0100
@@ -88,6 +88,10 @@
     return (int)bciField.getValue(getAddress());
   }
 
+  public int depth() {
+    return (int)depthField.getValue(getAddress());
+  }
+
   public JVMState caller() {
     return create(callerField.getValue(getAddress()));
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ThreadLocalAllocBuffer.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/bsd_amd64/BsdAMD64JavaThreadPDAccess.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/bsd_amd64/BsdAMD64JavaThreadPDAccess.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java	Wed Mar 12 13:30:08 2014 +0100
@@ -103,11 +103,12 @@
       }
 
       SystemDictionary dict = VM.getVM().getSystemDictionary();
-      dict.classesDo(new SystemDictionary.ClassAndLoaderVisitor() {
-                        public void visit(Klass k, Oop loader) {
+      dict.classesDo(new SystemDictionary.ClassVisitor() {
+                        public void visit(Klass k) {
                            if (! (k instanceof InstanceKlass)) {
                               return;
                            }
+                           Oop loader = ((InstanceKlass) k).getClassLoader();
                            LoaderData ld = (loader != null) ? (LoaderData)loaderMap.get(loader)
                                                             : bootstrapLoaderData;
                            if (ld != null) {
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2003, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -98,11 +98,14 @@
            break;
        default: throw new IllegalArgumentException();
        }
+
        if (cpCache == null) {
           return (short) cpCacheIndex;
        } else if (fmt.indexOf("JJJJ") >= 0) {
-          // change byte-ordering and go via secondary cache entry
-           throw new InternalError("unimplemented");
+          // Invokedynamic requires special handling: the rewriter stores
+          // the cp cache index one's-complemented in native byte order,
+          // so undo both transformations before the cache lookup.
+          cpCacheIndex = ~cpCacheIndex;
+          cpCacheIndex = bytes.swapInt(cpCacheIndex);
+          return (short) cpCache.getEntryAt(cpCacheIndex).getConstantPoolIndex();
        } else if (fmt.indexOf("JJ") >= 0) {
           // change byte-ordering and go via cache
           return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex();
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/SAPanel.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/ui/SAPanel.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,8 +61,9 @@
     long h = 0;
     int s = 0;
     int len = buf.length;
+    // Emulate the unsigned int arithmetic in java_lang_String::hash_code
     while (len-- > 0) {
-      h = 31*h + (0xFFL & buf[s]);
+      h = 31*h + (0xFFFFFFFFL & buf[s]);
       s++;
     }
     return h & 0xFFFFFFFFL;
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapGXLWriter.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/JSJavaInstanceKlass.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Tue Mar 11 15:34:06 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -371,19 +371,23 @@
    return sa.dbg.lookup(dso, sym);
 }
 
-// returns the ClosestSymbol or null
-function closestSymbolFor(addr) {
-   if (sa.cdbg == null) {
+function loadObjectContainingPC(addr) {
+    if (sa.cdbg == null) {
       // no CDebugger support, return null
       return null;
-   } else {
-      var dso = sa.cdbg.loadObjectContainingPC(addr);
-      if (dso != null) {
-         return dso.closestSymbolToPC(addr);
-      } else {
-         return null;
-      }
-   }
+    }
+
+    return sa.cdbg.loadObjectContainingPC(addr);
+}
+
+// returns the ClosestSymbol or null
+function closestSymbolFor(addr) {
+    var dso = loadObjectContainingPC(addr);
+    if (dso != null) {
+      return dso.closestSymbolToPC(addr);
+    }
+
+    return null;
 }
 
 // Address-to-symbol
@@ -804,6 +808,16 @@
 // VM type to SA class map
 var  vmType2Class = new Object();
 
+// C2 only classes
+try {
+  vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
+  vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
+} catch(e) {
+  // Ignore the exception. C2-specific objects might not be
+  // available in a client VM.
+}
+
+
 // This is *not* exhaustive. Add more if needed.
 // code blobs
 vmType2Class["BufferBlob"] = sapkg.code.BufferBlob;
@@ -812,10 +826,8 @@
 vmType2Class["SafepointBlob"] = sapkg.code.SafepointBlob;
 vmType2Class["C2IAdapter"] = sapkg.code.C2IAdapter;
 vmType2Class["DeoptimizationBlob"] = sapkg.code.DeoptimizationBlob;
-vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
 vmType2Class["I2CAdapter"] = sapkg.code.I2CAdapter;
 vmType2Class["OSRAdapter"] = sapkg.code.OSRAdapter;
-vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
 vmType2Class["PCDesc"] = sapkg.code.PCDesc;
 
 // interpreter
@@ -876,21 +888,29 @@
 
 // returns description of given pointer as a String
 function whatis(addr) {
-   addr = any2addr(addr);
-   var ptrLoc = findPtr(addr);
-   if (ptrLoc.isUnknown()) {
-      var vmType = vmTypeof(addr);
-      if (vmType != null) {
-         return "pointer to " + vmType.name;
-      } else {
-         var sym = closestSymbolFor(addr);
-         if (sym != null) {
-            return sym.name + '+' + sym.offset;
-         } else {
-            return ptrLoc.toString();
-         }
-      }
-   } else {
-      return ptrLoc.toString();
-   }
+  addr = any2addr(addr);
+  var ptrLoc = findPtr(addr);
+  if (!ptrLoc.isUnknown()) {
+    return ptrLoc.toString();
+  }
+
+  var vmType = vmTypeof(addr);
+  if (vmType != null) {
+    return "pointer to " + vmType.name;
+  }
+
+  var dso = loadObjectContainingPC(addr);
+  if (dso == null) {
+    return ptrLoc.toString();
+  }
+
+  var sym = dso.closestSymbolToPC(addr);
+  if (sym != null) {
+    return sym.name + '+' + sym.offset;
+  }
+
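+  // Last resort: describe the address as "<dso basename>+<offset from
+  // the dso's base address>".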
+  var s = dso.getName();
+  var p = s.lastIndexOf("/");
+  var base = dso.getBase();
+  return s.substring(p+1, s.length) + '+' + addr.minus(base);
 }
--- a/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/graal/com.oracle.graal.java/src/com/oracle/graal/java/GraphBuilderPhase.java	Wed Mar 12 13:30:08 2014 +0100
@@ -839,7 +839,7 @@
         private Object lookupConstant(int cpi, int opcode) {
             eagerResolvingForSnippets(cpi, opcode);
             Object result = constantPool.lookupConstant(cpi);
-            assert !graphBuilderConfig.eagerResolving() || !(result instanceof JavaType) || (result instanceof ResolvedJavaType);
+            assert !graphBuilderConfig.eagerResolving() || !(result instanceof JavaType) || (result instanceof ResolvedJavaType) : result;
             return result;
         }
 
--- a/make/Makefile	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/Makefile	Wed Mar 12 13:30:08 2014 +0100
@@ -90,10 +90,11 @@
 # Typical C1/C2 targets made available with this Makefile
 C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1
 C2_VM_TARGETS=product  fastdebug  optimized  debug
+GRAAL_VM_TARGETS=productgraal fastdebuggraal optimizedgraal debuggraal
+CORE_VM_TARGETS=productcore fastdebugcore optimizedcore debugcore
 ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero
 SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark
 MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1
-GRAAL_VM_TARGETS=productgraal fastdebuggraal optimizedgraal debuggraal
 
 COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product
 COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug
@@ -128,6 +129,12 @@
 
 all_optimized: optimized optimized1 docs export_optimized
 
+allgraal:           all_productgraal all_fastdebuggraal
+all_productgraal:   productgraal docs export_product
+all_fastdebuggraal: fastdebuggraal docs export_fastdebug
+all_debuggraal:     debuggraal docs export_debug
+all_optimizedgraal: optimizedgraal docs export_optimized
+
 allzero:           all_productzero all_fastdebugzero
 all_productzero:   productzero docs export_product
 all_fastdebugzero: fastdebugzero docs export_fastdebug
@@ -140,11 +147,11 @@
 all_debugshark:     debugshark docs export_debug
 all_optimizedshark: optimizedshark docs export_optimized
 
-allgraal:           all_productgraal all_fastdebuggraal
-all_productgraal:   productgraal docs export_product
-all_fastdebuggraal: fastdebuggraal docs export_fastdebug
-all_debuggraal:     debuggraal docs export_debug
-all_optimizedgraal: optimizedgraal docs export_optimized
+allcore:           all_productcore all_fastdebugcore
+all_productcore:   productcore docs export_product
+all_fastdebugcore: fastdebugcore docs export_fastdebug
+all_debugcore:     debugcore docs export_debug
+all_optimizedcore: optimizedcore docs export_optimized
 
 # Do everything
 world:         all create_jdk
@@ -165,10 +172,10 @@
 C1_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
 C2_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
 GRAAL_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_graal
+CORE_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_core
 MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
 ZERO_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_zero
 SHARK_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_shark
-GRAAL_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_graal
 
 # Build variation of hotspot
 $(C1_VM_TARGETS):
@@ -179,6 +186,10 @@
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(C2_DIR) BUILD_FLAVOR=$@ VM_TARGET=$@ generic_build2 $(ALT_OUT)
 
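+# The %core targets derive BUILD_FLAVOR by stripping the "core" suffix,
+# e.g. "productcore" builds the product flavor of the core VM.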
+$(CORE_VM_TARGETS):
+	$(CD) $(GAMMADIR)/make; \
+	$(MAKE) BUILD_DIR=$(CORE_DIR) BUILD_FLAVOR=$(@:%core=%) VM_TARGET=$@ generic_buildcore $(ALT_OUT)
+
 $(ZERO_VM_TARGETS):
 	$(CD) $(GAMMADIR)/make; \
 	$(MAKE) BUILD_DIR=$(ZERO_DIR) BUILD_FLAVOR=$(@:%zero=%) VM_TARGET=$@ generic_buildzero $(ALT_OUT)
@@ -240,6 +251,20 @@
 		      $(MAKE_ARGS) $(VM_TARGET)
 endif
 
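+# The core VM build is only wired up for 64-bit ppc here; any other
+# HS_ARCH/ARCH_DATA_MODEL combination just echoes that the target is
+# unavailable.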
+generic_buildcore: $(HOTSPOT_SCRIPT)
+ifeq ($(HS_ARCH),ppc)
+  ifeq ($(ARCH_DATA_MODEL),64)
+	$(MKDIR) -p $(OUTPUTDIR)
+	$(CD) $(OUTPUTDIR); \
+		$(MAKE) -f $(ABS_OS_MAKEFILE) \
+			$(MAKE_ARGS) $(VM_TARGET)
+  else
+	@$(ECHO) "No ($(VM_TARGET)) for ppc ARCH_DATA_MODEL=$(ARCH_DATA_MODEL)"
+  endif
+else
+	@$(ECHO) "No ($(VM_TARGET)) for $(HS_ARCH)"
+endif
+
 generic_buildzero: $(HOTSPOT_SCRIPT)
 	$(MKDIR) -p $(OUTPUTDIR)
 	$(CD) $(OUTPUTDIR); \
@@ -282,7 +307,7 @@
 
 # Builds code that can be shared among different build flavors
 buildshared:
-	python2.7 -u $(GAMMADIR)/mxtool/mx.py build --no-native --export-dir $(SHARED_DIR)
+#	python2.7 -u $(GAMMADIR)/mxtool/mx.py build --no-native --export-dir $(SHARED_DIR)
 
 # Export file rule
 generic_export: $(EXPORT_LIST)
@@ -290,11 +315,11 @@
 export_product:
 	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
 export_fastdebug:
-	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
+	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
 export_debug:
-	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
+	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
 export_optimized:
-	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) EXPORT_SUBDIR=/$(@:export_%=%) generic_export
+	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) generic_export
 
 export_product_jdk::
 	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
@@ -311,6 +336,7 @@
 C1_BUILD_DIR      =$(C1_DIR)/$(BUILD_FLAVOR)
 C2_BUILD_DIR      =$(C2_DIR)/$(BUILD_FLAVOR)
 GRAAL_BUILD_DIR   =$(GRAAL_DIR)/$(BUILD_FLAVOR)
+CORE_BUILD_DIR    =$(CORE_DIR)/$(BUILD_FLAVOR)
 MINIMAL1_BUILD_DIR=$(MINIMAL1_DIR)/$(BUILD_FLAVOR)
 ZERO_BUILD_DIR    =$(ZERO_DIR)/$(BUILD_FLAVOR)
 SHARK_BUILD_DIR   =$(SHARK_DIR)/$(BUILD_FLAVOR)
@@ -537,6 +563,28 @@
 	$(install-dir)
 endif
 
+# Core
+ifeq ($(JVM_VARIANT_CORE), true)
+# Common
+$(EXPORT_LIB_DIR)/%.jar:			$(CORE_BUILD_DIR)/../generated/%.jar
+	$(install-file)
+$(EXPORT_INCLUDE_DIR)/%:			$(CORE_BUILD_DIR)/../generated/jvmtifiles/%
+	$(install-file)
+# Unix
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):	$(CORE_BUILD_DIR)/%.$(LIBRARY_SUFFIX)
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.debuginfo:		$(CORE_BUILD_DIR)/%.debuginfo
+	$(install-file)
+$(EXPORT_SERVER_DIR)/%.diz:			$(CORE_BUILD_DIR)/%.diz
+	$(install-file)
+endif
+
 # Shark
 ifeq ($(JVM_VARIANT_ZEROSHARK), true)
 # Common
@@ -609,6 +657,7 @@
 	$(RM) -r $(C1_DIR)
 	$(RM) -r $(C2_DIR)
 	$(RM) -r $(GRAAL_DIR)
+	$(RM) -r $(CORE_DIR)
 	$(RM) -r $(ZERO_DIR)
 	$(RM) -r $(SHARK_DIR)
 	$(RM) -r $(MINIMAL1_DIR)
@@ -635,11 +684,11 @@
 
 
 # Testing the built JVM
-RUN_JVM=JAVA_HOME=$(JDK_IMPORT_PATH) $(JDK_IMPORT_PATH)/bin/java -d$(ARCH_DATA_MODEL) -Dsun.java.launcher=gamma
+RUN_JVM=JAVA_HOME=$(JDK_IMPORT_PATH) $(JDK_IMPORT_PATH)/bin/java -d$(ARCH_DATA_MODEL) -XXaltjvm=$(ALTJVM_DIR) -Dsun.java.launcher.is_altjvm=true
 generic_test:
 	@$(ECHO) "Running with: $(ALTJVM_DIR)"
-	@$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -Xinternalversion
-	@$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -showversion -help
+	@$(RUN_JVM) -Xinternalversion
+	@$(RUN_JVM) -showversion -help
 
 # C2 test targets
 test_product test_optimized test_fastdebug test_debug:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/Makefile	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,377 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile creates a build tree and lights off a build.
+# You can go back into the build tree and perform rebuilds or
+# incremental builds as desired. Be sure to reestablish
+# environment variable settings for LD_LIBRARY_PATH and JAVA_HOME.
+
+# The make process now relies on java and javac. These can be
+# specified either implicitly on the PATH, by setting the
+# (JDK-inherited) ALT_BOOTDIR environment variable to the full path of
+# a JDK in which bin/java and bin/javac are present and working (e.g.,
+# /usr/local/java/jdk1.3/solaris), or via the (JDK-inherited) default
+# BOOTDIR path value. Note that one of ALT_BOOTDIR or BOOTDIR has to
+# be set. We do *not* search for javac, javah, rmic etc. on the PATH.
+#
+# One can set ALT_BOOTDIR or BOOTDIR to point to a jdk that runs on
+# an architecture that differs from the target architecture, as long
+# as the bootstrap jdk runs under the same flavor of OS as the target
+# (i.e., if the target is linux, point to a jdk that runs on a linux
+# box).  In order to use such a bootstrap jdk, set the make variable
+# REMOTE to the desired remote command mechanism, e.g.,
+#
+#    make REMOTE="rsh -l me myotherlinuxbox"
+
+# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
+# JDI binding on SA produces two binaries:
+#  1. sa-jdi.jar       - This is built before building libjvm.so
+#                        Please refer to ./makefiles/sa.make
+#  2. libsa.so         - Native library for SA - This is built after
+#                        libjsig.so (signal interposition library)
+#                        Please refer to ./makefiles/vm.make
+# If $(GAMMADIR)/agent dir is not present, SA components are not built.
+
+# No tests on AIX.
+TEST_IN_BUILD=false
+
+ifeq ($(GAMMADIR),)
+include ../../make/defs.make
+else
+include $(GAMMADIR)/make/defs.make
+endif
+include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
+
+ifndef CC_INTERP
+  ifndef FORCE_TIERED
+    FORCE_TIERED=1
+  endif
+endif
+
+ifdef LP64
+  ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
+    _JUNK_ := $(shell echo >&2 \
+       $(OSNAME) $(ARCH) "*** ERROR: this platform does not support 64-bit compilers!")
+	@exit 1
+  endif
+endif
+
+# we need to set up LP64 correctly to satisfy sanity checks in adlc
+ifneq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
+  MFLAGS += " LP64=1 "
+endif
+
+# pass USE_SUNCC further, through MFLAGS
+ifdef USE_SUNCC
+  MFLAGS += " USE_SUNCC=1 "
+endif
+
+# The following renders pathnames in generated Makefiles valid on
+# machines other than the machine containing the build tree.
+#
+# For example, let's say my build tree lives on /files12 on
+# exact.east.sun.com.  This logic will cause GAMMADIR to begin with
+# /net/exact/files12/...
+#
+# We only do this on SunOS variants, for a couple of reasons:
+#  * It is extremely rare that source trees exist on other systems
+#  * It has been claimed that the Linux automounter is flaky, so
+#    changing GAMMADIR in a way that exercises the automounter could
+#    prove to be a source of unreliability in the build process.
+# Obviously, this Makefile is only relevant on SunOS boxes to begin
+# with, but the SunOS conditionalization will make it easier to
+# combine Makefiles in the future (assuming we ever do that).
+
+ifeq ($(OSNAME),solaris)
+
+  #   prepend current directory to relative pathnames.
+  NEW_GAMMADIR :=                                    \
+    $(shell echo $(GAMMADIR) |                       \
+      sed -e "s=^\([^/].*\)=$(shell pwd)/\1="        \
+     )
+  unexport NEW_GAMMADIR
+
+  # If NEW_GAMMADIR doesn't already start with "/net/":
+  ifeq ($(strip $(filter /net/%,$(NEW_GAMMADIR))),)
+    #   prepend /net/$(HOST)
+    #   remove /net/$(HOST) if name already began with /home/
+    #   remove /net/$(HOST) if name already began with /java/
+    #   remove /net/$(HOST) if name already began with /lab/
+    NEW_GAMMADIR :=                                     \
+         $(shell echo $(NEW_GAMMADIR) |                 \
+                 sed -e "s=^\(.*\)=/net/$(HOST)\1="     \
+                     -e "s=^/net/$(HOST)/home/=/home/=" \
+                     -e "s=^/net/$(HOST)/java/=/java/=" \
+                     -e "s=^/net/$(HOST)/lab/=/lab/="   \
+          )
+    # Don't use the new value for GAMMADIR unless a file with the new
+    # name actually exists.
+    ifneq ($(wildcard $(NEW_GAMMADIR)),)
+      GAMMADIR := $(NEW_GAMMADIR)
+    endif
+  endif
+
+endif
+
+# BUILDARCH is set to "zero" for Zero builds.  VARIANTARCH
+# is used to give the build directories meaningful names.
+VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
+
+# There is a (semi-) regular correspondence between make targets and actions:
+#
+#       Target          Tree Type       Build Dir
+#
+#       debug           compiler2       <os>_<arch>_compiler2/debug
+#       fastdebug       compiler2       <os>_<arch>_compiler2/fastdebug
+#       optimized       compiler2       <os>_<arch>_compiler2/optimized
+#       product         compiler2       <os>_<arch>_compiler2/product
+#
+#       debug1          compiler1       <os>_<arch>_compiler1/debug
+#       fastdebug1      compiler1       <os>_<arch>_compiler1/fastdebug
+#       optimized1      compiler1       <os>_<arch>_compiler1/optimized
+#       product1        compiler1       <os>_<arch>_compiler1/product
+#
+#       debugcore       core            <os>_<arch>_core/debug
+#       fastdebugcore   core            <os>_<arch>_core/fastdebug
+#       optimizedcore   core            <os>_<arch>_core/optimized
+#       productcore     core            <os>_<arch>_core/product
+#
+#       debugzero       zero            <os>_<arch>_zero/debug
+#       fastdebugzero   zero            <os>_<arch>_zero/fastdebug
+#       optimizedzero   zero            <os>_<arch>_zero/optimized
+#       productzero     zero            <os>_<arch>_zero/product
+#
+#       debugshark      shark           <os>_<arch>_shark/debug
+#       fastdebugshark  shark           <os>_<arch>_shark/fastdebug
+#       optimizedshark  shark           <os>_<arch>_shark/optimized
+#       productshark    shark           <os>_<arch>_shark/product
+#
+#       fastdebugminimal1 minimal1      <os>_<arch>_minimal1/fastdebug
+#       productminimal1   minimal1      <os>_<arch>_minimal1/product
+#
+# What you get with each target:
+#
+# debug*     - debug compile with asserts enabled
+# fastdebug* - optimized compile, but with asserts enabled
+# optimized* - optimized compile, no asserts
+# product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
+
+# This target list needs to be coordinated with the usage message
+# in the build.sh script:
+TARGETS           = debug fastdebug optimized product
+
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
+else
+  SUBDIR_DOCS     = $(OSNAME)_$(BUILDARCH)_docs
+endif
+SUBDIRS_C1        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS))
+SUBDIRS_C2        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS))
+SUBDIRS_TIERED    = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
+SUBDIRS_CORE      = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
+SUBDIRS_ZERO      = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
+SUBDIRS_SHARK     = $(addprefix $(OSNAME)_$(VARIANTARCH)_shark/,$(TARGETS))
+SUBDIRS_MINIMAL1  = $(addprefix $(OSNAME)_$(BUILDARCH)_minimal1/,$(TARGETS))
+
+TARGETS_C2        = $(TARGETS)
+TARGETS_C1        = $(addsuffix 1,$(TARGETS))
+TARGETS_TIERED    = $(addsuffix tiered,$(TARGETS))
+TARGETS_CORE      = $(addsuffix core,$(TARGETS))
+TARGETS_ZERO      = $(addsuffix zero,$(TARGETS))
+TARGETS_SHARK     = $(addsuffix shark,$(TARGETS))
+TARGETS_MINIMAL1  = $(addsuffix minimal1,$(TARGETS))
+
+BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
+BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH)
+BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
+
+BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
+
+#-------------------------------------------------------------------------------
+
+# Could make everything by default, but that would take a while.
+all:
+	@echo "Try '$(MAKE) <target> ...'  where <target> is one or more of"
+	@echo "  $(TARGETS_C2)"
+	@echo "  $(TARGETS_C1)"
+	@echo "  $(TARGETS_CORE)"
+	@echo "  $(TARGETS_ZERO)"
+	@echo "  $(TARGETS_SHARK)"
+	@echo "  $(TARGETS_MINIMAL1)"
+
+checks: check_os_version check_j2se_version
+
+# We do not want people accidentally building on old systems (e.g. Linux 2.2.x,
+# Solaris 2.5.1, 2.6).
+# Disable this check by setting DISABLE_HOTSPOT_OS_VERSION_CHECK=ok.
+
+SUPPORTED_OS_VERSION = AIX
+OS_VERSION := $(shell uname -a)
+EMPTY_IF_NOT_SUPPORTED = $(filter $(SUPPORTED_OS_VERSION),$(OS_VERSION))
+
+check_os_version:
+ifeq ($(DISABLE_HOTSPOT_OS_VERSION_CHECK)$(EMPTY_IF_NOT_SUPPORTED),)
+	$(QUIETLY) >&2 echo "*** This OS is not supported:" `uname -a`; exit 1;
+endif
+
+# jvmti.make requires XSLT (J2SE 1.4.x or newer):
+XSLT_CHECK	= $(REMOTE) $(RUN.JAVAP) javax.xml.transform.TransformerFactory
+# If not found then fail fast.
+check_j2se_version:
+	$(QUIETLY) $(XSLT_CHECK) > /dev/null 2>&1; \
+	if [ $$? -ne 0 ]; then \
+	  $(REMOTE) $(RUN.JAVA) -version; \
+	  echo "*** An XSLT processor (J2SE 1.4.x or newer) is required" \
+	  "to bootstrap this build" 1>&2; \
+	  exit 1; \
+	fi
+
+$(SUBDIRS_TIERED): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=tiered
+
+$(SUBDIRS_C2): $(BUILDTREE_MAKE)
+ifeq ($(FORCE_TIERED),1)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+		$(BUILDTREE) VARIANT=tiered FORCE_TIERED=1
+else
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+		$(BUILDTREE) VARIANT=compiler2
+endif
+
+$(SUBDIRS_C1): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=compiler1
+
+$(SUBDIRS_CORE): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=core
+
+$(SUBDIRS_ZERO): $(BUILDTREE_MAKE) platform_zero
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=zero VARIANTARCH=$(VARIANTARCH)
+
+$(SUBDIRS_SHARK): $(BUILDTREE_MAKE) platform_zero
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=shark VARIANTARCH=$(VARIANTARCH)
+
+$(SUBDIRS_MINIMAL1): $(BUILDTREE_MAKE)
+	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
+	$(BUILDTREE) VARIANT=minimal1
+
+
+platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
+	$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
+
+# Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME
+
+$(TARGETS_C2):  $(SUBDIRS_C2)
+	cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_compiler2/$@ && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_TIERED):  $(SUBDIRS_TIERED)
+	cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_tiered/$(patsubst %tiered,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_C1):  $(SUBDIRS_C1)
+	cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_compiler1/$(patsubst %1,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_CORE):  $(SUBDIRS_CORE)
+	cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_core/$(patsubst %core,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_ZERO):  $(SUBDIRS_ZERO)
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_SHARK):  $(SUBDIRS_SHARK)
+	cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(VARIANTARCH)_shark/$(patsubst %shark,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+$(TARGETS_MINIMAL1):  $(SUBDIRS_MINIMAL1)
+	cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS)
+ifdef INSTALL
+	cd $(OSNAME)_$(BUILDARCH)_minimal1/$(patsubst %minimal1,%,$@) && $(MAKE) $(MFLAGS) install
+endif
+
+# Just build the tree, and nothing else:
+tree:      $(SUBDIRS_C2)
+tree1:     $(SUBDIRS_C1)
+treecore:  $(SUBDIRS_CORE)
+treezero:  $(SUBDIRS_ZERO)
+treeshark: $(SUBDIRS_SHARK)
+treeminimal1: $(SUBDIRS_MINIMAL1)
+
+# Doc target.  This is the same for all build options.
+#     Hence create a docs directory beside ...$(ARCH)_[...]
+# We specify 'BUILD_FLAVOR=product' so that the proper
+# ENABLE_FULL_DEBUG_SYMBOLS value is used.
+docs: checks
+	$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
+	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
+
+# Synonyms for win32-like targets.
+compiler2:  debug product
+
+compiler1:  debug1 product1
+
+core: debugcore productcore
+
+zero: debugzero productzero
+
+shark: debugshark productshark
+
+clean_docs:
+	rm -rf $(SUBDIR_DOCS)
+
+clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1:
+	rm -rf $(OSNAME)_$(BUILDARCH)_$(subst clean_,,$@)
+
+clean:  clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_minimal1 clean_docs
+
+include $(GAMMADIR)/make/cscope.make
+
+#-------------------------------------------------------------------------------
+
+.PHONY: $(TARGETS_C2) $(TARGETS_C1) $(TARGETS_CORE) $(TARGETS_ZERO) $(TARGETS_SHARK) $(TARGETS_MINIMAL1)
+.PHONY: tree tree1 treecore treezero treeshark treeminimal1
+.PHONY: all compiler1 compiler2 core zero shark
+.PHONY: clean clean_compiler1 clean_compiler2 clean_core clean_zero clean_shark clean_minimal1 docs clean_docs
+.PHONY: checks check_os_version check_j2se_version
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/adlc_updater	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,20 @@
+#! /bin/sh
+#
+# This file is used by adlc.make to selectively update generated
+# adlc files. Because source and target diretories are relative
+# paths, this file is copied to the target build directory before
+# use.
+#
+# adlc-updater <file> <source-dir> <target-dir>
+#
+fix_lines() {
+  # repair bare #line directives in $1 to refer to $2
+  awk < $1 > $1+ '
+    /^#line 999999$/ {print "#line " (NR+1) " \"" F2 "\""; next}
+    {print}
+  ' F2=$2
+  mv $1+ $1
+}
+fix_lines $2/$1 $3/$1
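+# Install the repaired file only when it differs from the existing
+# target; touching made-change signals the calling makefile that a
+# generated file actually changed.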
+[ -f $3/$1 ] && cmp -s $2/$1 $3/$1 || \
+( [ -f $3/$1 ] && echo Updating $3/$1 ; touch $2/made-change ; mv $2/$1 $3/$1 )
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/adjust-mflags.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,87 @@
+#! /bin/sh
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This script is used only from top.make.
+# The macro $(MFLAGS-adjusted) calls this script to
+# adjust the "-j" arguments to take into account
+# the HOTSPOT_BUILD_JOBS variable.  The default
+# handling of the "-j" argument by gnumake does
+# not meet our needs, so we must adjust it ourselves.
+
+# This argument adjustment applies to two recursive
+# calls to "$(MAKE) $(MFLAGS-adjusted)" in top.make.
+# One invokes adlc.make, and the other invokes vm.make.
+# The adjustment propagates the desired concurrency
+# level down to the sub-make (of the adlc or vm).
+# The default behavior of gnumake is to run all
+# sub-makes without concurrency ("-j1").
+
+# Also, we use a make variable rather than an explicit
+# "-j<N>" argument to control this setting, so that
+# the concurrency setting (which must be tuned separately
+# for each MP system) can be set via an environment variable.
+# The recommended setting is 1.5x to 2x the number of available
+# CPUs on the MP system, which is large enough to keep the CPUs
+# busy (even though some jobs may be I/O bound) but, we may presume,
+# not so large as to overflow the system's swap space.
+
+set -eu
+
+default_build_jobs=4
+
+case $# in
+[12])	true;;
+*)	>&2 echo "Usage: $0 <MFLAGS> [<HOTSPOT_BUILD_JOBS>]"; exit 2;;
+esac
+
+MFLAGS=$1
+HOTSPOT_BUILD_JOBS=${2-}
+
+# Normalize any -jN argument to the form " -j${HBJ}"
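+# e.g. with HOTSPOT_BUILD_JOBS unset, "-kj8" becomes " -k -j4"
+# (the bare -j picks up default_build_jobs).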
+MFLAGS=`
+	echo "$MFLAGS" \
+	| sed '
+		s/^-/ -/
+		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -j[0-9][0-9]*/ -j/
+		s/ -j\([^ 	]\)/ -j -\1/
+		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
+	' `
+
+case ${HOTSPOT_BUILD_JOBS} in \
+
+'') case ${MFLAGS} in
+    *\ -j*)
+	>&2 echo "# Note: -jN is ineffective for setting parallelism in this makefile."
+	>&2 echo "# Please set HOTSPOT_BUILD_JOBS=${default_build_jobs} on the command line or in the environment."
+    esac;;
+
+?*) case ${MFLAGS} in
+     *\ -j*) true;;
+     *)      MFLAGS="-j${HOTSPOT_BUILD_JOBS} ${MFLAGS}";;
+    esac;;
+esac
+
+echo "${MFLAGS}"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/adlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,231 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (adlc.make) is included from the adlc.make in the
+# build directories.
+# It knows how to compile, link, and run the adlc.
+
+include $(GAMMADIR)/make/$(Platform_os_family)/makefiles/rules.make
+
+# #########################################################################
+
+# OUTDIR must be the same as AD_Dir = $(GENERATED)/adfiles in top.make:
+GENERATED = ../generated
+OUTDIR  = $(GENERATED)/adfiles
+
+ARCH = $(Platform_arch)
+OS = $(Platform_os_family)
+
+SOURCE.AD = $(OUTDIR)/$(OS)_$(Platform_arch_model).ad 
+
+ifeq ("${Platform_arch_model}", "${Platform_arch}")
+  SOURCES.AD = \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
+else
+  SOURCES.AD = \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
+  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
+endif
+
+EXEC = $(OUTDIR)/adlc
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(GAMMADIR)/src/share/vm/adlc
+VPATH += $(Src_Dirs_V:%=%:)
+
+# set INCLUDES for C preprocessor
+Src_Dirs_I += $(GAMMADIR)/src/share/vm/adlc $(GENERATED)
+INCLUDES += $(Src_Dirs_I:%=-I%)
+
+# set flags for adlc compilation
+CXXFLAGS = $(SYSDEFS) $(INCLUDES)
+
+# Force assertions on.
+CXXFLAGS += -DASSERT
+
+# CFLAGS_WARN holds compiler options to suppress/enable warnings.
+# Suppress warnings (for now)
+CFLAGS_WARN = -w
+CFLAGS += $(CFLAGS_WARN)
+
+OBJECTNAMES = \
+	adlparse.o \
+	archDesc.o \
+	arena.o \
+	dfa.o \
+	dict2.o \
+	filebuff.o \
+	forms.o \
+	formsopt.o \
+	formssel.o \
+	main.o \
+	adlc-opcodes.o \
+	output_c.o \
+	output_h.o \
+
+OBJECTS = $(OBJECTNAMES:%=$(OUTDIR)/%)
+
+GENERATEDNAMES = \
+        ad_$(Platform_arch_model).cpp \
+        ad_$(Platform_arch_model).hpp \
+        ad_$(Platform_arch_model)_clone.cpp \
+        ad_$(Platform_arch_model)_expand.cpp \
+        ad_$(Platform_arch_model)_format.cpp \
+        ad_$(Platform_arch_model)_gen.cpp \
+        ad_$(Platform_arch_model)_misc.cpp \
+        ad_$(Platform_arch_model)_peephole.cpp \
+        ad_$(Platform_arch_model)_pipeline.cpp \
+        adGlobals_$(Platform_arch_model).hpp \
+        dfa_$(Platform_arch_model).cpp \
+
+GENERATEDFILES = $(GENERATEDNAMES:%=$(OUTDIR)/%)
+
+# #########################################################################
+
+all: $(EXEC)
+
+$(EXEC) : $(OBJECTS)
+	@echo Making adlc
+	$(QUIETLY) $(HOST.LINK_NOPROF.CXX) -o $(EXEC) $(OBJECTS)
+
+# Blanket dependencies: every adlc object depends on these headers.
+$(OBJECTS): opcodes.hpp classes.hpp adlc.hpp adlcVMDeps.hpp adlparse.hpp archDesc.hpp arena.hpp dict2.hpp filebuff.hpp forms.hpp formsopt.hpp formssel.hpp
+
+# The source files refer to ostream.h, which sparcworks calls iostream.h
+$(OBJECTS): ostream.h
+
+ostream.h :
+	@echo >$@ '#include <iostream.h>'
+
+dump:
+	: OUTDIR=$(OUTDIR)
+	: OBJECTS=$(OBJECTS)
+	: products = $(GENERATEDFILES)
+
+all: $(GENERATEDFILES)
+
+$(GENERATEDFILES): refresh_adfiles
+
+# Get a unique temporary directory name, so multiple makes can run in parallel.
+# Note that product files are updated via "mv", which is atomic.
+TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
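+# ($$$$ is make's escape for the shell's $$, i.e. the PID of the
+# $(shell) invocation, which makes the directory name unique.)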
+
+# Debuggable by default
+CFLAGS += -g
+
+# Pass -D flags into ADLC.
+ADLCFLAGS += $(SYSDEFS)
+
+# Note "+="; it is a hook so flags.make can add more flags, like -g or -DFOO.
+ADLCFLAGS += -q -T
+
+# Normally, debugging is done directly on the ad_<arch>*.cpp files.
+# But -g will put #line directives in those files pointing back to <arch>.ad.
+# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives,
+# so skip it for 3.2 and earlier.
+ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ADLCFLAGS += -g
+endif
+
+ifdef LP64
+ADLCFLAGS += -D_LP64
+else
+ADLCFLAGS += -U_LP64
+endif
+
+#
+# adlc_updater is a simple sh script, under SCCS control. It is
+# used to selectively update generated adlc files. This should
+# provide a nice compilation speed improvement.
+#
+ADLC_UPDATER_DIRECTORY = $(GAMMADIR)/make/$(OS)
+ADLC_UPDATER = adlc_updater
+$(ADLC_UPDATER): $(ADLC_UPDATER_DIRECTORY)/$(ADLC_UPDATER)
+	$(QUIETLY) cp $< $@; chmod +x $@
+
+# This action refreshes all generated adlc files simultaneously.
+# The way it works is this:
+# 1) create a scratch directory to work in.
+# 2) if the current working directory does not have $(ADLC_UPDATER), copy it.
+# 3) run the compiled adlc executable. This will create new adlc files in the scratch directory.
+# 4) call $(ADLC_UPDATER) on each generated adlc file. It will selectively update changed or missing files.
+# 5) If we actually updated any files, echo a notice.
+#
+refresh_adfiles: $(EXEC) $(SOURCE.AD) $(ADLC_UPDATER)
+	@rm -rf $(TEMPDIR); mkdir $(TEMPDIR)
+	$(QUIETLY) $(EXEC) $(ADLCFLAGS) $(SOURCE.AD) \
+            -c$(TEMPDIR)/ad_$(Platform_arch_model).cpp -h$(TEMPDIR)/ad_$(Platform_arch_model).hpp -a$(TEMPDIR)/dfa_$(Platform_arch_model).cpp -v$(TEMPDIR)/adGlobals_$(Platform_arch_model).hpp \
+	    || { rm -rf $(TEMPDIR); exit 1; }
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_clone.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_expand.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_format.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_gen.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_misc.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_peephole.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) ad_$(Platform_arch_model)_pipeline.cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) adGlobals_$(Platform_arch_model).hpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) ./$(ADLC_UPDATER) dfa_$(Platform_arch_model).cpp $(TEMPDIR) $(OUTDIR)
+	$(QUIETLY) [ -f $(TEMPDIR)/made-change ] \
+		|| echo "Rescanned $(SOURCE.AD) but encountered no changes."
+	$(QUIETLY) rm -rf $(TEMPDIR)
+
+
+# #########################################################################
+
+$(SOURCE.AD): $(SOURCES.AD)
+	$(QUIETLY) $(PROCESS_AD_FILES) $(SOURCES.AD) > $(SOURCE.AD)
+
+#PROCESS_AD_FILES = cat
+# Pass through #line directives, in case user enables -g option above:
+PROCESS_AD_FILES = awk '{ \
+    if (CUR_FN != FILENAME) { CUR_FN=FILENAME; NR_BASE=NR-1; need_lineno=1 } \
+    if (need_lineno && $$0 !~ /\/\//) \
+      { print "\n\n\#line " (NR-NR_BASE) " \"" FILENAME "\""; need_lineno=0 }; \
+    print }'
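+
+# For example (hypothetical input), when the concatenation reaches the
+# first line of ppc_64.ad that contains no "//" comment, the filter emits
+#   #line <n> "ppc_64.ad"
+# where <n> is that line's position within ppc_64.ad, so that -g debugging
+# and compiler errors point back at the original .ad sources.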
+
+$(OUTDIR)/%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
+
+# Some object files are given a prefix, to disambiguate
+# them from objects of the same name built for the VM.
+$(OUTDIR)/adlc-%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(HOST.COMPILE.CXX) -o $@ $< $(COMPILE_DONE)
+
+# #########################################################################
+
+clean:
+	rm $(OBJECTS)
+
+cleanall:
+	rm $(OBJECTS) $(EXEC)
+
+# #########################################################################
+
+.PHONY: all dump refresh_adfiles clean cleanall
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/build_vm_def.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# If we're cross compiling use that path for nm
+if [ "$CROSS_COMPILE_ARCH" != "" ]; then 
+NM=$ALT_COMPILER_PATH/nm
+else
+# On AIX we must avoid picking up the 'nm' from GNU binutils, which may be
+# installed under /opt/freeware/bin, so we use an absolute path here.
+NM=/usr/bin/nm
+fi
+
+$NM -X64 -B -C $* \
+    | awk '{
+              if (($2 == "d" || $2 == "D") && ($3 ~ /^__vft/ || $3 ~ /^gHotSpotVM/)) print "\t" $3 ";"
+              if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
+              if ($3 ~ /^SharedArchivePath__9Arguments$/) print "\t" $3 ";"
+          }' \
+    | sort -u
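+
+# Illustrative output: each matching symbol becomes one tab-indented,
+# semicolon-terminated line, e.g. (hypothetical mangled name)
+#	__vft10JavaThread;
+# sorted and de-duplicated, ready to be spliced in at the
+# "INSERT VTABLE SYMBOLS HERE" marker of the linker mapfiles.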
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/buildtree.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,364 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Usage:
+#
+# $(MAKE) -f buildtree.make SRCARCH=srcarch BUILDARCH=buildarch LIBARCH=libarch
+#         GAMMADIR=dir OS_FAMILY=os VARIANT=variant
+#
+# The macros ARCH, GAMMADIR, OS_FAMILY and VARIANT must be defined in the
+# environment or on the command-line:
+#
+# ARCH		- sparc, i486, ... HotSpot cpu and os_cpu source directory
+# BUILDARCH     - build directory
+# LIBARCH       - the corresponding directory in JDK/JRE
+# GAMMADIR	- top of workspace
+# OS_FAMILY	- operating system
+# VARIANT	- core, compiler1, compiler2, or tiered
+# HOTSPOT_RELEASE_VERSION - <major>.<minor>-b<nn> (11.0-b07)
+# HOTSPOT_BUILD_VERSION   - internal, internal-$(USER_RELEASE_SUFFIX) or empty
+# JRE_RELEASE_VERSION     - <major>.<minor>.<micro> (1.7.0)
+#
+# Builds the directory trees with makefiles plus some convenience files in
+# each directory:
+#
+# Makefile	- for "make foo"
+# flags.make	- with macro settings
+# vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
+# adlc.make	- compile, link, and run the adlc
+# trace.make	- generate tracing event and type definitions
+# jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
+# sa.make	- generate SA jar file and natives
+#
+# The makefiles are split this way so that "make foo" will run faster by not
+# having to read the dependency files for the vm.
+
+-include $(SPEC)
+include $(GAMMADIR)/make/scm.make
+include $(GAMMADIR)/make/defs.make
+include $(GAMMADIR)/make/altsrc.make
+
+
+# 'gmake MAKE_VERBOSE=y' or 'gmake QUIETLY=' gives all the gory details.
+QUIETLY$(MAKE_VERBOSE)	= @
+
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  PLATFORM_FILE = $(shell dirname $(shell dirname $(shell pwd)))/platform_zero
+else
+  ifdef USE_SUNCC
+    PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH).suncc
+  else
+    PLATFORM_FILE = $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH)
+  endif
+endif
+
+# Allow overriding of the arch part of the directory but default
+# to BUILDARCH if nothing is specified
+ifeq ($(VARIANTARCH),)
+  VARIANTARCH=$(BUILDARCH)
+endif
+
+ifdef FORCE_TIERED
+ifeq		($(VARIANT),tiered)
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_compiler2
+else
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
+endif
+else
+PLATFORM_DIR    = $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
+endif
+
+#
+# We do two levels of exclusion in the shared directory.
+# TOPLEVEL excludes are pruned, they are not recursively searched,
+# but lower level directories can be named without fear of collision.
+# ALWAYS excludes are excluded at any level in the directory tree.
+#
+
+ALWAYS_EXCLUDE_DIRS     = $(SCM_DIRS)
+
+ifeq		($(VARIANT),tiered)
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name agent
+else
+ifeq		($(VARIANT),compiler2)
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name c1 -o -name agent
+else
+# compiler1 and core use the same exclude list
+TOPLEVEL_EXCLUDE_DIRS	= $(ALWAYS_EXCLUDE_DIRS) -o -name adlc -o -name opto -o -name libadt -o -name agent
+endif
+endif
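+
+# For example, with VARIANT=compiler2 the find(1) invocations below prune
+# the adlc, c1 and agent directories at the top level of the shared tree,
+# while the $(SCM_DIRS) metadata directories are pruned at any depth.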
+
+# Get things from the platform file.
+COMPILER	= $(shell sed -n 's/^compiler[ 	]*=[ 	]*//p' $(PLATFORM_FILE))
+
+SIMPLE_DIRS	= \
+	$(PLATFORM_DIR)/generated/dependencies \
+	$(PLATFORM_DIR)/generated/adfiles \
+	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles
+
+TARGETS      = debug fastdebug optimized product
+SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
+
+# For dependencies and recursive makes.
+BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
+
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+
+BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
+	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
+
+# Define variables to be set in flags.make.
+# Default values are set in make/defs.make.
+ifeq ($(HOTSPOT_BUILD_VERSION),)
+  HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)
+else
+  HS_BUILD_VER=$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)
+endif
+# Set BUILD_USER from system-dependent hints:  $LOGNAME, $(whoami)
+ifndef HOTSPOT_BUILD_USER
+  HOTSPOT_BUILD_USER := $(shell echo $$LOGNAME)
+endif
+ifndef HOTSPOT_BUILD_USER
+  HOTSPOT_BUILD_USER := $(shell whoami)
+endif
+# Define HOTSPOT_VM_DISTRO based on settings in make/openjdk_distro
+# or make/hotspot_distro.
+ifndef HOTSPOT_VM_DISTRO
+  ifeq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
+    include $(GAMMADIR)/make/hotspot_distro
+  else
+    include $(GAMMADIR)/make/openjdk_distro
+  endif
+endif
+
+# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
+ifndef OPENJDK
+  ifneq ($(call if-has-altsrc,$(HS_COMMON_SRC)/,true,false),true)
+    OPENJDK=true
+  endif
+endif
+
+BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION=  JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
+
+BUILDTREE	= \
+	$(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS)
+
+BUILDTREE_COMMENT	= echo "\# Generated by $(BUILDTREE_MAKE)"
+
+all:  $(SUBMAKE_DIRS)
+
+# Run make in each subdirectory recursively.
+$(SUBMAKE_DIRS): $(SIMPLE_DIRS) FORCE
+	$(QUIETLY) [ -d $@ ] || { mkdir -p $@; }
+	$(QUIETLY) cd $@ && $(BUILDTREE) TARGET=$(@F)
+	$(QUIETLY) touch $@
+
+$(SIMPLE_DIRS):
+	$(QUIETLY) mkdir -p $@
+
+# Convenience macro which takes a source relative path, applies $(1) to the
+# absolute path, and then replaces $(GAMMADIR) in the result with a
+# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
+gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
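+# For example (a sketch, assuming no alternate source tree), the call
+#   $(call gamma-path,commonsrc,share/vm)
+# produces the literal text "$(GAMMADIR)/src/share/vm", which the
+# generated flags.make re-expands when make later reads it.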
+
+# This bit is needed to enable local rebuilds.
+# Unless the makefile itself sets LP64, any environmental
+# setting of LP64 will interfere with the build.
+LP64_SETTING/32 = LP64 = \#empty
+LP64_SETTING/64 = LP64 = 1
+
+DATA_MODE/ppc64 = 64
+
+DATA_MODE = $(DATA_MODE/$(BUILDARCH))
+
+flags.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo "Platform_file = $(PLATFORM_FILE)" | sed 's|$(GAMMADIR)|$$(GAMMADIR)|'; \
+	sed -n '/=/s/^ */Platform_/p' < $(PLATFORM_FILE); \
+	echo; \
+	echo "GAMMADIR = $(GAMMADIR)"; \
+	echo "HS_ALT_MAKE = $(HS_ALT_MAKE)"; \
+	echo "OSNAME = $(OSNAME)"; \
+	echo "SYSDEFS = \$$(Platform_sysdefs)"; \
+	echo "SRCARCH = $(SRCARCH)"; \
+	echo "BUILDARCH = $(BUILDARCH)"; \
+	echo "LIBARCH = $(LIBARCH)"; \
+	echo "TARGET = $(TARGET)"; \
+	echo "HS_BUILD_VER = $(HS_BUILD_VER)"; \
+	echo "JRE_RELEASE_VER = $(JRE_RELEASE_VERSION)"; \
+	echo "SA_BUILD_VERSION = $(HS_BUILD_VER)"; \
+	echo "HOTSPOT_BUILD_USER = $(HOTSPOT_BUILD_USER)"; \
+	echo "HOTSPOT_VM_DISTRO = $(HOTSPOT_VM_DISTRO)"; \
+	echo "OPENJDK = $(OPENJDK)"; \
+	echo "$(LP64_SETTING/$(DATA_MODE))"; \
+	echo; \
+	echo "# Used for platform dispatching"; \
+	echo "TARGET_DEFINES  = -DTARGET_OS_FAMILY_\$$(Platform_os_family)"; \
+	echo "TARGET_DEFINES += -DTARGET_ARCH_\$$(Platform_arch)"; \
+	echo "TARGET_DEFINES += -DTARGET_ARCH_MODEL_\$$(Platform_arch_model)"; \
+	echo "TARGET_DEFINES += -DTARGET_OS_ARCH_\$$(Platform_os_arch)"; \
+	echo "TARGET_DEFINES += -DTARGET_OS_ARCH_MODEL_\$$(Platform_os_arch_model)"; \
+	echo "TARGET_DEFINES += -DTARGET_COMPILER_\$$(Platform_compiler)"; \
+	echo "CFLAGS += \$$(TARGET_DEFINES)"; \
+	echo; \
+	echo "Src_Dirs_V = \\"; \
+	sed 's/$$/ \\/;s|$(GAMMADIR)|$$(GAMMADIR)|' ../shared_dirs.lst; \
+	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
+	echo; \
+	echo "Src_Dirs_I = \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/prims) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/prims) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm) \\"; \
+	echo "$(call gamma-path,altsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,commonsrc,share/vm/precompiled) \\"; \
+	echo "$(call gamma-path,altsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,cpu/$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os_cpu/$(OS_FAMILY)_$(SRCARCH)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/$(OS_FAMILY)/vm) \\"; \
+	echo "$(call gamma-path,altsrc,os/posix/vm) \\"; \
+	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
+	[ -n "$(CFLAGS_BROWSE)" ] && \
+	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
+	[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
+	    echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
+	[ -n "$(OBJCOPY)" ] && \
+	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
+	[ -n "$(STRIP_POLICY)" ] && \
+	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
+	[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
+	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
+	[ -n "$(ZIPEXE)" ] && \
+	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
+	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
+	    echo && \
+	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
+	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
+	[ -n "$(INCLUDE_TRACE)" ] && \
+	    echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
+	echo; \
+	[ -n "$(SPEC)" ] && \
+	    echo "include $(SPEC)"; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(VARIANT).make"; \
+	echo "include \$$(GAMMADIR)/make/excludeSrc.make"; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(COMPILER).make"; \
+	) > $@
+
+flags_vm.make: $(BUILDTREE_MAKE) ../shared_dirs.lst
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
+	) > $@
+
+../shared_dirs.lst:  $(BUILDTREE_MAKE) $(GAMMADIR)/src/share/vm
+	@echo Creating directory list $@
+	$(QUIETLY) if [ -d $(HS_ALT_SRC)/share/vm ]; then \
+          find $(HS_ALT_SRC)/share/vm/* -prune \
+	  -type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
+          \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; > $@; \
+        fi;
+	$(QUIETLY) find $(HS_COMMON_SRC)/share/vm/* -prune \
+	-type d \! \( $(TOPLEVEL_EXCLUDE_DIRS) \) -exec find {} \
+        \( $(ALWAYS_EXCLUDE_DIRS) \) -prune -o -type d -print \; >> $@
+
+Makefile: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/top.make"; \
+	) > $@
+
+vm.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo include flags_vm.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+adlc.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+jvmti.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+sa.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
+FORCE:
+
+.PHONY:  all FORCE
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/compiler2.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making server version of VM
+
+TYPE=COMPILER2
+
+VM_SUBDIR = server
+
+CFLAGS += -DCOMPILER2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/core.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,33 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making core version of VM
+
+# Select which files to use (in top.make)
+TYPE=CORE
+
+# There is no "core" directory in JDK. Install core build in server directory.
+VM_SUBDIR = server
+
+# Note:  macros.hpp defines CORE
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/debug.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,41 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Sets make macros for making debug version of VM
+
+# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
+DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
+DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
+CFLAGS += $(DEBUG_CFLAGS/BYFILE)
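+
+# The BYFILE idiom selects flags per object file: if DEBUG_CFLAGS/$@ is
+# set (e.g., hypothetically, "DEBUG_CFLAGS/os_aix.o = -qfullpath"), it
+# wins; otherwise the inner reference collapses to DEBUG_CFLAGS/DEFAULT.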
+
+# Set the environment variable HOTSPARC_GENERIC to "true"
+# to inhibit the effect of the previous line on CFLAGS.
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
+
+VERSION = debug
+SYSDEFS += -DASSERT -DDEBUG
+PICFLAGS = DEFAULT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/defs.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,231 @@
+#
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# The common definitions for hotspot AIX builds.
+# Include the top level defs.make under make directory instead of this one.
+# This file is included into make/defs.make.
+
+SLASH_JAVA ?= /java
+
+# Need PLATFORM (os-arch combo names) for jdk and hotspot, plus libarch name
+#ARCH:=$(shell uname -m)
+PATH_SEP = :
+ifeq ($(LP64), 1)
+  ARCH_DATA_MODEL ?= 64
+else
+  ARCH_DATA_MODEL ?= 32
+endif
+
+ifeq ($(ARCH_DATA_MODEL), 64)
+  ARCH = ppc64
+else
+  ARCH = ppc
+endif
+
+# PPC
+ifeq ($(ARCH), ppc)
+  #ARCH_DATA_MODEL = 32
+  PLATFORM         = aix-ppc
+  VM_PLATFORM      = aix_ppc
+  HS_ARCH          = ppc
+endif
+
+# PPC64
+ifeq ($(ARCH), ppc64)
+  #ARCH_DATA_MODEL = 64
+  MAKE_ARGS       += LP64=1
+  PLATFORM         = aix-ppc64
+  VM_PLATFORM      = aix_ppc64
+  HS_ARCH          = ppc
+endif
+
+# On 32-bit AIX we build server and client; on 64-bit, just server.
+ifeq ($(JVM_VARIANTS),)
+  ifeq ($(ARCH_DATA_MODEL), 32)
+    JVM_VARIANTS:=client,server
+    JVM_VARIANT_CLIENT:=true
+    JVM_VARIANT_SERVER:=true
+  else
+    JVM_VARIANTS:=server
+    JVM_VARIANT_SERVER:=true
+  endif
+endif
+
+# Determine if HotSpot is being built in a JDK6 or earlier version.
+JDK6_OR_EARLIER=0
+ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
+  # if the longer variable names (newer build style) are set, then check those
+  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+else
+  # the longer variables aren't set so check the shorter variable names
+  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
+    JDK6_OR_EARLIER=1
+  endif
+endif
+
+ifeq ($(JDK6_OR_EARLIER),0)
+  # Full Debug Symbols is supported on JDK7 or newer.
+  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
+  # builds is enabled with debug info files ZIP'ed to save space. For
+  # BUILD_FLAVOR != product builds, FDS is always enabled; after all, a
+  # debug build without debug info isn't very useful.
+  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
+  #
+  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
+  # disabled for a BUILD_FLAVOR == product build.
+  #
+  # Note: Use of a different variable name for the FDS override option
+  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
+  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
+  # in options via environment variables, use of distinct variables
+  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
+  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
+  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
+  # the same variable name is used, then different values can be picked
+  # up by different parts of the build. Just to be clear, we only need
+  # two variable names because the incoming option value can be
+  # overridden in some situations, e.g., a BUILD_FLAVOR != product
+  # build.
+
+  # Due to the multiple sub-make processes that occur, this logic gets
+  # executed multiple times. We reduce the noise by at least checking that
+  # BUILD_FLAVOR has been set.
+  ifneq ($(BUILD_FLAVOR),)
+    ifeq ($(BUILD_FLAVOR), product)
+      FULL_DEBUG_SYMBOLS ?= 1
+      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
+    else
+      # debug variants always get Full Debug Symbols (if available)
+      ENABLE_FULL_DEBUG_SYMBOLS = 1
+    endif
+    _JUNK_ := $(shell \
+      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
+
+    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+      # Default OBJCOPY comes from GNU Binutils on Linux
+      ifeq ($(CROSS_COMPILE_ARCH),)
+        DEF_OBJCOPY=/usr/bin/objcopy
+      else
+        # Assume objcopy is part of the cross-compilation toolset
+        ifneq ($(ALT_COMPILER_PATH),)
+          DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
+        endif
+      endif
+      OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
+      ifneq ($(ALT_OBJCOPY),)
+        _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
+        OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
+      endif
+
+      ifeq ($(OBJCOPY),)
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo files. You may need to set ALT_OBJCOPY.")
+        ENABLE_FULL_DEBUG_SYMBOLS=0
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
+      else
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo files.")
+
+        # Library stripping policies for .debuginfo configs:
+        #   all_strip - strips everything from the library
+        #   min_strip - strips most stuff from the library; leaves minimum symbols
+        #   no_strip  - does not strip the library at all
+        #
+        # Oracle security policy requires "all_strip". A waiver was granted on
+        # 2011.09.01 that permits using "min_strip" in the Java JDK and Java JRE.
+        #
+        # Currently, STRIP_POLICY is only used when Full Debug Symbols is enabled.
+        #
+        STRIP_POLICY ?= min_strip
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
+
+        ZIP_DEBUGINFO_FILES ?= 1
+
+        _JUNK_ := $(shell \
+          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
+      endif
+    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
+  endif # BUILD_FLAVOR
+endif # JDK6_OR_EARLIER
+
+# unused JDK_INCLUDE_SUBDIR=aix
+
+# Library suffix
+LIBRARY_SUFFIX=so
+
+EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
+
+# client and server subdirectories have symbolic links to ../libjsig.so
+EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+#  else
+#    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+#  endif
+#endif
+EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
+EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
+EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
+
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
+#  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
+#    else
+#      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+#    endif
+#  endif
+endif
+
+ifeq ($(JVM_VARIANT_CLIENT),true)
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
+  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
+#  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
+#    else
+#      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+#    endif
+#  endif
+endif
+
+# Serviceability Binaries
+# No SA Support for PPC or zero
+ADD_SA_BINARIES/ppc   =
+ADD_SA_BINARIES/ppc64 =
+ADD_SA_BINARIES/zero  =
+
+EXPORT_LIST += $(ADD_SA_BINARIES/$(HS_ARCH))
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/dtrace.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,27 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# AIX does not build jvm_db
+LIBJVM_DB =
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/fastdebug.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,73 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making debug version of VM
+
+# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
+# Pare down optimization to -O2 if xlCV10.1 is in use.
+OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS) $(QV10_OPT_CONSERVATIVE)
+OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
+
+# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
+
+ifeq ($(BUILDARCH), ia64)
+  # Bug in GCC, causes hang.  -O1 will override the -O3 specified earlier
+  OPT_CFLAGS/callGenerator.o += -O1
+  OPT_CFLAGS/ciTypeFlow.o += -O1
+  OPT_CFLAGS/compile.o += -O1
+  OPT_CFLAGS/concurrentMarkSweepGeneration.o += -O1
+  OPT_CFLAGS/doCall.o += -O1
+  OPT_CFLAGS/generateOopMap.o += -O1
+  OPT_CFLAGS/generateOptoStub.o += -O1
+  OPT_CFLAGS/graphKit.o += -O1
+  OPT_CFLAGS/instanceKlass.o += -O1
+  OPT_CFLAGS/interpreterRT_ia64.o += -O1
+  OPT_CFLAGS/output.o += -O1
+  OPT_CFLAGS/parse1.o += -O1
+  OPT_CFLAGS/runtime.o += -O1
+  OPT_CFLAGS/synchronizer.o += -O1
+endif
+
+
+# If you set HOTSPARC_GENERIC=yes, you disable all OPT_CFLAGS settings
+CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
+
+# Set the environment variable HOTSPARC_GENERIC to "true"
+# to inhibit the effect of the previous line on CFLAGS.
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
+
+# xlc 10.1 parameters for ipa linkage.
+#  - We remove ipa linkage altogether: it does not seem to benefit
+#    performance, but it increases code footprint.
+#  - Besides, this is a debug build in the end, so the extra effort of
+#    ipa linkage is not justified.
+LFLAGS_QIPA=
+
+G_SUFFIX = _g
+VERSION = optimized
+SYSDEFS += -DASSERT -DFASTDEBUG
+PICFLAGS = DEFAULT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/jsig.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,87 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Rules to build signal interposition library, used by vm.make
+
+# libjsig.so: signal interposition library
+JSIG = jsig
+LIBJSIG = lib$(JSIG).so
+
+LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
+LIBJSIG_DIZ         = lib$(JSIG).diz
+
+JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
+
+DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
+DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
+DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
+
+LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
+
+# As on Linux, we really don't want a mapfile here: this library is small
+# and preloaded using LD_PRELOAD, and making its functions private would
+# cause problems with interposing. See CR: 6466665
+# LFLAGS_JSIG += $(MAPFLAG:FILENAME=$(LIBJSIG_MAPFILE))
+
+LFLAGS_JSIG += -D_GNU_SOURCE -D_REENTRANT $(LDFLAGS_HASH_STYLE)
+
+LFLAGS_JSIG += $(BIN_UTILS)
+
+# DEBUG_BINARIES overrides everything, use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  JSIG_DEBUG_CFLAGS = -g
+endif
+
+$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
+	@echo Making signal interposition lib...
+	$(QUIETLY) $(CXX) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
+                         $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
+
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
+#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+#  ifeq ($(STRIP_POLICY),all_strip)
+#	$(QUIETLY) $(STRIP) $@
+#  else
+#    ifeq ($(STRIP_POLICY),min_strip)
+#	$(QUIETLY) $(STRIP) -g $@
+#    # implied else here is no stripping at all
+#    endif
+#  endif
+#  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
+#	$(RM) $(LIBJSIG_DEBUGINFO)
+#  endif
+#endif
+
+install_jsig: $(LIBJSIG)
+	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
+	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
+	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
+	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
+	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
+
+.PHONY: install_jsig
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/jvmti.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,118 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# This makefile (jvmti.make) is included from the jvmti.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate jvmti.
+
+include $(GAMMADIR)/make/aix/makefiles/rules.make
+
+# #########################################################################
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+
+JvmtiSrcDir = $(GAMMADIR)/src/share/vm/prims
+InterpreterSrcDir = $(GAMMADIR)/src/share/vm/interpreter
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(JvmtiSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+JvmtiGeneratedNames = \
+        jvmtiEnv.hpp \
+        jvmtiEnter.cpp \
+        jvmtiEnterTrace.cpp \
+        jvmtiEnvRecommended.cpp \
+        bytecodeInterpreterWithChecks.cpp \
+        jvmti.h \
+
+JvmtiEnvFillSource = $(JvmtiSrcDir)/jvmtiEnvFill.java
+JvmtiEnvFillClass = $(JvmtiOutDir)/jvmtiEnvFill.class
+
+JvmtiGenSource = $(JvmtiSrcDir)/jvmtiGen.java
+JvmtiGenClass = $(JvmtiOutDir)/jvmtiGen.class
+
+JvmtiGeneratedFiles = $(JvmtiGeneratedNames:%=$(JvmtiOutDir)/%)
+
+XSLT = $(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+.PHONY: all jvmtidocs clean cleanall
+
+# #########################################################################
+
+all: $(JvmtiGeneratedFiles)
+
+both = $(JvmtiGenClass) $(JvmtiSrcDir)/jvmti.xml $(JvmtiSrcDir)/jvmtiLib.xsl
+
+$(JvmtiGenClass): $(JvmtiGenSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiGenSource)
+
+$(JvmtiEnvFillClass): $(JvmtiEnvFillSource)
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -d $(JvmtiOutDir) $(JvmtiEnvFillSource)
+
+$(JvmtiOutDir)/jvmtiEnter.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnter.cpp -PARAM interface jvmti
+
+$(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp: $(JvmtiGenClass) $(InterpreterSrcDir)/bytecodeInterpreter.cpp $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xml -XSL $(InterpreterSrcDir)/bytecodeInterpreterWithChecks.xsl -OUT $(JvmtiOutDir)/bytecodeInterpreterWithChecks.cpp 
+
+$(JvmtiOutDir)/jvmtiEnterTrace.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnter.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnter.xsl -OUT $(JvmtiOutDir)/jvmtiEnterTrace.cpp -PARAM interface jvmti -PARAM trace Trace
+
+$(JvmtiOutDir)/jvmtiEnvRecommended.cpp: $(both) $(JvmtiSrcDir)/jvmtiEnv.xsl $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiEnvFillClass)
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiEnv.xsl -OUT $(JvmtiOutDir)/jvmtiEnvStub.cpp
+	$(QUIETLY) $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiEnvFill $(JvmtiSrcDir)/jvmtiEnv.cpp $(JvmtiOutDir)/jvmtiEnvStub.cpp $(JvmtiOutDir)/jvmtiEnvRecommended.cpp
+
+$(JvmtiOutDir)/jvmtiEnv.hpp: $(both) $(JvmtiSrcDir)/jvmtiHpp.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiHpp.xsl -OUT $(JvmtiOutDir)/jvmtiEnv.hpp
+
+$(JvmtiOutDir)/jvmti.h: $(both) $(JvmtiSrcDir)/jvmtiH.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmtiH.xsl -OUT $(JvmtiOutDir)/jvmti.h
+
+jvmtidocs:  $(JvmtiOutDir)/jvmti.html 
+
+$(JvmtiOutDir)/jvmti.html: $(both) $(JvmtiSrcDir)/jvmti.xsl
+	@echo Generating $@
+	$(XSLT) -IN $(JvmtiSrcDir)/jvmti.xml -XSL $(JvmtiSrcDir)/jvmti.xsl -OUT $(JvmtiOutDir)/jvmti.html
+
+# #########################################################################
+
+clean :
+	rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
+
+cleanall :
+	rm $(JvmtiGenClass) $(JvmtiEnvFillClass) $(JvmtiGeneratedFiles)
+
+# #########################################################################
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-debug	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,274 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define public interface.
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
+
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetFieldTypeAnnotations;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetMethodTypeAnnotations;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
+
+                # debug JVM
+                JVM_AccessVMBooleanFlag;
+                JVM_AccessVMIntFlag;
+                JVM_VMBreakPoint;
+
+                # miscellaneous functions
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
+
+                # This is for Forte Analyzer profiling support.
+                AsyncGetCallTrace;
+
+                # INSERT VTABLE SYMBOLS HERE
+
+        local:
+                *;
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-jsig	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define library interface.
+
+SUNWprivate_1.1 {
+        global:
+            JVM_begin_signal_setting;
+            JVM_end_signal_setting;
+            JVM_get_libjsig_version;
+            JVM_get_signal_action;
+            sigaction;
+            signal;
+            sigset;
+        local:
+                *;
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/mapfile-vers-product	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,267 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Define public interface.
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
+
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
+
+                # miscellaneous functions
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
+
+                # This is for Forte Analyzer profiling support.
+                AsyncGetCallTrace;
+
+                # INSERT VTABLE SYMBOLS HERE
+
+        local:
+                *;
+};
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/ppc64.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,94 @@
+#
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Produce 64 bits object files.
+CFLAGS += -q64
+
+# Balanced tuning for recent versions of the POWER architecture (if supported by xlc).
+QTUNE=$(if $(CXX_SUPPORTS_BALANCED_TUNING),balanced,pwr5)
+
+# Try to speed up the interpreter: use ppc64 instructions and inline 
+# glue code for external functions.
+OPT_CFLAGS += -qarch=ppc64 -qtune=$(QTUNE) -qinlglue
+
+# We need variable length arrays
+CFLAGS += -qlanglvl=c99vla
+# Just to check for unwanted macro redefinitions
+CFLAGS += -qlanglvl=noredefmac
+
+# Suppress those "implicit private" warnings xlc gives.
+#  - The omitted keyword "private" is assumed for base class "...".
+CFLAGS += -qsuppress=1540-0198
+
+# Suppress the following numerous warning:
+#  - 1540-1090 (I) The destructor of "..." might not be called.
+#  - 1500-010: (W) WARNING in ...: Infinite loop.  Program may not stop.
+#    There are several infinite loops in the vm; suppress this warning.
+CFLAGS += -qsuppress=1540-1090 -qsuppress=1500-010
+
+# Suppress
+#  - 1540-1088 (W) The exception specification is being ignored.
+# This is caused by the throw() in the declaration of new() in nmethod.hpp.
+CFLAGS += -qsuppress=1540-1088
+
+# Turn off floating-point optimizations that may alter program semantics
+OPT_CFLAGS += -qstrict
+
+# Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp 
+# and sharedRuntimeTrans.cpp on ppc64. 
+# -qstrict turns off the following optimizations:
+#   * Performing code motion and scheduling on computations such as loads
+#     and floating-point computations that may trigger an exception.
+#   * Relaxing conformance to IEEE rules.
+#   * Reassociating floating-point expressions.
+# When using '-qstrict' there still remains one problem
+# in javasoft.sqe.tests.api.java.lang.Math.sin5Tests when run in compile-all
+# mode, so don't optimize sharedRuntimeTrig.cpp at all.
+OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
+OPT_CFLAGS/sharedRuntimeTrans.o = $(OPT_CFLAGS/NOOPT)
+
+# xlC V10.1 parameters for ipa compile.
+QIPA_COMPILE=$(if $(CXX_IS_V10),-qipa)
+
+# xlC V10.1 parameters for aggressive optimization:
+# - qhot=level=1: Most aggressive loop optimizations.
+# - qignerrno: Assume errno is not modified by system calls.
+# - qinline: Inline method calls. No suboptions for c++ compiles.
+# - qxflag=ASMMIDCOALFIX: Activate fix for -O3 problem in interpreter loop.
+# - qxflag=asmfastsync: Activate fix for performance problem with inline assembler with memory clobber.
+QV10_OPT=$(if $(CXX_IS_V10),-qxflag=ASMMIDCOALFIX -qxflag=asmfastsync)
+QV10_OPT_AGGRESSIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
+QV10_OPT_CONSERVATIVE=$(if $(CXX_IS_V10),-qhot=level=1 -qignerrno -qinline)
+
+# Disallow inlining for synchronizer.cpp, but perform O3 optimizations.
+OPT_CFLAGS/synchronizer.o = $(OPT_CFLAGS) -qnoinline
+
+# Set all the xlC V10.1 options here.
+OPT_CFLAGS += $(QIPA_COMPILE) $(QV10_OPT) $(QV10_OPT_AGGRESSIVE)
+
+export OBJECT_MODE=64
+
+# Also build launcher as 64 bit executable.
+LAUNCHERFLAGS += -q64
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/product.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Sets make macros for making an optimized version of the Gamma VM
+# (This is the "product", not the "release" version.)
+
+# Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
+OPT_CFLAGS/DEFAULT= $(OPT_CFLAGS)
+OPT_CFLAGS/BYFILE = $(OPT_CFLAGS/$@)$(OPT_CFLAGS/DEFAULT$(OPT_CFLAGS/$@))
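+# Illustrative sketch of how BYFILE resolves: with a hypothetical per-file
+# setting OPT_CFLAGS/foo.o = -qnoopt, compiling foo.o expands the line above
+# to $(OPT_CFLAGS/foo.o)$(OPT_CFLAGS/DEFAULT-qnoopt), i.e. just -qnoopt,
+# because the second variable is undefined and therefore empty; files without
+# a per-file setting fall back to $(OPT_CFLAGS/DEFAULT).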
+
+# (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
+
+# Setting the environment variable HOTSPARC_GENERIC to any non-empty value
+# (e.g. "true") disables all OPT_CFLAGS settings: the += below then appends
+# to an unused variable such as CFLAGStrue instead of to CFLAGS.
+CFLAGS$(HOTSPARC_GENERIC) += $(OPT_CFLAGS/BYFILE)
+
+# Linker mapfile
+MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-product
+
+# Remove ipa linkage altogether. It does not seem to benefit performance, but it increases the code footprint.
+LFLAGS_QIPA=
+
+SYSDEFS += -DPRODUCT
+VERSION = optimized
+
+# use -g to strip library as -x will discard its symbol table; -x is fine for
+# executables.
+# Note: these macros are not used in .debuginfo configs
+STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
+STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
+
+# If we can create .debuginfo files, then the VM is stripped in vm.make
+# and this macro is not used.
+# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/rules.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,203 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# Common rules/macros for the vm, adlc.
+
+# Tell make that .cpp is important
+.SUFFIXES: .cpp $(SUFFIXES)
+
+DEMANGLER       = c++filt
+DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
+
+# $(CC) is the C compiler (cc/gcc), $(CXX) is the C++ compiler (CC/g++).
+CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
+
+AS.S            = $(AS) $(ASFLAGS)
+
+COMPILE.CC       = $(CC_COMPILE) -c
+GENASM.CC        = $(CC_COMPILE) -S
+LINK.CC          = $(CC) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_LIB.CC      = $(CC) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CC    = $(CC_COMPILE) -E
+
+COMPILE.CXX      = $(CXX_COMPILE) -c
+GENASM.CXX       = $(CXX_COMPILE) -S
+LINK.CXX         = $(CXX) $(LFLAGS) $(AOUT_FLAGS) $(PROF_AOUT_FLAGS)
+LINK_NOPROF.CXX  = $(CXX) $(LFLAGS) $(AOUT_FLAGS)
+LINK_LIB.CXX     = $(CXX) $(LFLAGS) $(SHARED_FLAG)
+PREPROCESS.CXX   = $(CXX_COMPILE) -E
+
+# cross compiling the jvm with c2 requires host compilers to build
+# adlc tool
+
+HOST.CXX_COMPILE      = $(HOSTCXX) $(CXXFLAGS) $(CFLAGS)
+HOST.COMPILE.CXX      = $(HOST.CXX_COMPILE) -c
+HOST.LINK_NOPROF.CXX  = $(HOSTCXX) $(LFLAGS) $(AOUT_FLAGS)
+
+
+# Effect of REMOVE_TARGET is to delete out-of-date files during "gnumake -k".
+REMOVE_TARGET   = rm -f $@
+
+# Note use of ALT_BOOTDIR to explicitly specify location of java and
+# javac; this is the same environment variable used in the J2SE build
+# process for overriding the default spec, which is BOOTDIR.
+# Note also that we fall back to using JAVA_HOME if neither of these is
+# specified.
+
+ifdef ALT_BOOTDIR
+
+RUN.JAVA  = $(ALT_BOOTDIR)/bin/java
+RUN.JAVAP = $(ALT_BOOTDIR)/bin/javap
+RUN.JAVAH = $(ALT_BOOTDIR)/bin/javah
+RUN.JAR   = $(ALT_BOOTDIR)/bin/jar
+COMPILE.JAVAC = $(ALT_BOOTDIR)/bin/javac
+COMPILE.RMIC = $(ALT_BOOTDIR)/bin/rmic
+BOOT_JAVA_HOME = $(ALT_BOOTDIR)
+
+else
+
+ifdef BOOTDIR
+
+RUN.JAVA  = $(BOOTDIR)/bin/java
+RUN.JAVAP = $(BOOTDIR)/bin/javap
+RUN.JAVAH = $(BOOTDIR)/bin/javah
+RUN.JAR   = $(BOOTDIR)/bin/jar
+COMPILE.JAVAC = $(BOOTDIR)/bin/javac
+COMPILE.RMIC  = $(BOOTDIR)/bin/rmic
+BOOT_JAVA_HOME = $(BOOTDIR)
+
+else
+
+ifdef JAVA_HOME
+
+RUN.JAVA  = $(JAVA_HOME)/bin/java
+RUN.JAVAP = $(JAVA_HOME)/bin/javap
+RUN.JAVAH = $(JAVA_HOME)/bin/javah
+RUN.JAR   = $(JAVA_HOME)/bin/jar
+COMPILE.JAVAC = $(JAVA_HOME)/bin/javac
+COMPILE.RMIC  = $(JAVA_HOME)/bin/rmic
+BOOT_JAVA_HOME = $(JAVA_HOME)
+
+else
+
+# Take the tools from the PATH if ALT_BOOTDIR, BOOTDIR and JAVA_HOME are not
+# defined. Note that this is to support a hotspot build without SA; to build
+# SA along with hotspot, you need to define ALT_BOOTDIR, BOOTDIR or JAVA_HOME.
+
+RUN.JAVA  = java
+RUN.JAVAP = javap
+RUN.JAVAH = javah
+RUN.JAR   = jar
+COMPILE.JAVAC = javac
+COMPILE.RMIC  = rmic
+
+endif
+endif
+endif
+
+COMPILE.JAVAC += $(BOOTSTRAP_JAVAC_FLAGS)
+
+SUM = /usr/bin/sum
+
+# 'gmake MAKE_VERBOSE=y' gives all the gory details.
+QUIETLY$(MAKE_VERBOSE)  = @
+RUN.JAR$(MAKE_VERBOSE) += >/dev/null
+
+# Settings for javac
+BOOT_SOURCE_LANGUAGE_VERSION = 6
+BOOT_TARGET_CLASS_VERSION = 6
+JAVAC_FLAGS = -g -encoding ascii
+BOOTSTRAP_JAVAC_FLAGS = $(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION)
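+# With the settings above, the bootstrap compile is invoked roughly as
+#   javac -g -encoding ascii -source 6 -target 6 ...
+# (a sketch; COMPILE.JAVAC supplies the resolved javac path in front).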
+
+# With parallel makes, print a message at the end of compilation.
+ifeq    ($(findstring j,$(MFLAGS)),j)
+COMPILE_DONE    = && { echo Done with $<; }
+endif
+
+# Include $(NONPIC_OBJ_FILES) definition
+ifndef LP64
+include $(GAMMADIR)/make/pic.make
+endif
+
+include $(GAMMADIR)/make/altsrc.make
+
+# The non-PIC object files are only generated for 32-bit platforms.
+ifdef LP64
+%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+else
+%.o: %.cpp
+	@echo Compiling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
+	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
+	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
+endif
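+# Illustrative sketch: if NONPIC_OBJ_FILES contained a hypothetical foo.o,
+# the 32-bit rule above would compile that one file with $(VM_PICFLAG)
+# stripped from COMPILE.CXX (i.e. non-PIC); every other object is compiled
+# with the unmodified command.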
+
+%.o: %.s
+	@echo Assembling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(QUIETLY) $(AS.S) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
+
+%.s: %.cpp
+	@echo Generating assembly for $<
+	$(QUIETLY) $(GENASM.CXX) -o $@ $<
+	$(QUIETLY) $(DEMANGLE) $(COMPILE_DONE)
+
+# Intermediate files (for debugging macros)
+%.i: %.cpp
+	@echo Preprocessing $< to $@
+	$(QUIETLY) $(PREPROCESS.CXX) $< > $@ $(COMPILE_DONE)
+
+#  Override gnumake built-in rules which do sccs get operations badly.
+#  (They put the checked out code in the current directory, not in the
+#  directory of the original file.)  Since this is a symptom of a teamware
+#  failure, and since not all problems can be detected by gnumake due
+#  to incomplete dependency checking... just complain and stop.
+%:: s.%
+	@echo "========================================================="
+	@echo File $@
+	@echo is out of date with respect to its SCCS file.
+	@echo This file may be from an unresolved Teamware conflict.
+	@echo This is also a symptom of a Teamware bringover/putback failure
+	@echo in which SCCS files are updated but not checked out.
+	@echo Check for other out of date files in your workspace.
+	@echo "========================================================="
+	@exit 666
+
+%:: SCCS/s.%
+	@echo "========================================================="
+	@echo File $@
+	@echo is out of date with respect to its SCCS file.
+	@echo This file may be from an unresolved Teamware conflict.
+	@echo This is also a symptom of a Teamware bringover/putback failure
+	@echo in which SCCS files are updated but not checked out.
+	@echo Check for other out of date files in your workspace.
+	@echo "========================================================="
+	@exit 666
+
+.PHONY: default
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/sa.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,116 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (sa.make) is included from the sa.make in the
+# build directories.
+
+# This makefile is used to build Serviceability Agent java code
+# and generate JNI header file for native methods.
+
+include $(GAMMADIR)/make/aix/makefiles/rules.make
+
+include $(GAMMADIR)/make/defs.make
+
+AGENT_DIR = $(GAMMADIR)/agent
+
+include $(GAMMADIR)/make/sa.files
+
+TOPDIR    = $(shell echo `pwd`)
+GENERATED = $(TOPDIR)/../generated
+
+# tools.jar is needed by the JDI-SA binding
+SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
+
+# TODO: if it's a modules image, check if SA module is installed.
+MODULELIB_PATH= $(BOOT_JAVA_HOME)/lib/modules
+
+AGENT_FILES_LIST := $(GENERATED)/agent.classes.list
+
+SA_CLASSDIR = $(GENERATED)/saclasses
+
+SA_BUILD_VERSION_PROP = "sun.jvm.hotspot.runtime.VM.saBuildVersion=$(SA_BUILD_VERSION)"
+
+SA_PROPERTIES = $(SA_CLASSDIR)/sa.properties
+
+# if $(AGENT_DIR) does not exist, we don't build SA
+# also, we don't build SA on Itanium, PowerPC, ARM or zero.
+
+all:
+	if [ -d $(AGENT_DIR) -a "$(SRCARCH)" != "ia64" \
+             -a "$(SRCARCH)" != "arm" \
+             -a "$(SRCARCH)" != "ppc" \
+             -a "$(SRCARCH)" != "zero" ] ; then \
+	   $(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
+	fi
+
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES)
+	$(QUIETLY) echo "Making $@"
+	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
+	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
+	  exit 1; \
+	fi
+	$(QUIETLY) if [ ! -f $(SA_CLASSPATH) -a ! -d $(MODULELIB_PATH) ] ; then \
+	  echo "Missing $(SA_CLASSPATH) file. Use 1.6.0 or later version of JDK";\
+	  echo ""; \
+	  exit 1; \
+	fi
+	$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
+	  mkdir -p $(SA_CLASSDIR);        \
+	fi
+# Note: if these comment lines were indented as part of the recipe, make
+# would expand and execute the '$(shell ...)' text they quote, so they
+# must stay at column zero.
+# In some environments, cmd processors have limited line length.
+# To prevent the javac invocation in the next block from using
+# a very long cmd line, we use javac's @file-list option. We
+# generate the file lists using make's built-in 'foreach' control
+# flow which also avoids cmd processor line length issues. Since
+# the 'foreach' is done as part of make's macro expansion phase,
+# the initialization of the lists is also done in the same phase
+# using '$(shell rm ...' instead of using the more traditional
+# 'rm ...' rule.
+	$(shell rm -rf $(AGENT_FILES_LIST))
+# gnumake 3.78.1 does not accept the *'s that
+# are in AGENT_FILES, so use the shell to expand them.
+# Be extra careful not to produce overly long command lines in the shell!
+	$(foreach file,$(AGENT_FILES),$(shell ls -1 $(file) >> $(AGENT_FILES_LIST)))
+	$(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -d $(SA_CLASSDIR) @$(AGENT_FILES_LIST)
+	$(QUIETLY) $(REMOTE) $(COMPILE.RMIC)  -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
+	$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+	$(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+	$(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+	$(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+	$(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
+	$(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.amd64.AMD64ThreadContext
+	$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.sparc.SPARCThreadContext
+
+clean:
+	rm -rf $(SA_CLASSDIR)
+	rm -rf $(GENERATED)/sa-jdi.jar
+	rm -rf $(AGENT_FILES_LIST)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/saproc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,117 @@
+#
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+include $(GAMMADIR)/make/defs.make
+
+# Rules to build serviceability agent library, used by vm.make
+
+# libsaproc.so: serviceability agent
+
+SAPROC = saproc
+LIBSAPROC = lib$(SAPROC).so
+
+LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
+LIBSAPROC_DIZ         = lib$(SAPROC).diz
+
+AGENT_DIR = $(GAMMADIR)/agent
+
+SASRCDIR = $(AGENT_DIR)/src/os/$(Platform_os_family)
+
+SASRCFILES = $(SASRCDIR)/salibelf.c                   \
+             $(SASRCDIR)/symtab.c                     \
+             $(SASRCDIR)/libproc_impl.c               \
+             $(SASRCDIR)/ps_proc.c                    \
+             $(SASRCDIR)/ps_core.c                    \
+             $(SASRCDIR)/LinuxDebuggerLocal.c
+
+SAMAPFILE = $(SASRCDIR)/mapfile
+
+DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
+DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
+DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
+
+# DEBUG_BINARIES overrides everything, use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  SA_DEBUG_CFLAGS = -g
+endif
+
+# if $(AGENT_DIR) does not exist, we don't build SA
+# also, we don't build SA on Itanium, PPC, ARM or zero.
+
+ifneq ($(wildcard $(AGENT_DIR)),)
+ifneq ($(filter-out ia64 arm ppc zero,$(SRCARCH)),)
+  BUILDLIBSAPROC = $(LIBSAPROC)
+endif
+endif
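+# (The filter-out above is non-empty exactly when SRCARCH is none of ia64,
+# arm, ppc and zero (e.g. x86 or sparc), so the library is only built there.)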
+
+
+SA_LFLAGS = $(MAPFLAG:FILENAME=$(SAMAPFILE)) $(LDFLAGS_HASH_STYLE)
+
+$(LIBSAPROC): $(SASRCFILES) $(SAMAPFILE)
+	$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
+	  echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
+	  exit 1; \
+	fi
+	@echo Making SA debugger back-end...
+	$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE                   \
+		   -D_FILE_OFFSET_BITS=64                               \
+                   $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG)     \
+		   $(BIN_UTILS)						\
+	           -I$(SASRCDIR)                                        \
+	           -I$(GENERATED)                                       \
+	           -I$(BOOT_JAVA_HOME)/include                          \
+	           -I$(BOOT_JAVA_HOME)/include/$(Platform_os_family)    \
+	           $(SASRCFILES)                                        \
+	           $(SA_LFLAGS)                                         \
+	           $(SA_DEBUG_CFLAGS)                                   \
+	           -o $@                                                \
+	           -lthread_db
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
+	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+  ifeq ($(STRIP_POLICY),all_strip)
+	$(QUIETLY) $(STRIP) $@
+  else
+    ifeq ($(STRIP_POLICY),min_strip)
+	$(QUIETLY) $(STRIP) -g $@
+    # implied else here is no stripping at all
+    endif
+  endif
+  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
+	$(RM) $(LIBSAPROC_DEBUGINFO)
+  endif
+endif
+
+install_saproc: $(BUILDLIBSAPROC)
+	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
+	  test -f $(LIBSAPROC_DEBUGINFO) &&                  \
+	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
+	  test -f $(LIBSAPROC_DIZ) &&                  \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	fi
+
+.PHONY: install_saproc
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/top.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,144 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+# top.make is included in the Makefile in the build directories.
+# It DOES NOT include the vm dependency info in order to be faster.
+# Its main job is to implement the incremental form of make lists.
+# It also:
+#   -builds and runs adlc via adlc.make
+#   -generates JVMTI source and docs via jvmti.make (JSR-163)
+#   -generate sa-jdi.jar (JDI binding to core files)
+
+# It assumes the following flags are set:
+# CFLAGS Platform_file, Src_Dirs_I, Src_Dirs_V, SYSDEFS, AOUT, Obj_Files
+
+# -- D. Ungar (5/97) from a file by Bill Bush
+
+# Don't override the built-in $(MAKE).
+# Instead, use "gmake" (or "gnumake") from the command line.  --Rose
+#MAKE = gmake
+
+include $(GAMMADIR)/make/altsrc.make
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+VM          = $(GAMMADIR)/src/share/vm
+Plat_File   = $(Platform_file)
+CDG         = cd $(GENERATED); 
+
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+UpdatePCH = $(MAKE) -f vm.make $(PRECOMPILED_HEADER) $(MFLAGS) 
+else
+UpdatePCH = \# precompiled header is not used
+endif
+
+Cached_plat = $(GENERATED)/platform.current
+
+AD_Dir   = $(GENERATED)/adfiles
+ADLC     = $(AD_Dir)/adlc
+AD_Spec  = $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad)
+AD_Src   = $(call altsrc-replace,$(HS_COMMON_SRC)/share/vm/adlc)
+AD_Names = ad_$(Platform_arch_model).hpp ad_$(Platform_arch_model).cpp
+AD_Files = $(AD_Names:%=$(AD_Dir)/%)
+
+# AD_Files_If_Required/COMPILER1 = ad_stuff
+AD_Files_If_Required/COMPILER2 = ad_stuff
+AD_Files_If_Required/TIERED = ad_stuff
+AD_Files_If_Required = $(AD_Files_If_Required/$(TYPE))
+
+# Weird argument adjustment for "gnumake -j..."
+adjust-mflags   = $(GENERATED)/adjust-mflags
+MFLAGS-adjusted = -r `$(adjust-mflags) "$(MFLAGS)" "$(HOTSPOT_BUILD_JOBS)"`
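+# A sketch, assuming adjust-mflags.sh behaves as on the other platforms: with
+# HOTSPOT_BUILD_JOBS=4, a recursive "$(MAKE) -f vm.make $(MFLAGS-adjusted)"
+# runs roughly as "gmake -r -j4 -f vm.make".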
+
+
+# default target: update lists, make vm
+# done in stages to force sequential order with parallel make
+#
+
+default: vm_build_preliminaries the_vm
+	@echo All done.
+
+# This is an explicit dependency for the sake of parallel makes.
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
+	@# We need a null action here, so implicit rules don't get consulted.
+
+$(Cached_plat): $(Plat_File)
+	$(CDG) cp $(Plat_File) $(Cached_plat)
+
+# make AD files as necessary
+ad_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f adlc.make $(MFLAGS-adjusted)
+
+# generate JVMTI files from the spec
+jvmti_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
+
+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
+# generate SA jar files and native header
+sa_stuff:
+	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
+
+# and the VM: must use other makefile with dependencies included
+
+# We have to go to great lengths to get control over the -jN argument
+# to the recursive invocation of vm.make.  The problem is that gnumake
+# resets -jN to -j1 for recursive runs.  (How helpful.)
+# Note that the user must specify the desired parallelism level via a
+# command-line or environment variable named HOTSPOT_BUILD_JOBS.
+$(adjust-mflags): $(GAMMADIR)/make/$(Platform_os_family)/makefiles/adjust-mflags.sh
+	@+rm -f $@ $@+
+	@+cat $< > $@+
+	@+chmod +x $@+
+	@+mv $@+ $@
+
+the_vm: vm_build_preliminaries $(adjust-mflags)
+	@$(UpdatePCH)
+	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
+
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
+
+# next rules support "make foo.[ois]"
+
+%.o %.i %.s:
+	$(UpdatePCH) 
+	$(MAKE) -f vm.make $(MFLAGS) $@
+	#$(MAKE) -f vm.make $@
+
+# this should force everything to be rebuilt
+clean: 
+	rm -f $(GENERATED)/*.class
+	$(MAKE) -f vm.make $(MFLAGS) clean
+
+# just in case it doesn't, this should do it
+realclean:
+	$(MAKE) -f vm.make $(MFLAGS) clean
+	rm -fr $(GENERATED)
+
+.PHONY: default vm_build_preliminaries
+.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean
+.PHONY: checks check_os_version install
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/trace.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+include $(GAMMADIR)/make/linux/makefiles/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+# #########################################################################
+
+HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
+  echo "true"; else echo "false";\
+  fi)
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+TraceOutDir   = $(GENERATED)/tracefiles
+
+TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
+TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+TraceGeneratedNames =     \
+    traceEventClasses.hpp \
+    traceEventIds.hpp     \
+    traceTypes.hpp
+
+ifeq ($(HAS_ALT_SRC), true)
+TraceGeneratedNames +=    \
+    traceRequestables.hpp \
+    traceEventControl.hpp
+
+ifneq ($(INCLUDE_TRACE), false)
+TraceGeneratedNames += traceProducer.cpp
+endif
+
+endif
+
+TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
+
+XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
+	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+ifeq ($(HAS_ALT_SRC), true)
+	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
+endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(TraceGeneratedFiles)
+
+GENERATE_CODE= \
+  $(QUIETLY) echo Generating $@; \
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
+  test -f $@
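+# In each rule below, $(word 1,$^) is the trace.xml input and $(word 2,$^)
+# the XSL stylesheet, so every generated file is one jvmtiGen invocation.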
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+ifeq ($(HAS_ALT_SRC), false)
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+endif
+
+# #########################################################################
+
+clean cleanall:
+	rm $(TraceGeneratedFiles)
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/vm.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,377 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Rules to build JVM and related libraries, included from vm.make in the build
+# directory.
+
+# Common build rules.
+MAKEFILES_DIR=$(GAMMADIR)/make/$(Platform_os_family)/makefiles
+include $(MAKEFILES_DIR)/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+default: build
+
+#----------------------------------------------------------------------
+# Defs
+
+GENERATED     = ../generated
+DEP_DIR       = $(GENERATED)/dependencies
+
+# reads the generated files defining the set of .o's and the .o .h dependencies
+-include $(DEP_DIR)/*.d
+
+# read machine-specific adjustments (%%% should do this via buildtree.make?)
+ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+  include $(MAKEFILES_DIR)/zeroshark.make
+else
+  include $(MAKEFILES_DIR)/$(BUILDARCH).make
+endif
+
+# set VPATH so make knows where to look for source files
+# Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
+# The adfiles directory contains ad_<arch>.[ch]pp.
+# The jvmtifiles directory contains jvmti*.[ch]pp
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+VPATH += $(Src_Dirs_V:%=%:)
+
+# set INCLUDES for C preprocessor.
+Src_Dirs_I += $(GENERATED)
+# The order is important for the precompiled headers to work.
+INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
+
+# SYMFLAG is used by {jsig,saproc}.make
+ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  # always build with debug info when we can create .debuginfo files
+  SYMFLAG = -g
+else
+  ifeq (${VERSION}, debug)
+    SYMFLAG = -g
+  else
+    SYMFLAG =
+  endif
+endif
+
+# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
+# in $(GAMMADIR)/make/defs.make
+ifeq ($(HOTSPOT_BUILD_VERSION),)
+  BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
+else
+  BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)-$(HOTSPOT_BUILD_VERSION)\""
+endif
+
+# The following variables are defined in the generated flags.make file.
+BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HS_BUILD_VER)\""
+JRE_VERSION   = -DJRE_RELEASE_VERSION="\"$(JRE_RELEASE_VER)\""
+HS_LIB_ARCH   = -DHOTSPOT_LIB_ARCH=\"$(LIBARCH)\"
+BUILD_TARGET  = -DHOTSPOT_BUILD_TARGET="\"$(TARGET)\""
+BUILD_USER    = -DHOTSPOT_BUILD_USER="\"$(HOTSPOT_BUILD_USER)\""
+VM_DISTRO     = -DHOTSPOT_VM_DISTRO="\"$(HOTSPOT_VM_DISTRO)\""
+
+CXXFLAGS =           \
+  ${SYSDEFS}         \
+  ${INCLUDES}        \
+  ${BUILD_VERSION}   \
+  ${BUILD_TARGET}    \
+  ${BUILD_USER}      \
+  ${HS_LIB_ARCH}     \
+  ${VM_DISTRO}
+
+# This is VERY important! The version define must only be supplied to
+# vm_version.o. Otherwise, ccache will not reuse the cache at all, since the
+# version string may contain a time and date.
+vm_version.o: CXXFLAGS += ${JRE_VERSION}
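+# (This uses a GNU make target-specific variable: JRE_VERSION is appended to
+# CXXFLAGS only when compiling vm_version.o.)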
+
+CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
+
+# File specific flags
+CXXFLAGS += $(CXXFLAGS/BYFILE)
+
+
+# CFLAGS_WARN holds compiler options to suppress/enable warnings.
+CFLAGS += $(CFLAGS_WARN/BYFILE)
+
+# Do not use C++ exception handling
+CFLAGS += $(CFLAGS/NOEX)
+
+# Extra flags from gnumake's invocation or environment
+CFLAGS += $(EXTRA_CFLAGS)
+LFLAGS += $(EXTRA_CFLAGS)
+
+# Don't set the executable bit on the stack segment;
+# the same could be done with a separate execstack command.
+#LFLAGS += -Xlinker -z -Xlinker noexecstack
+
+LIBS += -lm -ldl -lpthread
+
+# By default, link the *.o into the library, not the executable.
+LINK_INTO$(LINK_INTO) = LIBJVM
+
+JDK_LIBDIR = $(JAVA_HOME)/jre/lib/$(LIBARCH)
+
+#----------------------------------------------------------------------
+# jvm_db & dtrace
+include $(MAKEFILES_DIR)/dtrace.make
+
+#----------------------------------------------------------------------
+# JVM
+
+JVM      = jvm
+LIBJVM   = lib$(JVM).so
+
+CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
+
+LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
+LIBJVM_DIZ         = lib$(JVM).diz
+
+SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
+
+SOURCE_PATHS=\
+  $(shell find $(HS_COMMON_SRC)/share/vm/* -type d \! \
+      \( -name DUMMY $(foreach dir,$(SPECIAL_PATHS),-o -name $(dir)) \))
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os/$(Platform_os_family)/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os/posix/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(SRCARCH)/vm
+SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_family)_$(SRCARCH)/vm
+
+CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
+
+ifneq ($(INCLUDE_TRACE), false)
+CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
+  find $(HS_ALT_SRC)/share/vm/jfr -type d; \
+  fi)
+endif
+
+COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
+COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
+
+COMPILER2_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/opto)
+COMPILER2_PATHS += $(call altsrc,$(HS_COMMON_SRC)/share/vm/libadt)
+COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/opto
+COMPILER2_PATHS += $(HS_COMMON_SRC)/share/vm/libadt
+COMPILER2_PATHS += $(GENERATED)/adfiles
+
+SHARK_PATHS := $(GAMMADIR)/src/share/vm/shark
+
+# Include dirs per type.
+Src_Dirs/CORE      := $(CORE_PATHS)
+Src_Dirs/COMPILER1 := $(CORE_PATHS) $(COMPILER1_PATHS)
+Src_Dirs/COMPILER2 := $(CORE_PATHS) $(COMPILER2_PATHS)
+Src_Dirs/TIERED    := $(CORE_PATHS) $(COMPILER1_PATHS) $(COMPILER2_PATHS)
+Src_Dirs/ZERO      := $(CORE_PATHS)
+Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
+Src_Dirs := $(Src_Dirs/$(TYPE))
+
+COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
+COMPILER1_SPECIFIC_FILES := c1_\*
+SHARK_SPECIFIC_FILES     := shark
+ZERO_SPECIFIC_FILES      := zero
+
+# Always exclude these.
+Src_Files_EXCLUDE += jsig.c jvmtiEnvRecommended.cpp jvmtiEnvStub.cpp
+
+# Exclude per type.
+Src_Files_EXCLUDE/CORE      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER1 := $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/COMPILER2 := $(COMPILER1_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
+Src_Files_EXCLUDE/TIERED    := $(ZERO_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES)
+Src_Files_EXCLUDE/ZERO      := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(SHARK_SPECIFIC_FILES) ciTypeFlow.cpp
+Src_Files_EXCLUDE/SHARK     := $(COMPILER1_SPECIFIC_FILES) $(COMPILER2_SPECIFIC_FILES) $(ZERO_SPECIFIC_FILES)
+
+Src_Files_EXCLUDE +=  $(Src_Files_EXCLUDE/$(TYPE))
+
+# Disable ELF decoder on AIX (AIX uses XCOFF).
+Src_Files_EXCLUDE += decoder_elf.cpp elfFile.cpp elfStringTable.cpp elfSymbolTable.cpp elfFuncDescTable.cpp
+
+# Special handling of arch model.
+ifeq ($(Platform_arch_model), x86_32)
+Src_Files_EXCLUDE += \*x86_64\*
+endif
+ifeq ($(Platform_arch_model), x86_64)
+Src_Files_EXCLUDE += \*x86_32\*
+endif
+
+# Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
+define findsrc
+	$(notdir $(shell find $(1)/. ! -name . -prune \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
+endef
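+# Usage sketch: $(call findsrc,$(HS_COMMON_SRC)/share/vm/runtime) yields the
+# basenames of the .c/.cpp/.s files directly in that directory (minus anything
+# in Src_Files_EXCLUDE); the -prune keeps find from descending into
+# subdirectories.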
+
+Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
+
+Obj_Files = $(sort $(addsuffix .o,$(basename $(Src_Files))))
+
+JVM_OBJ_FILES = $(Obj_Files)
+
+vm_version.o: $(filter-out vm_version.o,$(JVM_OBJ_FILES))
+
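+# The awk script below builds the final mapfile from $(MAPFILE) by replacing
+# the "INSERT VTABLE SYMBOLS HERE" marker line with the generated vm.def.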
+mapfile : $(MAPFILE) vm.def
+	rm -f $@
+	awk '{ if ($$0 ~ "INSERT VTABLE SYMBOLS HERE")	\
+                 { system ("cat vm.def"); }		\
+               else					\
+                 { print $$0 }				\
+             }' > $@ < $(MAPFILE)
+
+mapfile_reorder : mapfile $(REORDERFILE)
+	rm -f $@
+	cat $^ > $@
+
+vm.def: $(Res_Files) $(Obj_Files)
+	sh $(GAMMADIR)/make/aix/makefiles/build_vm_def.sh *.o > $@
+
+ifeq ($(JVM_VARIANT_ZEROSHARK), true)
+  STATIC_CXX = false
+else
+  ifeq ($(ZERO_LIBARCH), ppc64)
+    STATIC_CXX = false
+  else
+    STATIC_CXX = true
+  endif
+endif
+
+ifeq ($(LINK_INTO),AOUT)
+  LIBJVM.o                 =
+  LIBJVM_MAPFILE           =
+  LIBS_VM                  = $(LIBS)
+else
+  LIBJVM.o                 = $(JVM_OBJ_FILES)
+  LIBJVM_MAPFILE$(LDNOMAP) = mapfile_reorder
+  LFLAGS_VM$(LDNOMAP)      += $(MAPFLAG:FILENAME=$(LIBJVM_MAPFILE))
+# xlC_r ignores the -o= syntax
+# LFLAGS_VM                += $(SONAMEFLAG:SONAME=$(LIBJVM))
+
+  # JVM is statically linked with libgcc[_s] and libstdc++; this is needed to
+  # get around library dependency and compatibility issues. Must use gcc not
+  # g++ to link.
+  LIBS_VM                  += $(STATIC_STDCXX) $(LIBS)
+endif
+
+LINK_VM = $(LINK_LIB.CXX)
+
+# Create a loadmap for libjvm.so by default; it helps in diagnosing some problems.
+LFLAGS_VM += -bloadmap:libjvm.loadmap
+
+# rule for building precompiled header
+$(PRECOMPILED_HEADER):
+	$(QUIETLY) echo Generating precompiled header $@
+	$(QUIETLY) mkdir -p $(PRECOMPILED_HEADER_DIR)
+	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -x c++-header $(PRECOMPILED_HEADER_SRC) -o $@ $(COMPILE_DONE)
+
+# making the library:
+
+ifneq ($(JVM_BASE_ADDR),)
+# By default shared library is linked at base address == 0. Modify the
+# linker script if JVM prefers a different base location. It can also be
+# implemented with 'prelink -r'. But 'prelink' is not (yet) available on
+# our build platform (AS-2.1).
+LD_SCRIPT = libjvm.so.lds
+$(LD_SCRIPT): $(LIBJVM_MAPFILE)
+	$(QUIETLY) {                                                \
+	  rm -rf $@;                                                \
+	  $(LINK_VM) -Wl,--verbose $(LFLAGS_VM) 2>&1             |  \
+	    sed -e '/^======/,/^======/!d'                          \
+		-e '/^======/d'                                     \
+		-e 's/0\( + SIZEOF_HEADERS\)/$(JVM_BASE_ADDR)\1/'   \
+		> $@;                                               \
+	}
+LD_SCRIPT_FLAG = -Wl,-T,$(LD_SCRIPT)
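+# (The sed above keeps only the default linker script from the linker's
+# --verbose output, i.e. the section between the "======" marker lines, and
+# rewrites its base address from 0 to $(JVM_BASE_ADDR).)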
+endif
+
+# With more recent Redhat releases (or cutting-edge Fedora), if SELinux is
+# configured to be enabled, the runtime linker will fail to apply the text
+# relocation to libjvm.so because it is built as a non-PIC DSO. To work
+# around that, we run chcon on libjvm.so after it is built. See details in
+# bug 6538311.
+$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
+	$(QUIETLY) {                                                      \
+	    echo Linking vm...;                                           \
+	    $(LINK_LIB.CXX/PRE_HOOK)                                      \
+	    $(LINK_VM) $(LD_SCRIPT_FLAG)                                  \
+		       $(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM); \
+	    $(LINK_LIB.CXX/POST_HOOK)                                     \
+	    rm -f $@.1; ln -s $@ $@.1;                                    \
+	}
+# No security contexts on AIX
+#           if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then              \
+#	      if [ -x /usr/sbin/selinuxenabled ] ; then                 \
+#	        /usr/sbin/selinuxenabled;                               \
+#               if [ $$? = 0 ] ; then					\
+#		  /usr/bin/chcon -t textrel_shlib_t $@;                 \
+#		  if [ $$? != 0 ]; then                                 \
+#		    echo "ERROR: Cannot chcon $@";			\
+#		  fi							\
+#	        fi							\
+#	      fi                                                        \
+#           fi 							        \
+#	}
+
+#ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+#	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
+#	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+#    ifeq ($(STRIP_POLICY),all_strip)
+#	$(QUIETLY) $(STRIP) $@
+#    else
+#      ifeq ($(STRIP_POLICY),min_strip)
+#	$(QUIETLY) $(STRIP) -g $@
+#      # implied else here is no stripping at all
+#      endif
+#    endif
+#    ifeq ($(ZIP_DEBUGINFO_FILES),1)
+#	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
+#	$(RM) $(LIBJVM_DEBUGINFO)
+#  endif
+#endif
+
+DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
+DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
+DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
+DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
+
+install_jvm: $(LIBJVM)
+	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
+	$(QUIETLY) test ! -f $(LIBJVM_DEBUGINFO) || \
+	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
+	$(QUIETLY) test ! -f $(LIBJVM_DIZ) || \
+	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
+	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
+
+#----------------------------------------------------------------------
+# Other files
+
+# Signal interposition library
+include $(MAKEFILES_DIR)/jsig.make
+
+# Serviceability agent
+include $(MAKEFILES_DIR)/saproc.make
+
+#----------------------------------------------------------------------
+
+build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
+
+install: install_jvm install_jsig install_saproc
+
+.PHONY: default build install install_jvm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/makefiles/xlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,159 @@
+#
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013 SAP. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#  
+#
+
+#------------------------------------------------------------------------
+# CC, CXX & AS
+
+# Set compiler explicitly
+CXX = $(COMPILER_PATH)xlC_r
+CC  = $(COMPILER_PATH)xlc_r
+HOSTCXX = $(CXX)
+HOSTCC  = $(CC)
+
+AS  = $(CC) -c
+
+# get xlc version
+CXX_VERSION   := $(shell $(CXX) -qversion 2>&1 | sed -n 's/.*Version: \([0-9.]*\)/\1/p')
+
+# xlc 08.00.0000.0023 and higher support -qtune=balanced
+CXX_SUPPORTS_BALANCED_TUNING=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 080000000023 ] ; then echo "true" ; fi)
+# xlc 10.01 is used with aggressive optimizations to boost performance
+CXX_IS_V10=$(shell if [ $(subst .,,$(CXX_VERSION)) -ge 100100000000 ] ; then echo "true" ; fi)
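+# The checks above strip the dots so the version string can be compared
+# numerically, e.g. "08.00.0000.0023" becomes 080000000023 and
+# "10.01.0000.0000" becomes 100100000000.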
+
+# Check for precompiled header support.
+
+# Switch off precompiled header support: neither xlC 8.0 nor xlC 10.0
+# supports precompiled headers. Both "understand" the command-line switches
+# "-qusepcomp" and "-qgenpcomp", but when we specify them the following
+# message is printed:
+# "1506-755 (W) The -qusepcomp option is not supported in this release."
+USE_PRECOMPILED_HEADER = 0
+ifneq ($(USE_PRECOMPILED_HEADER),0)
+PRECOMPILED_HEADER_DIR=.
+PRECOMPILED_HEADER_SRC=$(GAMMADIR)/src/share/vm/precompiled/precompiled.hpp
+PRECOMPILED_HEADER=$(PRECOMPILED_HEADER_DIR)/precompiled.hpp.gch
+endif
+
+
+#------------------------------------------------------------------------
+# Compiler flags
+
+# position-independent code
+PICFLAG = -qpic=large
+
+VM_PICFLAG/LIBJVM = $(PICFLAG)
+VM_PICFLAG/AOUT   =
+VM_PICFLAG        = $(VM_PICFLAG/$(LINK_INTO))
+
+CFLAGS += $(VM_PICFLAG)
+CFLAGS += -qnortti
+CFLAGS += -qnoeh
+
+CFLAGS += -D_REENTRANT
+# no xlc counterpart for -fcheck-new
+# CFLAGS += -fcheck-new
+
+ARCHFLAG = -q64
+
+CFLAGS     += $(ARCHFLAG)
+AOUT_FLAGS += $(ARCHFLAG)
+LFLAGS     += $(ARCHFLAG)
+ASFLAGS    += $(ARCHFLAG)
+
+# Use C++ Interpreter
+ifdef CC_INTERP
+  CFLAGS += -DCC_INTERP
+endif
+
+# Keep temporary files (.ii, .s)
+# no counterpart on xlc for -save-temps, -pipe
+
+# Do not treat warnings as errors; the gcc-style settings are kept below
+# for reference:
+# WARNINGS_ARE_ERRORS = -Werror
+# Except for a few acceptable ones
+# ACCEPTABLE_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare
+# CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ACCEPTABLE_WARNINGS)
+CFLAGS_WARN/COMMON = 
+CFLAGS_WARN/DEFAULT = $(CFLAGS_WARN/COMMON) $(EXTRA_WARNINGS)
+# Special cases
+CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@)) 
+
+# The flags to use for an optimized build
+OPT_CFLAGS += -O3
+
+# HotSpot does not follow strict-aliasing rules, so turn this optimization off.
+OPT_CFLAGS += -qalias=noansi
+
+OPT_CFLAGS/NOOPT=-qnoopt
+
+DEPFLAGS = -qmakedep=gcc -MF $(DEP_DIR)/$(@:%=%.d)
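+# (-qmakedep=gcc makes xlC emit gcc-style dependency files; -MF redirects
+# them into $(DEP_DIR).)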
+
+#------------------------------------------------------------------------
+# Linker flags
+
+# Statically link libstdc++.so; works with gcc but is ignored by g++.
+STATIC_STDCXX = -Wl,-lC_r
+
+# Enable linker optimization
+# no counterpart on xlc for this 
+# LFLAGS += -Xlinker -O1
+
+# Use $(MAPFLAG:FILENAME=real_file_name) to specify a map file.
+# MAPFLAG = -Xlinker --version-script=FILENAME
+
+# Build shared library
+SHARED_FLAG = -q64 -b64 -bexpall -G -bnoentry -qmkshrobj -brtl -bnolibpath
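+# (-bexpall exports all global symbols, -bnoentry builds the object without
+# an entry point, -brtl enables AIX run-time linking, and -qmkshrobj tells
+# xlC to create a shared object.)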
+
+#------------------------------------------------------------------------
+# Debug flags
+
+# Always compile with '-g' to get symbols in the stack traces in the hs_err file
+DEBUG_CFLAGS += -g
+FASTDEBUG_CFLAGS += -g
+OPT_CFLAGS += -g
+
+# DEBUG_BINARIES overrides everything, use full -g debug information
+ifeq ($(DEBUG_BINARIES), true)
+  DEBUG_CFLAGS = -g
+  CFLAGS += $(DEBUG_CFLAGS)
+endif
+
+# If we are building HEADLESS, pass on to VM
+# so it can set the java.awt.headless property
+ifdef HEADLESS
+CFLAGS += -DHEADLESS
+endif
+
+# We are building Embedded for a small device;
+# favor code space over speed.
+ifdef MINIMIZE_RAM_USAGE
+CFLAGS += -DMINIMIZE_RAM_USAGE
+endif
+
+ifdef CROSS_COMPILE_ARCH
+  STRIP = $(ALT_COMPILER_PATH)/strip
+else
+  STRIP = strip
+endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/aix/platform_ppc64	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,17 @@
+os_family = aix
+
+arch = ppc
+
+arch_model = ppc_64
+
+os_arch = aix_ppc
+
+os_arch_model = aix_ppc_64
+
+lib_arch = ppc64
+
+compiler = xlc
+
+gnu_dis_arch = ppc64
+
+sysdefs = -DAIX -DPPC64
--- a/make/bsd/makefiles/adjust-mflags.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/adjust-mflags.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- a/make/bsd/makefiles/adlc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/adlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/bsd/makefiles/debug.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/debug.make	Wed Mar 12 13:30:08 2014 +0100
@@ -36,6 +36,9 @@
 
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
+ifeq ($(OS_VENDOR), Darwin)
+MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug
+endif
 
 VERSION = debug
 SYSDEFS += -DASSERT
--- a/make/bsd/makefiles/defs.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/defs.make	Wed Mar 12 13:30:08 2014 +0100
@@ -185,7 +185,7 @@
       ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
     else
       # debug variants always get Full Debug Symbols (if available)
-      ENABLE_FULL_DEBUG_SYMBOLS = 1
+      ENABLE_FULL_DEBUG_SYMBOLS ?= 1
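+      # '?=' assigns only if the variable is not already set, so callers may override it.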
     endif
     _JUNK_ := $(shell \
       echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
--- a/make/bsd/makefiles/dtrace.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/dtrace.make	Wed Mar 12 13:30:08 2014 +0100
@@ -53,6 +53,7 @@
 GENOFFS = generate$(JVMOFFS)
 
 DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
 
@@ -68,11 +69,9 @@
 
 # Use mapfile with libjvm_db.so
 LIBJVM_DB_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_db
-#LFLAGS_JVM_DB += $(MAPFLAG:FILENAME=$(LIBJVM_DB_MAPFILE))
 
 # Use mapfile with libjvm_dtrace.so
 LIBJVM_DTRACE_MAPFILE = # no mapfile for usdt2 # $(MAKEFILES_DIR)/mapfile-vers-jvm_dtrace
-#LFLAGS_JVM_DTRACE += $(MAPFLAG:FILENAME=$(LIBJVM_DTRACE_MAPFILE))
 
 LFLAGS_JVM_DB += $(PICFLAG) # -D_REENTRANT
 LFLAGS_JVM_DTRACE += $(PICFLAG) # -D_REENTRANT
@@ -260,116 +259,38 @@
   endif
 endif
 
-#$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
-#             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
-#	$(QUIETLY) cat $^ > $@
 
 $(DtraceOutDir):
 	mkdir $(DtraceOutDir)
 
-$(DtraceOutDir)/hotspot.h: $(DTRACE_SRCDIR)/hotspot.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot.d
-
-$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hotspot_jni.d
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
 
-$(DtraceOutDir)/hs_private.h: $(DTRACE_SRCDIR)/hs_private.d | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/hs_private.d
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
 
-$(DtraceOutDir)/jhelper.h: $(DTRACE_SRCDIR)/jhelper.d $(JVMOFFS).o | $(DtraceOutDir)
-	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_SRCDIR)/jhelper.d
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
 
-# jhelper currently disabled
 dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h 
 
-DTraced_Files = ciEnv.o \
-                classLoadingService.o \
-                compileBroker.o \
-                hashtable.o \
-                instanceKlass.o \
-                java.o \
-                jni.o \
-                jvm.o \
-                memoryManager.o \
-                nmethod.o \
-                objectMonitor.o \
-                runtimeService.o \
-                sharedRuntime.o \
-                synchronizer.o \
-                thread.o \
-                unsafe.o \
-                vmThread.o \
-                vmCMSOperations.o \
-                vmPSOperations.o \
-                vmGCOperations.o \
-
-# Dtrace is available, so we build $(DTRACE.o)  
-#$(DTRACE.o): $(DTRACE).d $(JVMOFFS).h $(JVMOFFS)Index.h $(DTraced_Files)
-#	@echo Compiling $(DTRACE).d
-
-#	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
-#     $(DTraced_Files) ||\
-#  STATUS=$$?;\
-#	if [ x"$$STATUS" = x"1" -a \
-#       x`uname -r` = x"5.10" -a \
-#       x`uname -p` = x"sparc" ]; then\
-#    echo "*****************************************************************";\
-#    echo "* If you are building server compiler, and the error message is ";\
-#    echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
-#    echo "* 6213962, \"dtrace -G doesn't work on sparcv8+ object files\".";\
-#    echo "* Either patch/upgrade your system (>= S10u1_15), or set the ";\
-#    echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
-#    echo "* dtrace probes for this build.";\
-#    echo "*****************************************************************";\
-#  fi;\
-#  exit $$STATUS
-  # Since some DTraced_Files are in LIBJVM.o and they are touched by this
-  # command, and libgenerateJvmOffsets.so depends on LIBJVM.o, 'make' will
-  # think it needs to rebuild libgenerateJvmOffsets.so and thus JvmOffsets*
-  # files, but it doesn't, so we touch the necessary files to prevent later
-  # recompilation. Note: we only touch the necessary files if they already
-  # exist in order to close a race where an empty file can be created
-  # before the real build rule is executed.
-  # But, we can't touch the *.h files:  This rule depends
-  # on them, and that would cause an infinite cycle of rebuilding.
-  # Neither the *.h or *.ccp files need to be touched, since they have
-  # rules which do not update them when the generator file has not
-  # changed their contents.
-#	$(QUIETLY) if [ -f lib$(GENOFFS).so ]; then touch lib$(GENOFFS).so; fi
-#	$(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi
-#	$(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi
 
 .PHONY: dtraceCheck
 
-#SYSTEM_DTRACE_H = /usr/include/dtrace.h
 SYSTEM_DTRACE_PROG = /usr/sbin/dtrace
-#PATCH_DTRACE_PROG = /opt/SUNWdtrd/sbin/dtrace
 systemDtraceFound := $(wildcard ${SYSTEM_DTRACE_PROG})
-#patchDtraceFound := $(wildcard ${PATCH_DTRACE_PROG})
-#systemDtraceHdrFound := $(wildcard $(SYSTEM_DTRACE_H))
 
-#ifneq ("$(systemDtraceHdrFound)", "") 
-#CFLAGS += -DHAVE_DTRACE_H
-#endif
-
-#ifneq ("$(patchDtraceFound)", "")
-#DTRACE_PROG=$(PATCH_DTRACE_PROG)
-#DTRACE_INCL=-I/opt/SUNWdtrd/include
-#else
 ifneq ("$(systemDtraceFound)", "")
 DTRACE_PROG=$(SYSTEM_DTRACE_PROG)
 else
 
-endif # ifneq ("$(systemDtraceFound)", "")
-#endif # ifneq ("$(patchDtraceFound)", "")
+endif
 
 ifneq ("${DTRACE_PROG}", "")
 ifeq ("${HOTSPOT_DISABLE_DTRACE_PROBES}", "")
 
 DTRACE_OBJS = $(DTRACE.o) #$(JVMOFFS.o)
 CFLAGS += -DDTRACE_ENABLED #$(DTRACE_INCL)
-#clangCFLAGS += -DDTRACE_ENABLED -fno-optimize-sibling-calls
-#MAPFILE_DTRACE_OPT = $(MAPFILE_DTRACE)
 
 
 dtraceCheck:
--- a/make/bsd/makefiles/fastdebug.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/fastdebug.make	Wed Mar 12 13:30:08 2014 +0100
@@ -57,6 +57,9 @@
 
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
+ifeq ($(OS_VENDOR), Darwin)
+MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug
+endif
 
 VERSION = fastdebug
 SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
--- a/make/bsd/makefiles/gcc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/gcc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -261,14 +261,13 @@
   WARNINGS_ARE_ERRORS += -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
 
-ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+ifeq ($(USE_CLANG),)
  # Since GCC 4.3, -Wconversion has changed its meaning: it now warns about
  # implicit conversions that might affect values. Only enable it for earlier
  # versions.
-  WARNING_FLAGS = -Wunused-function
-  ifeq ($(USE_CLANG),)
-    WARNING_FLAGS += -Wconversion
+  ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
+    WARNING_FLAGS += -Wconversion
   endif
 endif
 
@@ -291,7 +290,7 @@
 # The flags to use for an Optimized g++ build
 ifeq ($(OS_VENDOR), Darwin)
   # use -Os by default, unless -O3 can be proved to be worth the cost, as per policy
-  # <http://wikis.sun.com/display/OpenJDK/Mac+OS+X+Port+Compilers>
+  # <https://wiki.openjdk.java.net/display/MacOSXPort/Compiler+Errata>
   OPT_CFLAGS_DEFAULT ?= SIZE
 else
   OPT_CFLAGS_DEFAULT ?= SPEED
@@ -386,6 +385,11 @@
 # statically link libstdc++.so; works with gcc but is ignored by g++
 STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic
 
+# Ensure clang uses libstdc++, not libc++
+ifeq ($(USE_CLANG), true)
+  LFLAGS += -stdlib=libstdc++
+endif
+
 ifeq ($(USE_CLANG),)
   # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x.
   ifneq ("${CC_VER_MAJOR}", "2")
--- a/make/bsd/makefiles/jsig.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/jsig.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -91,10 +91,10 @@
 install_jsig: $(LIBJSIG)
 	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
 ifeq ($(OS_VENDOR), Darwin)
-	-$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBJSIG_DEBUGINFO) || \
 	    cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJSIG_DEBUGINFO) || \
 	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
 endif
 	$(QUIETLY) test ! -f $(LIBJSIG_DIZ) || \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/mapfile-vers-darwin-debug	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,258 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+# Only used for OSX/Darwin builds
+
+# Define public interface.
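+# (This is a flat Mach-O symbol list, hence the leading underscores; unlike
+# the ELF version scripts used on the other platforms, it is presumably
+# consumed by the Darwin linker as an exported-symbols list.)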
+                # _JNI
+                _JNI_CreateJavaVM
+                _JNI_GetCreatedJavaVMs
+                _JNI_GetDefaultJavaVMInitArgs
+
+                # _JVM
+                _JVM_Accept
+                _JVM_ActiveProcessorCount
+                _JVM_AllocateNewArray
+                _JVM_AllocateNewObject
+                _JVM_ArrayCopy
+                _JVM_AssertionStatusDirectives
+                _JVM_Available
+                _JVM_Bind
+                _JVM_ClassDepth
+                _JVM_ClassLoaderDepth
+                _JVM_Clone
+                _JVM_Close
+                _JVM_CX8Field
+                _JVM_CompileClass
+                _JVM_CompileClasses
+                _JVM_CompilerCommand
+                _JVM_Connect
+                _JVM_ConstantPoolGetClassAt
+                _JVM_ConstantPoolGetClassAtIfLoaded
+                _JVM_ConstantPoolGetDoubleAt
+                _JVM_ConstantPoolGetFieldAt
+                _JVM_ConstantPoolGetFieldAtIfLoaded
+                _JVM_ConstantPoolGetFloatAt
+                _JVM_ConstantPoolGetIntAt
+                _JVM_ConstantPoolGetLongAt
+                _JVM_ConstantPoolGetMethodAt
+                _JVM_ConstantPoolGetMethodAtIfLoaded
+                _JVM_ConstantPoolGetMemberRefInfoAt
+                _JVM_ConstantPoolGetSize
+                _JVM_ConstantPoolGetStringAt
+                _JVM_ConstantPoolGetUTF8At
+                _JVM_CountStackFrames
+                _JVM_CurrentClassLoader
+                _JVM_CurrentLoadedClass
+                _JVM_CurrentThread
+                _JVM_CurrentTimeMillis
+                _JVM_DefineClass
+                _JVM_DefineClassWithSource
+                _JVM_DefineClassWithSourceCond
+                _JVM_DesiredAssertionStatus
+                _JVM_DisableCompiler
+                _JVM_DoPrivileged
+                _JVM_DTraceGetVersion
+                _JVM_DTraceActivate
+                _JVM_DTraceIsProbeEnabled
+                _JVM_DTraceIsSupported
+                _JVM_DTraceDispose
+                _JVM_DumpAllStacks
+                _JVM_DumpThreads
+                _JVM_EnableCompiler
+                _JVM_Exit
+                _JVM_FillInStackTrace
+                _JVM_FindClassFromClass
+                _JVM_FindClassFromClassLoader
+                _JVM_FindClassFromBootLoader
+                _JVM_FindLibraryEntry
+                _JVM_FindLoadedClass
+                _JVM_FindPrimitiveClass
+                _JVM_FindSignal
+                _JVM_FreeMemory
+                _JVM_GC
+                _JVM_GetAllThreads
+                _JVM_GetArrayElement
+                _JVM_GetArrayLength
+                _JVM_GetCPClassNameUTF
+                _JVM_GetCPFieldClassNameUTF
+                _JVM_GetCPFieldModifiers
+                _JVM_GetCPFieldNameUTF
+                _JVM_GetCPFieldSignatureUTF
+                _JVM_GetCPMethodClassNameUTF
+                _JVM_GetCPMethodModifiers
+                _JVM_GetCPMethodNameUTF
+                _JVM_GetCPMethodSignatureUTF
+                _JVM_GetCallerClass
+                _JVM_GetClassAccessFlags
+                _JVM_GetClassAnnotations
+                _JVM_GetClassCPEntriesCount
+                _JVM_GetClassCPTypes
+                _JVM_GetClassConstantPool
+                _JVM_GetClassContext
+                _JVM_GetClassDeclaredConstructors
+                _JVM_GetClassDeclaredFields
+                _JVM_GetClassDeclaredMethods
+                _JVM_GetClassFieldsCount
+                _JVM_GetClassInterfaces
+                _JVM_GetClassLoader
+                _JVM_GetClassMethodsCount
+                _JVM_GetClassModifiers
+                _JVM_GetClassName
+                _JVM_GetClassNameUTF
+                _JVM_GetClassSignature
+                _JVM_GetClassSigners
+                _JVM_GetClassTypeAnnotations
+                _JVM_GetComponentType
+                _JVM_GetDeclaredClasses
+                _JVM_GetDeclaringClass
+                _JVM_GetEnclosingMethodInfo
+                _JVM_GetFieldAnnotations
+                _JVM_GetFieldIxModifiers
+                _JVM_GetFieldTypeAnnotations
+                _JVM_GetHostName
+                _JVM_GetInheritedAccessControlContext
+                _JVM_GetInterfaceVersion
+                _JVM_GetLastErrorString
+                _JVM_GetManagement
+                _JVM_GetMethodAnnotations
+                _JVM_GetMethodDefaultAnnotationValue
+                _JVM_GetMethodIxArgsSize
+                _JVM_GetMethodIxByteCode
+                _JVM_GetMethodIxByteCodeLength
+                _JVM_GetMethodIxExceptionIndexes
+                _JVM_GetMethodIxExceptionTableEntry
+                _JVM_GetMethodIxExceptionTableLength
+                _JVM_GetMethodIxExceptionsCount
+                _JVM_GetMethodIxLocalsCount
+                _JVM_GetMethodIxMaxStack
+                _JVM_GetMethodIxModifiers
+                _JVM_GetMethodIxNameUTF
+                _JVM_GetMethodIxSignatureUTF
+                _JVM_GetMethodParameterAnnotations
+                _JVM_GetMethodParameters
+                _JVM_GetMethodTypeAnnotations
+                _JVM_GetPrimitiveArrayElement
+                _JVM_GetProtectionDomain
+                _JVM_GetSockName
+                _JVM_GetSockOpt
+                _JVM_GetStackAccessControlContext
+                _JVM_GetStackTraceDepth
+                _JVM_GetStackTraceElement
+                _JVM_GetSystemPackage
+                _JVM_GetSystemPackages
+                _JVM_GetThreadStateNames
+                _JVM_GetThreadStateValues
+                _JVM_GetVersionInfo
+                _JVM_Halt
+                _JVM_HoldsLock
+                _JVM_IHashCode
+                _JVM_InitAgentProperties
+                _JVM_InitProperties
+                _JVM_InitializeCompiler
+                _JVM_InitializeSocketLibrary
+                _JVM_InternString
+                _JVM_Interrupt
+                _JVM_InvokeMethod
+                _JVM_IsArrayClass
+                _JVM_IsConstructorIx
+                _JVM_IsInterface
+                _JVM_IsInterrupted
+                _JVM_IsNaN
+                _JVM_IsPrimitiveClass
+                _JVM_IsSameClassPackage
+                _JVM_IsSilentCompiler
+                _JVM_IsSupportedJNIVersion
+                _JVM_IsThreadAlive
+                _JVM_IsVMGeneratedMethodIx
+                _JVM_LatestUserDefinedLoader
+                _JVM_Listen
+                _JVM_LoadClass0
+                _JVM_LoadLibrary
+                _JVM_Lseek
+                _JVM_MaxObjectInspectionAge
+                _JVM_MaxMemory
+                _JVM_MonitorNotify
+                _JVM_MonitorNotifyAll
+                _JVM_MonitorWait
+                _JVM_NanoTime
+                _JVM_NativePath
+                _JVM_NewArray
+                _JVM_NewInstanceFromConstructor
+                _JVM_NewMultiArray
+                _JVM_OnExit
+                _JVM_Open
+                _JVM_RaiseSignal
+                _JVM_RawMonitorCreate
+                _JVM_RawMonitorDestroy
+                _JVM_RawMonitorEnter
+                _JVM_RawMonitorExit
+                _JVM_Read
+                _JVM_Recv
+                _JVM_RecvFrom
+                _JVM_RegisterSignal
+                _JVM_ReleaseUTF
+                _JVM_ResolveClass
+                _JVM_ResumeThread
+                _JVM_Send
+                _JVM_SendTo
+                _JVM_SetArrayElement
+                _JVM_SetClassSigners
+                _JVM_SetLength
+                _JVM_SetNativeThreadName
+                _JVM_SetPrimitiveArrayElement
+                # Preserved so that the Graal repo can link against a JDK7 libjava.so
+                _JVM_SetProtectionDomain
+                _JVM_SetSockOpt
+                _JVM_SetThreadPriority
+                _JVM_Sleep
+                _JVM_Socket
+                _JVM_SocketAvailable
+                _JVM_SocketClose
+                _JVM_SocketShutdown
+                _JVM_StartThread
+                _JVM_StopThread
+                _JVM_SuspendThread
+                _JVM_SupportsCX8
+                _JVM_Sync
+                _JVM_Timeout
+                _JVM_TotalMemory
+                _JVM_TraceInstructions
+                _JVM_TraceMethodCalls
+                _JVM_UnloadLibrary
+                _JVM_Write
+                _JVM_Yield
+                _JVM_handle_bsd_signal
+
+                # miscellaneous functions
+                _jio_fprintf
+                _jio_printf
+                _jio_snprintf
+                _jio_vfprintf
+                _jio_vsnprintf
+
+                # This is for Forte Analyzer profiling support.
+                _AsyncGetCallTrace
+
+                # INSERT VTABLE SYMBOLS HERE
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/mapfile-vers-darwin-product	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,258 @@
+#
+# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+# Only used for OSX/Darwin builds
+
+# Define public interface.
+                # _JNI
+                _JNI_CreateJavaVM
+                _JNI_GetCreatedJavaVMs
+                _JNI_GetDefaultJavaVMInitArgs
+
+                # _JVM
+                _JVM_Accept
+                _JVM_ActiveProcessorCount
+                _JVM_AllocateNewArray
+                _JVM_AllocateNewObject
+                _JVM_ArrayCopy
+                _JVM_AssertionStatusDirectives
+                _JVM_Available
+                _JVM_Bind
+                _JVM_ClassDepth
+                _JVM_ClassLoaderDepth
+                _JVM_Clone
+                _JVM_Close
+                _JVM_CX8Field
+                _JVM_CompileClass
+                _JVM_CompileClasses
+                _JVM_CompilerCommand
+                _JVM_Connect
+                _JVM_ConstantPoolGetClassAt
+                _JVM_ConstantPoolGetClassAtIfLoaded
+                _JVM_ConstantPoolGetDoubleAt
+                _JVM_ConstantPoolGetFieldAt
+                _JVM_ConstantPoolGetFieldAtIfLoaded
+                _JVM_ConstantPoolGetFloatAt
+                _JVM_ConstantPoolGetIntAt
+                _JVM_ConstantPoolGetLongAt
+                _JVM_ConstantPoolGetMethodAt
+                _JVM_ConstantPoolGetMethodAtIfLoaded
+                _JVM_ConstantPoolGetMemberRefInfoAt
+                _JVM_ConstantPoolGetSize
+                _JVM_ConstantPoolGetStringAt
+                _JVM_ConstantPoolGetUTF8At
+                _JVM_CountStackFrames
+                _JVM_CurrentClassLoader
+                _JVM_CurrentLoadedClass
+                _JVM_CurrentThread
+                _JVM_CurrentTimeMillis
+                _JVM_DefineClass
+                _JVM_DefineClassWithSource
+                _JVM_DefineClassWithSourceCond
+                _JVM_DesiredAssertionStatus
+                _JVM_DisableCompiler
+                _JVM_DoPrivileged
+                _JVM_DTraceGetVersion
+                _JVM_DTraceActivate
+                _JVM_DTraceIsProbeEnabled
+                _JVM_DTraceIsSupported
+                _JVM_DTraceDispose
+                _JVM_DumpAllStacks
+                _JVM_DumpThreads
+                _JVM_EnableCompiler
+                _JVM_Exit
+                _JVM_FillInStackTrace
+                _JVM_FindClassFromClass
+                _JVM_FindClassFromClassLoader
+                _JVM_FindClassFromBootLoader
+                _JVM_FindLibraryEntry
+                _JVM_FindLoadedClass
+                _JVM_FindPrimitiveClass
+                _JVM_FindSignal
+                _JVM_FreeMemory
+                _JVM_GC
+                _JVM_GetAllThreads
+                _JVM_GetArrayElement
+                _JVM_GetArrayLength
+                _JVM_GetCPClassNameUTF
+                _JVM_GetCPFieldClassNameUTF
+                _JVM_GetCPFieldModifiers
+                _JVM_GetCPFieldNameUTF
+                _JVM_GetCPFieldSignatureUTF
+                _JVM_GetCPMethodClassNameUTF
+                _JVM_GetCPMethodModifiers
+                _JVM_GetCPMethodNameUTF
+                _JVM_GetCPMethodSignatureUTF
+                _JVM_GetCallerClass
+                _JVM_GetClassAccessFlags
+                _JVM_GetClassAnnotations
+                _JVM_GetClassCPEntriesCount
+                _JVM_GetClassCPTypes
+                _JVM_GetClassConstantPool
+                _JVM_GetClassContext
+                _JVM_GetClassDeclaredConstructors
+                _JVM_GetClassDeclaredFields
+                _JVM_GetClassDeclaredMethods
+                _JVM_GetClassFieldsCount
+                _JVM_GetClassInterfaces
+                _JVM_GetClassLoader
+                _JVM_GetClassMethodsCount
+                _JVM_GetClassModifiers
+                _JVM_GetClassName
+                _JVM_GetClassNameUTF
+                _JVM_GetClassSignature
+                _JVM_GetClassSigners
+                _JVM_GetClassTypeAnnotations
+                _JVM_GetComponentType
+                _JVM_GetDeclaredClasses
+                _JVM_GetDeclaringClass
+                _JVM_GetEnclosingMethodInfo
+                _JVM_GetFieldAnnotations
+                _JVM_GetFieldIxModifiers
+                _JVM_GetFieldTypeAnnotations
+                _JVM_GetHostName
+                _JVM_GetInheritedAccessControlContext
+                _JVM_GetInterfaceVersion
+                _JVM_GetLastErrorString
+                _JVM_GetManagement
+                _JVM_GetMethodAnnotations
+                _JVM_GetMethodDefaultAnnotationValue
+                _JVM_GetMethodIxArgsSize
+                _JVM_GetMethodIxByteCode
+                _JVM_GetMethodIxByteCodeLength
+                _JVM_GetMethodIxExceptionIndexes
+                _JVM_GetMethodIxExceptionTableEntry
+                _JVM_GetMethodIxExceptionTableLength
+                _JVM_GetMethodIxExceptionsCount
+                _JVM_GetMethodIxLocalsCount
+                _JVM_GetMethodIxMaxStack
+                _JVM_GetMethodIxModifiers
+                _JVM_GetMethodIxNameUTF
+                _JVM_GetMethodIxSignatureUTF
+                _JVM_GetMethodParameterAnnotations
+                _JVM_GetMethodParameters
+                _JVM_GetMethodTypeAnnotations
+                _JVM_GetPrimitiveArrayElement
+                _JVM_GetProtectionDomain
+                _JVM_GetSockName
+                _JVM_GetSockOpt
+                _JVM_GetStackAccessControlContext
+                _JVM_GetStackTraceDepth
+                _JVM_GetStackTraceElement
+                _JVM_GetSystemPackage
+                _JVM_GetSystemPackages
+                _JVM_GetThreadStateNames
+                _JVM_GetThreadStateValues
+                _JVM_GetVersionInfo
+                _JVM_Halt
+                _JVM_HoldsLock
+                _JVM_IHashCode
+                _JVM_InitAgentProperties
+                _JVM_InitProperties
+                _JVM_InitializeCompiler
+                _JVM_InitializeSocketLibrary
+                _JVM_InternString
+                _JVM_Interrupt
+                _JVM_InvokeMethod
+                _JVM_IsArrayClass
+                _JVM_IsConstructorIx
+                _JVM_IsInterface
+                _JVM_IsInterrupted
+                _JVM_IsNaN
+                _JVM_IsPrimitiveClass
+                _JVM_IsSameClassPackage
+                _JVM_IsSilentCompiler
+                _JVM_IsSupportedJNIVersion
+                _JVM_IsThreadAlive
+                _JVM_IsVMGeneratedMethodIx
+                _JVM_LatestUserDefinedLoader
+                _JVM_Listen
+                _JVM_LoadClass0
+                _JVM_LoadLibrary
+                _JVM_Lseek
+                _JVM_MaxObjectInspectionAge
+                _JVM_MaxMemory
+                _JVM_MonitorNotify
+                _JVM_MonitorNotifyAll
+                _JVM_MonitorWait
+                _JVM_NanoTime
+                _JVM_NativePath
+                _JVM_NewArray
+                _JVM_NewInstanceFromConstructor
+                _JVM_NewMultiArray
+                _JVM_OnExit
+                _JVM_Open
+                _JVM_RaiseSignal
+                _JVM_RawMonitorCreate
+                _JVM_RawMonitorDestroy
+                _JVM_RawMonitorEnter
+                _JVM_RawMonitorExit
+                _JVM_Read
+                _JVM_Recv
+                _JVM_RecvFrom
+                _JVM_RegisterSignal
+                _JVM_ReleaseUTF
+                _JVM_ResolveClass
+                _JVM_ResumeThread
+                _JVM_Send
+                _JVM_SendTo
+                _JVM_SetArrayElement
+                _JVM_SetClassSigners
+                _JVM_SetLength
+                _JVM_SetNativeThreadName
+                _JVM_SetPrimitiveArrayElement
+                # Preserved so that the Graal repo can link against a JDK7 libjava.so
+                _JVM_SetProtectionDomain
+                _JVM_SetSockOpt
+                _JVM_SetThreadPriority
+                _JVM_Sleep
+                _JVM_Socket
+                _JVM_SocketAvailable
+                _JVM_SocketClose
+                _JVM_SocketShutdown
+                _JVM_StartThread
+                _JVM_StopThread
+                _JVM_SuspendThread
+                _JVM_SupportsCX8
+                _JVM_Sync
+                _JVM_Timeout
+                _JVM_TotalMemory
+                _JVM_TraceInstructions
+                _JVM_TraceMethodCalls
+                _JVM_UnloadLibrary
+                _JVM_Write
+                _JVM_Yield
+                _JVM_handle_bsd_signal
+
+                # miscellaneous functions
+                _jio_fprintf
+                _jio_printf
+                _jio_snprintf
+                _jio_vfprintf
+                _jio_vsnprintf
+
+                # This is for Forte Analyzer profiling support.
+                _AsyncGetCallTrace
+
+                # INSERT VTABLE SYMBOLS HERE
+
--- a/make/bsd/makefiles/mapfile-vers-debug	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/mapfile-vers-debug	Wed Mar 12 13:30:08 2014 +0100
@@ -19,245 +19,252 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#
+#  
 #
-# Only used for OSX/Darwin builds
 
 # Define public interface.
-                # _JNI
-                _JNI_CreateJavaVM
-                _JNI_GetCreatedJavaVMs
-                _JNI_GetDefaultJavaVMInitArgs
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
 
-                # _JVM
-                _JVM_Accept
-                _JVM_ActiveProcessorCount
-                _JVM_AllocateNewArray
-                _JVM_AllocateNewObject
-                _JVM_ArrayCopy
-                _JVM_AssertionStatusDirectives
-                _JVM_Available
-                _JVM_Bind
-                _JVM_ClassDepth
-                _JVM_ClassLoaderDepth
-                _JVM_Clone
-                _JVM_Close
-                _JVM_CX8Field
-                _JVM_CompileClass
-                _JVM_CompileClasses
-                _JVM_CompilerCommand
-                _JVM_Connect
-                _JVM_ConstantPoolGetClassAt
-                _JVM_ConstantPoolGetClassAtIfLoaded
-                _JVM_ConstantPoolGetDoubleAt
-                _JVM_ConstantPoolGetFieldAt
-                _JVM_ConstantPoolGetFieldAtIfLoaded
-                _JVM_ConstantPoolGetFloatAt
-                _JVM_ConstantPoolGetIntAt
-                _JVM_ConstantPoolGetLongAt
-                _JVM_ConstantPoolGetMethodAt
-                _JVM_ConstantPoolGetMethodAtIfLoaded
-                _JVM_ConstantPoolGetMemberRefInfoAt
-                _JVM_ConstantPoolGetSize
-                _JVM_ConstantPoolGetStringAt
-                _JVM_ConstantPoolGetUTF8At
-                _JVM_CountStackFrames
-                _JVM_CurrentClassLoader
-                _JVM_CurrentLoadedClass
-                _JVM_CurrentThread
-                _JVM_CurrentTimeMillis
-                _JVM_DefineClass
-                _JVM_DefineClassWithSource
-                _JVM_DefineClassWithSourceCond
-                _JVM_DesiredAssertionStatus
-                _JVM_DisableCompiler
-                _JVM_DoPrivileged
-                _JVM_DTraceGetVersion
-                _JVM_DTraceActivate
-                _JVM_DTraceIsProbeEnabled
-                _JVM_DTraceIsSupported
-                _JVM_DTraceDispose
-                _JVM_DumpAllStacks
-                _JVM_DumpThreads
-                _JVM_EnableCompiler
-                _JVM_Exit
-                _JVM_FillInStackTrace
-                _JVM_FindClassFromClass
-                _JVM_FindClassFromClassLoader
-                _JVM_FindClassFromBootLoader
-                _JVM_FindLibraryEntry
-                _JVM_FindLoadedClass
-                _JVM_FindPrimitiveClass
-                _JVM_FindSignal
-                _JVM_FreeMemory
-                _JVM_GC
-                _JVM_GetAllThreads
-                _JVM_GetArrayElement
-                _JVM_GetArrayLength
-                _JVM_GetCPClassNameUTF
-                _JVM_GetCPFieldClassNameUTF
-                _JVM_GetCPFieldModifiers
-                _JVM_GetCPFieldNameUTF
-                _JVM_GetCPFieldSignatureUTF
-                _JVM_GetCPMethodClassNameUTF
-                _JVM_GetCPMethodModifiers
-                _JVM_GetCPMethodNameUTF
-                _JVM_GetCPMethodSignatureUTF
-                _JVM_GetCallerClass
-                _JVM_GetClassAccessFlags
-                _JVM_GetClassAnnotations
-                _JVM_GetClassCPEntriesCount
-                _JVM_GetClassCPTypes
-                _JVM_GetClassConstantPool
-                _JVM_GetClassContext
-                _JVM_GetClassDeclaredConstructors
-                _JVM_GetClassDeclaredFields
-                _JVM_GetClassDeclaredMethods
-                _JVM_GetClassFieldsCount
-                _JVM_GetClassInterfaces
-                _JVM_GetClassLoader
-                _JVM_GetClassMethodsCount
-                _JVM_GetClassModifiers
-                _JVM_GetClassName
-                _JVM_GetClassNameUTF
-                _JVM_GetClassSignature
-                _JVM_GetClassSigners
-                _JVM_GetClassTypeAnnotations
-                _JVM_GetComponentType
-                _JVM_GetDeclaredClasses
-                _JVM_GetDeclaringClass
-                _JVM_GetEnclosingMethodInfo
-                _JVM_GetFieldAnnotations
-                _JVM_GetFieldIxModifiers
-                _JVM_GetFieldTypeAnnotations
-                _JVM_GetHostName
-                _JVM_GetInheritedAccessControlContext
-                _JVM_GetInterfaceVersion
-                _JVM_GetLastErrorString
-                _JVM_GetManagement
-                _JVM_GetMethodAnnotations
-                _JVM_GetMethodDefaultAnnotationValue
-                _JVM_GetMethodIxArgsSize
-                _JVM_GetMethodIxByteCode
-                _JVM_GetMethodIxByteCodeLength
-                _JVM_GetMethodIxExceptionIndexes
-                _JVM_GetMethodIxExceptionTableEntry
-                _JVM_GetMethodIxExceptionTableLength
-                _JVM_GetMethodIxExceptionsCount
-                _JVM_GetMethodIxLocalsCount
-                _JVM_GetMethodIxMaxStack
-                _JVM_GetMethodIxModifiers
-                _JVM_GetMethodIxNameUTF
-                _JVM_GetMethodIxSignatureUTF
-                _JVM_GetMethodParameterAnnotations
-                _JVM_GetMethodParameters
-                _JVM_GetMethodTypeAnnotations
-                _JVM_GetPrimitiveArrayElement
-                _JVM_GetProtectionDomain
-                _JVM_GetSockName
-                _JVM_GetSockOpt
-                _JVM_GetStackAccessControlContext
-                _JVM_GetStackTraceDepth
-                _JVM_GetStackTraceElement
-                _JVM_GetSystemPackage
-                _JVM_GetSystemPackages
-                _JVM_GetThreadStateNames
-                _JVM_GetThreadStateValues
-                _JVM_GetVersionInfo
-                _JVM_Halt
-                _JVM_HoldsLock
-                _JVM_IHashCode
-                _JVM_InitAgentProperties
-                _JVM_InitProperties
-                _JVM_InitializeCompiler
-                _JVM_InitializeSocketLibrary
-                _JVM_InternString
-                _JVM_Interrupt
-                _JVM_InvokeMethod
-                _JVM_IsArrayClass
-                _JVM_IsConstructorIx
-                _JVM_IsInterface
-                _JVM_IsInterrupted
-                _JVM_IsNaN
-                _JVM_IsPrimitiveClass
-                _JVM_IsSameClassPackage
-                _JVM_IsSilentCompiler
-                _JVM_IsSupportedJNIVersion
-                _JVM_IsThreadAlive
-                _JVM_IsVMGeneratedMethodIx
-                _JVM_LatestUserDefinedLoader
-                _JVM_Listen
-                _JVM_LoadClass0
-                _JVM_LoadLibrary
-                _JVM_Lseek
-                _JVM_MaxObjectInspectionAge
-                _JVM_MaxMemory
-                _JVM_MonitorNotify
-                _JVM_MonitorNotifyAll
-                _JVM_MonitorWait
-                _JVM_NanoTime
-                _JVM_NativePath
-                _JVM_NewArray
-                _JVM_NewInstanceFromConstructor
-                _JVM_NewMultiArray
-                _JVM_OnExit
-                _JVM_Open
-                _JVM_RaiseSignal
-                _JVM_RawMonitorCreate
-                _JVM_RawMonitorDestroy
-                _JVM_RawMonitorEnter
-                _JVM_RawMonitorExit
-                _JVM_Read
-                _JVM_Recv
-                _JVM_RecvFrom
-                _JVM_RegisterSignal
-                _JVM_ReleaseUTF
-                _JVM_ResolveClass
-                _JVM_ResumeThread
-                _JVM_Send
-                _JVM_SendTo
-                _JVM_SetArrayElement
-                _JVM_SetClassSigners
-                _JVM_SetLength
-                _JVM_SetNativeThreadName
-                _JVM_SetPrimitiveArrayElement
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetFieldTypeAnnotations;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetMethodTypeAnnotations;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
                 # Preserved so that the Graal repo can link against a JDK7 libjava.so
-                _JVM_SetProtectionDomain
-                _JVM_SetSockOpt
-                _JVM_SetThreadPriority
-                _JVM_Sleep
-                _JVM_Socket
-                _JVM_SocketAvailable
-                _JVM_SocketClose
-                _JVM_SocketShutdown
-                _JVM_StartThread
-                _JVM_StopThread
-                _JVM_SuspendThread
-                _JVM_SupportsCX8
-                _JVM_Sync
-                _JVM_Timeout
-                _JVM_TotalMemory
-                _JVM_TraceInstructions
-                _JVM_TraceMethodCalls
-                _JVM_UnloadLibrary
-                _JVM_Write
-                _JVM_Yield
-                _JVM_handle_bsd_signal
-
-                # debug _JVM
-                _JVM_AccessVMBooleanFlag
-                _JVM_AccessVMIntFlag
-                _JVM_VMBreakPoint
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
 
                 # miscellaneous functions
-                _jio_fprintf
-                _jio_printf
-                _jio_snprintf
-                _jio_vfprintf
-                _jio_vsnprintf
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
 
                 # This is for Forte Analyzer profiling support.
-                _AsyncGetCallTrace
+                AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
 
-                # INSERT VTABLE SYMBOLS HERE
+        local:
+                *;
+};
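
The rewrite above replaces a Mach-O style flat symbol list (leading
underscores, no terminating semicolons) with an ELF version script:
symbols under "global:" are exported with version SUNWprivate_1.1, and
the "local: *;" catch-all demotes everything else to local visibility.
A quick post-build sanity check (a sketch only; the library path is
illustrative, not taken from this changeset):

    # list the dynamic symbols the freshly built VM actually exports
    nm -D --defined-only libjvm.so | grep ' JVM_' | head
    # JVM_* entry points named under "global:" should appear; anything
    # swallowed by "local: *;" will not show up in the -D output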
 
--- a/make/bsd/makefiles/mapfile-vers-product	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/mapfile-vers-product	Wed Mar 12 13:30:08 2014 +0100
@@ -19,240 +19,252 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#
+#  
 #
-# Only used for OSX/Darwin builds
 
 # Define public interface.
-                # _JNI
-                _JNI_CreateJavaVM
-                _JNI_GetCreatedJavaVMs
-                _JNI_GetDefaultJavaVMInitArgs
+
+SUNWprivate_1.1 {
+        global:
+                # JNI
+                JNI_CreateJavaVM;
+                JNI_GetCreatedJavaVMs;
+                JNI_GetDefaultJavaVMInitArgs;
 
-                # _JVM
-                _JVM_Accept
-                _JVM_ActiveProcessorCount
-                _JVM_AllocateNewArray
-                _JVM_AllocateNewObject
-                _JVM_ArrayCopy
-                _JVM_AssertionStatusDirectives
-                _JVM_Available
-                _JVM_Bind
-                _JVM_ClassDepth
-                _JVM_ClassLoaderDepth
-                _JVM_Clone
-                _JVM_Close
-                _JVM_CX8Field
-                _JVM_CompileClass
-                _JVM_CompileClasses
-                _JVM_CompilerCommand
-                _JVM_Connect
-                _JVM_ConstantPoolGetClassAt
-                _JVM_ConstantPoolGetClassAtIfLoaded
-                _JVM_ConstantPoolGetDoubleAt
-                _JVM_ConstantPoolGetFieldAt
-                _JVM_ConstantPoolGetFieldAtIfLoaded
-                _JVM_ConstantPoolGetFloatAt
-                _JVM_ConstantPoolGetIntAt
-                _JVM_ConstantPoolGetLongAt
-                _JVM_ConstantPoolGetMethodAt
-                _JVM_ConstantPoolGetMethodAtIfLoaded
-                _JVM_ConstantPoolGetMemberRefInfoAt
-                _JVM_ConstantPoolGetSize
-                _JVM_ConstantPoolGetStringAt
-                _JVM_ConstantPoolGetUTF8At
-                _JVM_CountStackFrames
-                _JVM_CurrentClassLoader
-                _JVM_CurrentLoadedClass
-                _JVM_CurrentThread
-                _JVM_CurrentTimeMillis
-                _JVM_DefineClass
-                _JVM_DefineClassWithSource
-                _JVM_DefineClassWithSourceCond
-                _JVM_DesiredAssertionStatus
-                _JVM_DisableCompiler
-                _JVM_DoPrivileged
-                _JVM_DTraceGetVersion
-                _JVM_DTraceActivate
-                _JVM_DTraceIsProbeEnabled
-                _JVM_DTraceIsSupported
-                _JVM_DTraceDispose
-                _JVM_DumpAllStacks
-                _JVM_DumpThreads
-                _JVM_EnableCompiler
-                _JVM_Exit
-                _JVM_FillInStackTrace
-                _JVM_FindClassFromClass
-                _JVM_FindClassFromClassLoader
-                _JVM_FindClassFromBootLoader
-                _JVM_FindLibraryEntry
-                _JVM_FindLoadedClass
-                _JVM_FindPrimitiveClass
-                _JVM_FindSignal
-                _JVM_FreeMemory
-                _JVM_GC
-                _JVM_GetAllThreads
-                _JVM_GetArrayElement
-                _JVM_GetArrayLength
-                _JVM_GetCPClassNameUTF
-                _JVM_GetCPFieldClassNameUTF
-                _JVM_GetCPFieldModifiers
-                _JVM_GetCPFieldNameUTF
-                _JVM_GetCPFieldSignatureUTF
-                _JVM_GetCPMethodClassNameUTF
-                _JVM_GetCPMethodModifiers
-                _JVM_GetCPMethodNameUTF
-                _JVM_GetCPMethodSignatureUTF
-                _JVM_GetCallerClass
-                _JVM_GetClassAccessFlags
-                _JVM_GetClassAnnotations
-                _JVM_GetClassCPEntriesCount
-                _JVM_GetClassCPTypes
-                _JVM_GetClassConstantPool
-                _JVM_GetClassContext
-                _JVM_GetClassDeclaredConstructors
-                _JVM_GetClassDeclaredFields
-                _JVM_GetClassDeclaredMethods
-                _JVM_GetClassFieldsCount
-                _JVM_GetClassInterfaces
-                _JVM_GetClassLoader
-                _JVM_GetClassMethodsCount
-                _JVM_GetClassModifiers
-                _JVM_GetClassName
-                _JVM_GetClassNameUTF
-                _JVM_GetClassSignature
-                _JVM_GetClassSigners
-                _JVM_GetClassTypeAnnotations
-                _JVM_GetComponentType
-                _JVM_GetDeclaredClasses
-                _JVM_GetDeclaringClass
-                _JVM_GetEnclosingMethodInfo
-                _JVM_GetFieldAnnotations
-                _JVM_GetFieldIxModifiers
-                _JVM_GetFieldTypeAnnotations
-                _JVM_GetHostName
-                _JVM_GetInheritedAccessControlContext
-                _JVM_GetInterfaceVersion
-                _JVM_GetLastErrorString
-                _JVM_GetManagement
-                _JVM_GetMethodAnnotations
-                _JVM_GetMethodDefaultAnnotationValue
-                _JVM_GetMethodIxArgsSize
-                _JVM_GetMethodIxByteCode
-                _JVM_GetMethodIxByteCodeLength
-                _JVM_GetMethodIxExceptionIndexes
-                _JVM_GetMethodIxExceptionTableEntry
-                _JVM_GetMethodIxExceptionTableLength
-                _JVM_GetMethodIxExceptionsCount
-                _JVM_GetMethodIxLocalsCount
-                _JVM_GetMethodIxMaxStack
-                _JVM_GetMethodIxModifiers
-                _JVM_GetMethodIxNameUTF
-                _JVM_GetMethodIxSignatureUTF
-                _JVM_GetMethodParameterAnnotations
-                _JVM_GetMethodParameters
-                _JVM_GetMethodTypeAnnotations
-                _JVM_GetPrimitiveArrayElement
-                _JVM_GetProtectionDomain
-                _JVM_GetSockName
-                _JVM_GetSockOpt
-                _JVM_GetStackAccessControlContext
-                _JVM_GetStackTraceDepth
-                _JVM_GetStackTraceElement
-                _JVM_GetSystemPackage
-                _JVM_GetSystemPackages
-                _JVM_GetThreadStateNames
-                _JVM_GetThreadStateValues
-                _JVM_GetVersionInfo
-                _JVM_Halt
-                _JVM_HoldsLock
-                _JVM_IHashCode
-                _JVM_InitAgentProperties
-                _JVM_InitProperties
-                _JVM_InitializeCompiler
-                _JVM_InitializeSocketLibrary
-                _JVM_InternString
-                _JVM_Interrupt
-                _JVM_InvokeMethod
-                _JVM_IsArrayClass
-                _JVM_IsConstructorIx
-                _JVM_IsInterface
-                _JVM_IsInterrupted
-                _JVM_IsNaN
-                _JVM_IsPrimitiveClass
-                _JVM_IsSameClassPackage
-                _JVM_IsSilentCompiler
-                _JVM_IsSupportedJNIVersion
-                _JVM_IsThreadAlive
-                _JVM_IsVMGeneratedMethodIx
-                _JVM_LatestUserDefinedLoader
-                _JVM_Listen
-                _JVM_LoadClass0
-                _JVM_LoadLibrary
-                _JVM_Lseek
-                _JVM_MaxObjectInspectionAge
-                _JVM_MaxMemory
-                _JVM_MonitorNotify
-                _JVM_MonitorNotifyAll
-                _JVM_MonitorWait
-                _JVM_NanoTime
-                _JVM_NativePath
-                _JVM_NewArray
-                _JVM_NewInstanceFromConstructor
-                _JVM_NewMultiArray
-                _JVM_OnExit
-                _JVM_Open
-                _JVM_RaiseSignal
-                _JVM_RawMonitorCreate
-                _JVM_RawMonitorDestroy
-                _JVM_RawMonitorEnter
-                _JVM_RawMonitorExit
-                _JVM_Read
-                _JVM_Recv
-                _JVM_RecvFrom
-                _JVM_RegisterSignal
-                _JVM_ReleaseUTF
-                _JVM_ResolveClass
-                _JVM_ResumeThread
-                _JVM_Send
-                _JVM_SendTo
-                _JVM_SetArrayElement
-                _JVM_SetClassSigners
-                _JVM_SetLength
-                _JVM_SetNativeThreadName
-                _JVM_SetPrimitiveArrayElement
+                # JVM
+                JVM_Accept;
+                JVM_ActiveProcessorCount;
+                JVM_AllocateNewArray;
+                JVM_AllocateNewObject;
+                JVM_ArrayCopy;
+                JVM_AssertionStatusDirectives;
+                JVM_Available;
+                JVM_Bind;
+                JVM_ClassDepth;
+                JVM_ClassLoaderDepth;
+                JVM_Clone;
+                JVM_Close;
+                JVM_CX8Field;
+                JVM_CompileClass;
+                JVM_CompileClasses;
+                JVM_CompilerCommand;
+                JVM_Connect;
+                JVM_ConstantPoolGetClassAt;
+                JVM_ConstantPoolGetClassAtIfLoaded;
+                JVM_ConstantPoolGetDoubleAt;
+                JVM_ConstantPoolGetFieldAt;
+                JVM_ConstantPoolGetFieldAtIfLoaded;
+                JVM_ConstantPoolGetFloatAt;
+                JVM_ConstantPoolGetIntAt;
+                JVM_ConstantPoolGetLongAt;
+                JVM_ConstantPoolGetMethodAt;
+                JVM_ConstantPoolGetMethodAtIfLoaded;
+                JVM_ConstantPoolGetMemberRefInfoAt;
+                JVM_ConstantPoolGetSize;
+                JVM_ConstantPoolGetStringAt;
+                JVM_ConstantPoolGetUTF8At;
+                JVM_CountStackFrames;
+                JVM_CurrentClassLoader;
+                JVM_CurrentLoadedClass;
+                JVM_CurrentThread;
+                JVM_CurrentTimeMillis;
+                JVM_DefineClass;
+                JVM_DefineClassWithSource;
+                JVM_DefineClassWithSourceCond;
+                JVM_DesiredAssertionStatus;
+                JVM_DisableCompiler;
+                JVM_DoPrivileged;
+                JVM_DTraceGetVersion;
+                JVM_DTraceActivate;
+                JVM_DTraceIsProbeEnabled;
+                JVM_DTraceIsSupported;
+                JVM_DTraceDispose;
+                JVM_DumpAllStacks;
+                JVM_DumpThreads;
+                JVM_EnableCompiler;
+                JVM_Exit;
+                JVM_FillInStackTrace;
+                JVM_FindClassFromClass;
+                JVM_FindClassFromClassLoader;
+                JVM_FindClassFromBootLoader;
+                JVM_FindLibraryEntry;
+                JVM_FindLoadedClass;
+                JVM_FindPrimitiveClass;
+                JVM_FindSignal;
+                JVM_FreeMemory;
+                JVM_GC;
+                JVM_GetAllThreads;
+                JVM_GetArrayElement;
+                JVM_GetArrayLength;
+                JVM_GetCPClassNameUTF;
+                JVM_GetCPFieldClassNameUTF;
+                JVM_GetCPFieldModifiers;
+                JVM_GetCPFieldNameUTF;
+                JVM_GetCPFieldSignatureUTF;
+                JVM_GetCPMethodClassNameUTF;
+                JVM_GetCPMethodModifiers;
+                JVM_GetCPMethodNameUTF;
+                JVM_GetCPMethodSignatureUTF;
+                JVM_GetCallerClass;
+                JVM_GetClassAccessFlags;
+                JVM_GetClassAnnotations;
+                JVM_GetClassCPEntriesCount;
+                JVM_GetClassCPTypes;
+                JVM_GetClassConstantPool;
+                JVM_GetClassContext;
+                JVM_GetClassDeclaredConstructors;
+                JVM_GetClassDeclaredFields;
+                JVM_GetClassDeclaredMethods;
+                JVM_GetClassFieldsCount;
+                JVM_GetClassInterfaces;
+                JVM_GetClassLoader;
+                JVM_GetClassMethodsCount;
+                JVM_GetClassModifiers;
+                JVM_GetClassName;
+                JVM_GetClassNameUTF;
+                JVM_GetClassSignature;
+                JVM_GetClassSigners;
+                JVM_GetClassTypeAnnotations;
+                JVM_GetComponentType;
+                JVM_GetDeclaredClasses;
+                JVM_GetDeclaringClass;
+                JVM_GetEnclosingMethodInfo;
+                JVM_GetFieldAnnotations;
+                JVM_GetFieldIxModifiers;
+                JVM_GetFieldTypeAnnotations;
+                JVM_GetHostName;
+                JVM_GetInheritedAccessControlContext;
+                JVM_GetInterfaceVersion;
+                JVM_GetLastErrorString;
+                JVM_GetManagement;
+                JVM_GetMethodAnnotations;
+                JVM_GetMethodDefaultAnnotationValue;
+                JVM_GetMethodIxArgsSize;
+                JVM_GetMethodIxByteCode;
+                JVM_GetMethodIxByteCodeLength;
+                JVM_GetMethodIxExceptionIndexes;
+                JVM_GetMethodIxExceptionTableEntry;
+                JVM_GetMethodIxExceptionTableLength;
+                JVM_GetMethodIxExceptionsCount;
+                JVM_GetMethodIxLocalsCount;
+                JVM_GetMethodIxMaxStack;
+                JVM_GetMethodIxModifiers;
+                JVM_GetMethodIxNameUTF;
+                JVM_GetMethodIxSignatureUTF;
+                JVM_GetMethodParameterAnnotations;
+                JVM_GetMethodParameters;
+                JVM_GetMethodTypeAnnotations;
+                JVM_GetPrimitiveArrayElement;
+                JVM_GetProtectionDomain;
+                JVM_GetSockName;
+                JVM_GetSockOpt;
+                JVM_GetStackAccessControlContext;
+                JVM_GetStackTraceDepth;
+                JVM_GetStackTraceElement;
+                JVM_GetSystemPackage;
+                JVM_GetSystemPackages;
+                JVM_GetThreadStateNames;
+                JVM_GetThreadStateValues;
+                JVM_GetVersionInfo;
+                JVM_Halt;
+                JVM_HoldsLock;
+                JVM_IHashCode;
+                JVM_InitAgentProperties;
+                JVM_InitProperties;
+                JVM_InitializeCompiler;
+                JVM_InitializeSocketLibrary;
+                JVM_InternString;
+                JVM_Interrupt;
+                JVM_InvokeMethod;
+                JVM_IsArrayClass;
+                JVM_IsConstructorIx;
+                JVM_IsInterface;
+                JVM_IsInterrupted;
+                JVM_IsNaN;
+                JVM_IsPrimitiveClass;
+                JVM_IsSameClassPackage;
+                JVM_IsSilentCompiler;
+                JVM_IsSupportedJNIVersion;
+                JVM_IsThreadAlive;
+                JVM_IsVMGeneratedMethodIx;
+                JVM_LatestUserDefinedLoader;
+                JVM_Listen;
+                JVM_LoadClass0;
+                JVM_LoadLibrary;
+                JVM_Lseek;
+                JVM_MaxObjectInspectionAge;
+                JVM_MaxMemory;
+                JVM_MonitorNotify;
+                JVM_MonitorNotifyAll;
+                JVM_MonitorWait;
+                JVM_NanoTime;
+                JVM_NativePath;
+                JVM_NewArray;
+                JVM_NewInstanceFromConstructor;
+                JVM_NewMultiArray;
+                JVM_OnExit;
+                JVM_Open;
+                JVM_RaiseSignal;
+                JVM_RawMonitorCreate;
+                JVM_RawMonitorDestroy;
+                JVM_RawMonitorEnter;
+                JVM_RawMonitorExit;
+                JVM_Read;
+                JVM_Recv;
+                JVM_RecvFrom;
+                JVM_RegisterSignal;
+                JVM_ReleaseUTF;
+                JVM_ResolveClass;
+                JVM_ResumeThread;
+                JVM_Send;
+                JVM_SendTo;
+                JVM_SetArrayElement;
+                JVM_SetClassSigners;
+                JVM_SetLength;
+                JVM_SetNativeThreadName;
+                JVM_SetPrimitiveArrayElement;
                 # Preserved so that the Graal repo can link against a JDK7 libjava.so
-                _JVM_SetProtectionDomain
-                _JVM_SetSockOpt
-                _JVM_SetThreadPriority
-                _JVM_Sleep
-                _JVM_Socket
-                _JVM_SocketAvailable
-                _JVM_SocketClose
-                _JVM_SocketShutdown
-                _JVM_StartThread
-                _JVM_StopThread
-                _JVM_SuspendThread
-                _JVM_SupportsCX8
-                _JVM_Sync
-                _JVM_Timeout
-                _JVM_TotalMemory
-                _JVM_TraceInstructions
-                _JVM_TraceMethodCalls
-                _JVM_UnloadLibrary
-                _JVM_Write
-                _JVM_Yield
-                _JVM_handle_bsd_signal
+                JVM_SetProtectionDomain;
+                JVM_SetSockOpt;
+                JVM_SetThreadPriority;
+                JVM_Sleep;
+                JVM_Socket;
+                JVM_SocketAvailable;
+                JVM_SocketClose;
+                JVM_SocketShutdown;
+                JVM_StartThread;
+                JVM_StopThread;
+                JVM_SuspendThread;
+                JVM_SupportsCX8;
+                JVM_Sync;
+                JVM_Timeout;
+                JVM_TotalMemory;
+                JVM_TraceInstructions;
+                JVM_TraceMethodCalls;
+                JVM_UnloadLibrary;
+                JVM_Write;
+                JVM_Yield;
+                JVM_handle_linux_signal;
 
                 # miscellaneous functions
-                _jio_fprintf
-                _jio_printf
-                _jio_snprintf
-                _jio_vfprintf
-                _jio_vsnprintf
+                jio_fprintf;
+                jio_printf;
+                jio_snprintf;
+                jio_vfprintf;
+                jio_vsnprintf;
+                fork1;
+                numa_warn;
+                numa_error;
+
+                # Needed because there is no JVM interface for this.
+                sysThreadAvailableStackWithSlack;
 
                 # This is for Forte Analyzer profiling support.
-                _AsyncGetCallTrace
+                AsyncGetCallTrace;
+
+		# INSERT VTABLE SYMBOLS HERE
 
-                # INSERT VTABLE SYMBOLS HERE
+        local:
+                *;
+};
 
--- a/make/bsd/makefiles/minimal1.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/minimal1.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/bsd/makefiles/optimized.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/optimized.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
@@ -39,5 +39,8 @@
 
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
+ifeq ($(OS_VENDOR), Darwin)
+MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-debug
+endif
 
 VERSION = optimized
--- a/make/bsd/makefiles/product.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/product.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
@@ -39,6 +39,9 @@
 
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-product
+ifeq ($(OS_VENDOR), Darwin)
+MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-darwin-product
+endif
 
 SYSDEFS += -DPRODUCT
 VERSION = optimized
--- a/make/bsd/makefiles/saproc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/saproc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -157,10 +157,10 @@
 install_saproc: $(BUILDLIBSAPROC)
 	@echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
 ifeq ($(OS_VENDOR), Darwin)
-	-$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBSAPROC_DEBUGINFO) || \
 	    cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBSAPROC_DEBUGINFO) || \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
 endif
 	$(QUIETLY) test ! -f $(LIBSAPROC_DIZ) || \
--- a/make/bsd/makefiles/top.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/top.make	Wed Mar 12 13:30:08 2014 +0100
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -128,7 +128,7 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install: the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
--- a/make/bsd/makefiles/vm.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/bsd/makefiles/vm.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -388,10 +388,10 @@
 install_jvm: $(LIBJVM)
 	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
 ifeq ($(OS_VENDOR), Darwin)
-	-$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -d $(LIBJVM_DEBUGINFO) || \
 	    cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 else
-	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
+	$(QUIETLY) test ! -f $(LIBJVM_DEBUGINFO) || \
 	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
 endif
 	$(QUIETLY) test ! -f $(LIBJVM_DIZ) || \
--- a/make/defs.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/defs.make	Wed Mar 12 13:30:08 2014 +0100
@@ -176,11 +176,15 @@
   HOST := $(shell uname -n)
 endif
 
-# If not SunOS, not Linux and not BSD, assume Windows
+# If not SunOS, not Linux, not BSD, and not AIX, assume Windows
 ifneq ($(OS), Linux)
   ifneq ($(OS), SunOS)
     ifneq ($(OS), bsd)
-      OSNAME=windows
+      ifneq ($(OS), AIX)
+        OSNAME=windows
+      else
+        OSNAME=aix
+      endif
     else
       OSNAME=bsd
     endif
@@ -269,7 +273,7 @@
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
   # is not explicitly listed below, it is treated as x86.
-  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
+  SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc ppc64 zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
   ARCH/sparc64= sparc
@@ -295,6 +299,11 @@
       BUILDARCH = sparcv9
     endif
   endif
+  ifeq ($(BUILDARCH), ppc)
+    ifdef LP64
+      BUILDARCH = ppc64
+    endif
+  endif
 
   # LIBARCH is 1:1 mapping from BUILDARCH
   LIBARCH         = $(LIBARCH/$(BUILDARCH))
@@ -303,12 +312,12 @@
   LIBARCH/sparc   = sparc
   LIBARCH/sparcv9 = sparcv9
   LIBARCH/ia64    = ia64
-  LIBARCH/ppc64   = ppc
+  LIBARCH/ppc64   = ppc64
   LIBARCH/ppc     = ppc
   LIBARCH/arm     = arm
   LIBARCH/zero    = $(ZERO_LIBARCH)
 
-  LP64_ARCH = sparcv9 amd64 ia64 zero
+  LP64_ARCH = sparcv9 amd64 ia64 ppc64 zero
 endif
 
 # Required make macro settings for all platforms
--- a/make/excludeSrc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/excludeSrc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -86,7 +86,7 @@
 	concurrentMark.cpp concurrentMarkThread.cpp dirtyCardQueue.cpp g1AllocRegion.cpp \
 	g1BlockOffsetTable.cpp g1CardCounts.cpp g1CollectedHeap.cpp g1CollectorPolicy.cpp \
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
-	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
+	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp g1OopClosures.cpp \
 	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
 	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
--- a/make/hotspot.script	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/hotspot.script	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-# Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
     GDB=gdb
 fi
 
-# This is the name of the gdb binary to use
+# This is the name of the dbx binary to use
 if [ ! "$DBX" ]
 then
     DBX=dbx
@@ -68,9 +68,16 @@
 # End of user changeable parameters -----------------------------------------
 #
 
+OS=`uname -s`
+
 # Make sure the paths are fully specified, i.e. they must begin with /.
 REL_MYDIR=`dirname $0`
 MYDIR=`cd $REL_MYDIR && pwd`
+case "$OS" in
+CYGWIN*)
+    MYDIR=`cygpath -m "$MYDIR"`
+    ;;
+esac
 
 #
 # Look whether the user wants to run inside gdb
@@ -102,8 +109,17 @@
     JDK=@@JDK_IMPORT_PATH@@
 fi
 
-if [ "${JDK}" = "" ]; then
-    echo "Failed to find JDK.  Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty."
+if [ "${JDK}" != "" ]; then
+    case "$OS" in
+    CYGWIN*)
+        JDK=`cygpath -m "$JDK"`
+        ;;
+    esac
+
+else
+    echo "Failed to find JDK." \
+        "Either ALT_JAVA_HOME is not set or JDK_IMPORT_PATH is empty."
+    exit 1
 fi
 
 # We will set the LD_LIBRARY_PATH as follows:
@@ -120,7 +136,6 @@
 
 
 # Set up a suitable LD_LIBRARY_PATH or DYLD_LIBRARY_PATH
-OS=`uname -s`
 if [ "${OS}" = "Darwin" ]
 then
     if [ -z "$DYLD_LIBRARY_PATH" ]
@@ -141,7 +156,7 @@
     export LD_LIBRARY_PATH
 fi
 
-JPARMS="-Dsun.java.launcher=gamma -XXaltjvm=$MYDIR $@ $JAVA_ARGS";
+JPARMS="-XXaltjvm=$MYDIR -Dsun.java.launcher.is_altjvm=true $@ $JAVA_ARGS";
 
 # Locate the java launcher
 LAUNCHER=$JDK/bin/java
@@ -152,6 +167,11 @@
 
 GDBSRCDIR=$MYDIR
 BASEDIR=`cd $MYDIR/../../.. && pwd`
+case "$OS" in
+CYGWIN*)
+    BASEDIR=`cygpath -m "$BASEDIR"`
+    ;;
+esac
 
 init_gdb() {
 # Create a gdb script in case we should run inside gdb
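
All three CYGWIN* cases added above normalize POSIX paths before they
reach a Windows java launcher. A minimal sketch of what cygpath -m does
(assumes a Cygwin shell; the path is made up):

    case "`uname -s`" in
    CYGWIN*)
        # -m yields the "mixed" form: a Windows drive path with forward
        # slashes, digestible by both Windows tools and sh
        cygpath -m /cygdrive/c/jdk8    # prints C:/jdk8
        ;;
    esac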
--- a/make/linux/makefiles/adjust-mflags.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/adjust-mflags.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
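
Adding I to both bracket expressions keeps the first substitution from
tearing apart a -I include flag whose argument happens to end in j,
while glued flags such as -kj are still split. A simplified sketch (the
literal tab inside the brackets is dropped for readability; the flag
values are made up):

    echo ' -Idirj -kj' | sed 's/ -\([^ I][^ I]*\)j/ -\1 -j/'
    # prints ' -Idirj -k -j': the -I flag survives intact, while the
    # glued j in -kj is split out for the later -jN expansion; the old
    # pattern would have mangled ' -Idirj' into ' -Idir -j'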
--- a/make/linux/makefiles/adlc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/adlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/buildtree.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/buildtree.make	Wed Mar 12 13:30:08 2014 +0100
@@ -124,7 +124,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -193,6 +193,7 @@
 DATA_MODE/sparc = 32
 DATA_MODE/sparcv9 = 64
 DATA_MODE/amd64 = 64
+DATA_MODE/ppc64 = 64
 
 DATA_MODE = $(DATA_MODE/$(BUILDARCH))
 
@@ -366,6 +367,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+dtrace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 FORCE:
 
 .PHONY:  all FORCE
--- a/make/linux/makefiles/defs.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/defs.make	Wed Mar 12 13:30:08 2014 +0100
@@ -120,6 +120,15 @@
   HS_ARCH          = ppc
 endif
 
+# PPC64
+ifeq ($(ARCH), ppc64)
+  ARCH_DATA_MODEL  = 64
+  MAKE_ARGS        += LP64=1
+  PLATFORM         = linux-ppc64
+  VM_PLATFORM      = linux_ppc64
+  HS_ARCH          = ppc
+endif
+
 # On 32 bit linux we build server and client, on 64 bit just server.
 ifeq ($(JVM_VARIANTS),)
   ifeq ($(ARCH_DATA_MODEL), 32)
@@ -255,7 +264,7 @@
 EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
 EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
 
-ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
+ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK) $(JVM_VARIANT_CORE)), true)
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
--- a/make/linux/makefiles/dtrace.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/dtrace.make	Wed Mar 12 13:30:08 2014 +0100
@@ -42,18 +42,39 @@
 else
   SDT_H_FILE = /usr/include/sys/sdt.h
 endif
+
 DTRACE_ENABLED = $(shell test -f $(SDT_H_FILE) && echo $(SDT_H_FILE))
 REASON = "$(SDT_H_FILE) not found"
 
+endif # GCC version
+endif # OPENJDK
+
+
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
+DTRACE_PROG = dtrace
+DtraceOutDir = $(GENERATED)/dtracefiles
+
+$(DtraceOutDir):
+	mkdir $(DtraceOutDir)
+
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+
 ifneq ($(DTRACE_ENABLED),)
-  CFLAGS += -DDTRACE_ENABLED
-endif
-
-endif
+CFLAGS += -DDTRACE_ENABLED
+dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
+else
+dtrace_gen_headers:
+	$(QUIETLY) echo "**NOTICE** Dtrace support disabled: $(REASON)"
 endif
 
 # Phony target used in vm.make build target to check whether enabled.
-.PHONY: dtraceCheck
 ifeq ($(DTRACE_ENABLED),)
 dtraceCheck:
 	$(QUIETLY) echo "**NOTICE** Dtrace support disabled: $(REASON)"
@@ -61,5 +82,7 @@
 dtraceCheck:
 endif
 
+.PHONY: dtrace_gen_headers dtraceCheck
+
 # HAVE_DTRACE_H is not supported on Linux, though.
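
The new dtrace_gen_headers rules generate C headers from the shared .d
probe descriptions via dtrace -h (on Linux this is typically the
SystemTap-provided dtrace wrapper). A sketch of the equivalent manual
invocation, with an illustrative source path:

    mkdir -p generated/dtracefiles
    dtrace -C -I. -h -o generated/dtracefiles/hotspot.h \
        -s src/os/posix/dtrace/hotspot.d
    # the header defines one HOTSPOT_*() macro per probe plus matching
    # *_ENABLED() guards, used when the VM is built with -DDTRACE_ENABLED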
 
--- a/make/linux/makefiles/gcc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/gcc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -181,6 +181,7 @@
 ifndef E500V2
 ARCHFLAG/ppc     =  -mcpu=powerpc
 endif
+ARCHFLAG/ppc64   =  -m64
 
 CFLAGS     += $(ARCHFLAG)
 AOUT_FLAGS += $(ARCHFLAG)
@@ -214,7 +215,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -346,6 +347,7 @@
   DEBUG_CFLAGS/amd64 = -g
   DEBUG_CFLAGS/arm   = -g
   DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS/ppc64 = -g
   DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
   ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -361,6 +363,7 @@
     FASTDEBUG_CFLAGS/amd64 = -g
     FASTDEBUG_CFLAGS/arm   = -g
     FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS/ppc64 = -g
     FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
     ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
@@ -375,6 +378,7 @@
     OPT_CFLAGS/amd64 = -g
     OPT_CFLAGS/arm   = -g
     OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS/ppc64 = -g
     OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
     ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
       ifeq ($(USE_CLANG), true)
--- a/make/linux/makefiles/jsig.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/jsig.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/mapfile-vers-debug	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/mapfile-vers-debug	Wed Mar 12 13:30:08 2014 +0100
@@ -246,11 +246,6 @@
                 JVM_Yield;
                 JVM_handle_linux_signal;
 
-                # debug JVM
-                JVM_AccessVMBooleanFlag;
-                JVM_AccessVMIntFlag;
-                JVM_VMBreakPoint;
-
                 # miscellaneous functions
                 jio_fprintf;
                 jio_printf;
--- a/make/linux/makefiles/minimal1.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/minimal1.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/makefiles/optimized.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/optimized.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/ppc64.make	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,39 @@
+#
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright 2012, 2013 SAP AG. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# Make C code aware that it is on a 64-bit platform.
+CFLAGS += -D_LP64=1
+
+# Fixes the `relocation truncated to fit' error seen with gcc 4.1.
+CFLAGS += -mminimal-toc
+
+# Use ppc64 instructions, but schedule for power5.
+CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
+
+# Let the linker find external 64-bit libs.
+LFLAGS_VM += -L/lib64
+
+# specify lib format.
+LFLAGS_VM +=  -Wl,-melf64ppc
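
A quick way to confirm the toolchain honors these flags (a sketch;
assumes a native or cross gcc targeting ppc64, and note that -m64 is
contributed separately by ARCHFLAG/ppc64 in gcc.make):

    echo 'int main() { return 0; }' > /tmp/probe.c
    gcc -m64 -D_LP64=1 -mminimal-toc -mcpu=powerpc64 -mtune=power5 \
        -c /tmp/probe.c -o /tmp/probe.o
    file /tmp/probe.o   # expect: ELF 64-bit MSB relocatable, 64-bit PowerPC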
--- a/make/linux/makefiles/product.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/product.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/linux/makefiles/saproc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/saproc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -113,13 +113,13 @@
 endif
 
 install_saproc: $(BUILDLIBSAPROC)
-	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  test -f $(LIBSAPROC_DEBUGINFO) &&                  \
+	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then                   \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";           \
+	  test ! -f $(LIBSAPROC_DEBUGINFO) ||                      \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
-	  test -f $(LIBSAPROC_DIZ) &&                  \
-	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	  test ! -f $(LIBSAPROC_DIZ) ||                            \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ);             \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";        \
 	fi
 
 .PHONY: install_saproc
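
The flip from "test -f X && ..." to "test ! -f X || ..." is not
cosmetic: make treats a nonzero exit from a recipe line as a failure,
and with && the line fails exactly when the optional debuginfo file is
absent (the old Darwin lines papered over this with a leading "-" to
ignore the error). With || the line succeeds either way. A standalone
sketch:

    rm -f /tmp/absent
    test -f /tmp/absent && echo copying; echo "status $?"    # status 1
    test ! -f /tmp/absent || echo copying; echo "status $?"  # status 0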
--- a/make/linux/makefiles/top.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/top.make	Wed Mar 12 13:30:08 2014 +0100
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -102,6 +102,9 @@
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
 
+dtrace_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f dtrace.make dtrace_gen_headers $(MFLAGS-adjusted) GENERATED=$(GENERATED)
+
 # and the VM: must use other makefile with dependencies included
 
 # We have to go to great lengths to get control over the -jN argument
@@ -119,7 +122,7 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install: the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
--- a/make/linux/makefiles/vm.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/makefiles/vm.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/linux/platform_ppc	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/linux/platform_ppc	Wed Mar 12 13:30:08 2014 +0100
@@ -2,11 +2,11 @@
 
 arch = ppc
 
-arch_model = ppc
+arch_model = ppc_32
 
 os_arch = linux_ppc
 
-os_arch_model = linux_ppc
+os_arch_model = linux_ppc_32
 
 lib_arch = ppc
 
@@ -14,4 +14,4 @@
 
 gnu_dis_arch = ppc
 
-sysdefs = -DLINUX -D_GNU_SOURCE -DPPC
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC32
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/platform_ppc64	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,17 @@
+os_family = linux
+
+arch = ppc
+
+arch_model = ppc_64
+
+os_arch = linux_ppc
+
+os_arch_model = linux_ppc_64
+
+lib_arch = ppc64
+
+compiler = gcc
+
+gnu_dis_arch = ppc64
+
+sysdefs = -DLINUX -D_GNU_SOURCE -DPPC64
--- a/make/sa.files	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/sa.files	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/adjust-mflags.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/adjust-mflags.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -64,7 +64,7 @@
 	echo "$MFLAGS" \
 	| sed '
 		s/^-/ -/
-		s/ -\([^ 	][^ 	]*\)j/ -\1 -j/
+		s/ -\([^ 	I][^ 	I]*\)j/ -\1 -j/
 		s/ -j[0-9][0-9]*/ -j/
 		s/ -j\([^ 	]\)/ -j -\1/
 		s/ -j/ -j'${HOTSPOT_BUILD_JOBS:-${default_build_jobs}}'/
--- a/make/solaris/makefiles/adlc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/adlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/buildtree.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/buildtree.make	Wed Mar 12 13:30:08 2014 +0100
@@ -117,7 +117,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
 	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
@@ -354,6 +354,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+dtrace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 FORCE:
 
 .PHONY:  all FORCE
--- a/make/solaris/makefiles/dtrace.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/dtrace.make	Wed Mar 12 13:30:08 2014 +0100
@@ -36,6 +36,8 @@
 
 else
 
+DtraceOutDir = $(GENERATED)/dtracefiles
+
 JVM_DB = libjvm_db
 LIBJVM_DB = libjvm_db.so
 
@@ -53,6 +55,7 @@
 GENOFFS = generate$(JVMOFFS)
 
 DTRACE_SRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/dtrace
+DTRACE_COMMON_SRCDIR = $(GAMMADIR)/src/os/posix/dtrace
 DTRACE = dtrace
 DTRACE.o = $(DTRACE).o
 
@@ -251,8 +254,8 @@
   endif
 endif
 
-$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
-             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
+$(DTRACE).d: $(DTRACE_COMMON_SRCDIR)/hotspot.d $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d \
+             $(DTRACE_COMMON_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
 	$(QUIETLY) cat $^ > $@
 
 DTraced_Files = ciEnv.o \
@@ -326,6 +329,22 @@
 	$(QUIETLY) if [ -f $(GENOFFS) ]; then touch $(GENOFFS); fi
 	$(QUIETLY) if [ -f $(JVMOFFS.o) ]; then touch $(JVMOFFS.o); fi
 
+
+$(DtraceOutDir):
+	mkdir $(DtraceOutDir)
+
+$(DtraceOutDir)/hotspot.h: $(DTRACE_COMMON_SRCDIR)/hotspot.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot.d
+
+$(DtraceOutDir)/hotspot_jni.h: $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hotspot_jni.d
+
+$(DtraceOutDir)/hs_private.h: $(DTRACE_COMMON_SRCDIR)/hs_private.d | $(DtraceOutDir)
+	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -h -o $@ -s $(DTRACE_COMMON_SRCDIR)/hs_private.d
+
+dtrace_gen_headers: $(DtraceOutDir)/hotspot.h $(DtraceOutDir)/hotspot_jni.h $(DtraceOutDir)/hs_private.h
+
+
 .PHONY: dtraceCheck
 
 SYSTEM_DTRACE_H = /usr/include/dtrace.h
--- a/make/solaris/makefiles/gcc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/gcc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -118,7 +118,7 @@
 # Compiler warnings are treated as errors 
 WARNINGS_ARE_ERRORS = -Werror 
 # Enable these warnings. See 'info gcc' about details on these options
-WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
 # Special cases 
 CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))  
--- a/make/solaris/makefiles/jsig.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/jsig.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/solaris/makefiles/mapfile-vers-debug	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/mapfile-vers-debug	Wed Mar 12 13:30:08 2014 +0100
@@ -28,10 +28,6 @@
 
 SUNWprivate_1.1 {
         global:
-		# debug JVM
-		JVM_AccessVMBooleanFlag;
-		JVM_AccessVMIntFlag;
-		JVM_VMBreakPoint;
 
 		# miscellaneous
 };
--- a/make/solaris/makefiles/optimized.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/optimized.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/solaris/makefiles/product.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/product.make	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,7 @@
 #  
 #
 
-# Sets make macros for making optimized version of Gamma VM
+# Sets make macros for making optimized version of HotSpot VM
 # (This is the "product", not the "release" version.)
 
 # Compiler specific OPT_CFLAGS are passed in from gcc.make, sparcWorks.make
--- a/make/solaris/makefiles/saproc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/saproc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -147,13 +147,13 @@
 endif
 
 install_saproc: $(BUILDLIBSAPROC)
-	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then             \
-	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
-	  test -f $(LIBSAPROC_DEBUGINFO) &&             \
+	$(QUIETLY) if [ -f $(LIBSAPROC) ] ; then                   \
+	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";           \
+	  test ! -f $(LIBSAPROC_DEBUGINFO) ||                      \
 	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO); \
-	  test -f $(LIBSAPROC_DIZ) &&             \
-	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ); \
-	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
+	  test ! -f $(LIBSAPROC_DIZ) ||                            \
+	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ);             \
+	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";        \
 	fi
 
 .PHONY: install_saproc
--- a/make/solaris/makefiles/top.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/top.make	Wed Mar 12 13:30:08 2014 +0100
@@ -73,7 +73,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -95,6 +95,9 @@
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
 
+dtrace_stuff: $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f dtrace.make dtrace_gen_headers $(MFLAGS-adjusted) GENERATED=$(GENERATED)
+
 # and the VM: must use other makefile with dependencies included
 
 # We have to go to great lengths to get control over the -jN argument
@@ -111,7 +114,7 @@
 the_vm: vm_build_preliminaries $(adjust-mflags)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install gamma: the_vm
+install: the_vm
 	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[oi]"
--- a/make/solaris/makefiles/vm.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/solaris/makefiles/vm.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/build_vm_def.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/build_vm_def.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/adlc.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/adlc.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/debug.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/debug.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/product.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/product.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/projectcreator.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/projectcreator.make	Wed Mar 12 13:30:08 2014 +0100
@@ -73,6 +73,7 @@
         -ignorePath arm \
         -ignorePath ppc \
         -ignorePath zero \
+        -ignorePath aix \
         -hidePath .hg
 
 
--- a/make/windows/makefiles/rules.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/rules.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/sa.make	Tue Mar 11 15:34:06 2014 +0100
+++ b/make/windows/makefiles/sa.make	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,7 @@
 SA_LD_FLAGS = bufferoverflowU.lib
 !endif
 !else
-SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 -Gm $(GX_OPTION) -Od -D "WIN32" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -YX -FD -GZ -c
+SA_CFLAGS = -nologo $(MS_RUNTIME_OPTION) -W3 $(GX_OPTION) -Od -D "WIN32" -D "_WINDOWS" -D "_DEBUG" -D "_CONSOLE" -D "_MBCS" -FD -RTC1 -c 
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
 SA_CFLAGS = $(SA_CFLAGS) -ZI
 !endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,699 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#endif // INCLUDE_ALL_GCS
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
+int AbstractAssembler::code_fill_byte() {
+  return 0x00;                  // illegal instruction 0x00000000
+}
+
+void Assembler::print_instruction(int inst) {
+  Unimplemented();
+}
+
+// Patch instruction `inst' at offset `inst_pos' to refer to
+// `dest_pos' and return the resulting instruction.  We should have
+// pcs, not offsets, but since all is relative, it will work out fine.
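+// E.g. for an unconditional branch (b_op) li(-1) sets every bit of the
+// 24-bit LI field, so (inst & ~m) | v replaces just the displacement
+// while leaving the opcode and the AA/LK bits untouched.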
+int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) {
+  int m = 0; // mask for displacement field
+  int v = 0; // new value for displacement field
+
+  switch (inv_op_ppc(inst)) {
+  case b_op:  m = li(-1); v = li(disp(dest_pos, inst_pos)); break;
+  case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break;
+    default: ShouldNotReachHere();
+  }
+  return (inst & ~m) | v;
+}
+
+// Return the offset, relative to _code_begin, of the destination of
+// the branch inst at offset pos.
+int Assembler::branch_destination(int inst, int pos) {
+  int r = 0;
+  switch (inv_op_ppc(inst)) {
+    case b_op:  r = bxx_destination_offset(inst, pos); break;
+    case bc_op: r = inv_bd_field(inst, pos); break;
+    default: ShouldNotReachHere();
+  }
+  return r;
+}
+
+// Low-level andi-one-instruction-macro.
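+// Where the immediate is a simple mask this expands to a single
+// rotate/clear instruction that leaves CR0 alone, e.g. andi(a, s, 0x00ff)
+// becomes clrldi(a, s, 56) (keep the low 8 bits); otherwise it falls
+// back to the recording andi_.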
+void Assembler::andi(Register a, Register s, const int ui16) {
+  assert(is_uimm(ui16, 16), "must be 16-bit unsigned immediate");
+  if (is_power_of_2_long(((jlong) ui16)+1)) {
+    // pow2minus1
+    clrldi(a, s, 64-log2_long((((jlong) ui16)+1)));
+  } else if (is_power_of_2_long((jlong) ui16)) {
+    // pow2
+    rlwinm(a, s, 0, 31-log2_long((jlong) ui16), 31-log2_long((jlong) ui16));
+  } else if (is_power_of_2_long((jlong)-ui16)) {
+    // negpow2
+    clrrdi(a, s, log2_long((jlong)-ui16));
+  } else {
+    andi_(a, s, ui16);
+  }
+}
+
+// RegisterOrConstant version.
+void Assembler::ld(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::ld(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::ld(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::ldx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::ld(d, 0, roc.as_register());
+    else
+      Assembler::ldx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lwa(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lwa(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lwa(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lwax(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lwa(d, 0, roc.as_register());
+    else
+      Assembler::lwax(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lwz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lwz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lwz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lwzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lwz(d, 0, roc.as_register());
+    else
+      Assembler::lwzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lha(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lha(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lha(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lhax(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lha(d, 0, roc.as_register());
+    else
+      Assembler::lhax(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lhz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lhz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lhz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lhzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lhz(d, 0, roc.as_register());
+    else
+      Assembler::lhzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::lbz(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      int simm16_rest = load_const_optimized(d, roc.as_constant(), noreg, true);
+      Assembler::lbz(d, simm16_rest, d);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::lbz(d, roc.as_constant(), s1);
+    } else {
+      load_const_optimized(d, roc.as_constant());
+      Assembler::lbzx(d, d, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::lbz(d, 0, roc.as_register());
+    else
+      Assembler::lbzx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::std(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::std(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::std(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stdx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::std(d, 0, roc.as_register());
+    else
+      Assembler::stdx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::stw(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::stw(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::stw(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stwx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::stw(d, 0, roc.as_register());
+    else
+      Assembler::stwx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::sth(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::sth(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::sth(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::sthx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::sth(d, 0, roc.as_register());
+    else
+      Assembler::sthx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::stb(Register d, RegisterOrConstant roc, Register s1, Register tmp) {
+  if (roc.is_constant()) {
+    if (s1 == noreg) {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      int simm16_rest = load_const_optimized(tmp, roc.as_constant(), noreg, true);
+      Assembler::stb(d, simm16_rest, tmp);
+    } else if (is_simm(roc.as_constant(), 16)) {
+      Assembler::stb(d, roc.as_constant(), s1);
+    } else {
+      guarantee(tmp != noreg, "Need tmp reg to encode large constants");
+      load_const_optimized(tmp, roc.as_constant());
+      Assembler::stbx(d, tmp, s1);
+    }
+  } else {
+    if (s1 == noreg)
+      Assembler::stb(d, 0, roc.as_register());
+    else
+      Assembler::stbx(d, roc.as_register(), s1);
+  }
+}
+
+void Assembler::add(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(c, 16), "too big");
+    addi(d, s1, (int)c);
+  }
+  else add(d, roc.as_register(), s1);
+}
+
+void Assembler::subf(Register d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(-c, 16), "too big");
+    addi(d, s1, (int)-c);
+  }
+  else subf(d, roc.as_register(), s1);
+}
+
+void Assembler::cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1) {
+  if (roc.is_constant()) {
+    intptr_t c = roc.as_constant();
+    assert(is_simm(c, 16), "too big");
+    cmpdi(d, s1, (int)c);
+  }
+  else cmpd(d, roc.as_register(), s1);
+}
+
+// Load a 64 bit constant. Patchable.
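+// E.g. load_const(d, 0x1122334455667788, noreg) emits
+//   lis  d, 0x1122;  ori d, d, 0x3344;  sldi d, d, 32;
+//   oris d, d, 0x5566;  ori d, d, 0x7788
+// -- always five instructions, which is what keeps the site patchable.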
+void Assembler::load_const(Register d, long x, Register tmp) {
+  // 64-bit value: x = xa xb xc xd
+  int xa = (x >> 48) & 0xffff;
+  int xb = (x >> 32) & 0xffff;
+  int xc = (x >> 16) & 0xffff;
+  int xd = (x >>  0) & 0xffff;
+  if (tmp == noreg) {
+    Assembler::lis( d, (int)(short)xa);
+    Assembler::ori( d, d, (unsigned int)xb);
+    Assembler::sldi(d, d, 32);
+    Assembler::oris(d, d, (unsigned int)xc);
+    Assembler::ori( d, d, (unsigned int)xd);
+  } else {
+    // exploit instruction level parallelism if we have a tmp register
+    assert_different_registers(d, tmp);
+    Assembler::lis(tmp, (int)(short)xa);
+    Assembler::lis(d, (int)(short)xc);
+    Assembler::ori(tmp, tmp, (unsigned int)xb);
+    Assembler::ori(d, d, (unsigned int)xd);
+    Assembler::insrdi(d, tmp, 32, 0);
+  }
+}
+
+// Load a 64 bit constant, optimized; unlike load_const, the emitted
+// sequence is not identifiable and hence not patchable.
+// Tmp can be used to increase ILP. Set return_simm16_rest = true to have
+// the low 16 bits returned as an immediate offset for the caller to fold
+// into the following instruction instead of being added here.
+int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) {
+  // Avoid accidentally trying to use R0 for indexed addressing.
+  assert(d != R0, "R0 not allowed");
+  assert_different_registers(d, tmp);
+
+  short xa, xb, xc, xd; // Four 16-bit chunks of const.
+  long rem = x;         // Remaining part of const.
+
+  xd = rem & 0xFFFF;    // Lowest 16-bit chunk.
+  rem = (rem >> 16) + ((unsigned short)xd >> 15); // Compensation for sign extend.
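+  // ((unsigned short)xd >> 15) is the sign bit of xd: if xd is negative,
+  // li/addi will sign-extend it, so the higher chunks are incremented by
+  // one here to compensate.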
+
+  if (rem == 0) { // opt 1: simm16
+    li(d, xd);
+    return 0;
+  }
+
+  xc = rem & 0xFFFF; // Next 16-bit chunk.
+  rem = (rem >> 16) + ((unsigned short)xc >> 15); // Compensation for sign extend.
+
+  if (rem == 0) { // opt 2: simm32
+    lis(d, xc);
+  } else { // High 32 bits needed.
+
+    if (tmp != noreg) { // opt 3: We have a temp reg.
+      // No carry propagation between xc and higher chunks here (use logical instructions).
+      xa = (x >> 48) & 0xffff;
+      xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0.
+      bool load_xa = (xa != 0) || (xb < 0);
+      bool return_xd = false;
+
+      if (load_xa) lis(tmp, xa);
+      if (xc) lis(d, xc);
+      if (load_xa) {
+        if (xb) ori(tmp, tmp, xb); // No addi, we support tmp == R0.
+      } else {
+        li(tmp, xb); // non-negative
+      }
+      if (xc) {
+        if (return_simm16_rest && xd >= 0) { return_xd = true; } // >= 0 to avoid carry propagation after insrdi/rldimi.
+        else if (xd) { addi(d, d, xd); }
+      } else {
+        li(d, xd);
+      }
+      insrdi(d, tmp, 32, 0);
+      return return_xd ? xd : 0; // non-negative
+    }
+
+    xb = rem & 0xFFFF; // Next 16-bit chunk.
+    rem = (rem >> 16) + ((unsigned short)xb >> 15); // Compensation for sign extend.
+
+    xa = rem & 0xFFFF; // Highest 16-bit chunk.
+
+    // opt 4: avoid adding 0
+    if (xa) { // Highest 16-bit needed?
+      lis(d, xa);
+      if (xb) addi(d, d, xb);
+    } else {
+      li(d, xb);
+    }
+    sldi(d, d, 32);
+    if (xc) addis(d, d, xc);
+  }
+
+  // opt 5: Return offset to be inserted into following instruction.
+  if (return_simm16_rest) return xd;
+
+  if (xd) addi(d, d, xd);
+  return 0;
+}
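+
+// Typical use of return_simm16_rest (cf. Assembler::ld above): emit only
+// the high part of a constant and let the caller fold the returned low
+// 16 bits into the displacement of the following memory access:
+//   int simm16_rest = load_const_optimized(d, x, noreg, true);
+//   Assembler::ld(d, simm16_rest, d);
+// This saves the trailing addi of the full five-instruction sequence.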
+
+#ifndef PRODUCT
+// Test of ppc assembler.
+void Assembler::test_asm() {
+  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+  addi(   R0,  R1,  10);
+  addis(  R5,  R2,  11);
+  addic_( R3,  R31, 42);
+  subfic( R21, R12, 2112);
+  add(    R3,  R2,  R1);
+  add_(   R11, R22, R30);
+  subf(   R7,  R6,  R5);
+  subf_(  R8,  R9,  R4);
+  addc(   R11, R12, R13);
+  addc_(  R14, R14, R14);
+  subfc(  R15, R16, R17);
+  subfc_( R18, R20, R19);
+  adde(   R20, R22, R24);
+  adde_(  R29, R27, R26);
+  subfe(  R28, R1,  R0);
+  subfe_( R21, R11, R29);
+  neg(    R21, R22);
+  neg_(   R13, R23);
+  mulli(  R0,  R11, -31);
+  mulld(  R1,  R18, R21);
+  mulld_( R2,  R17, R22);
+  mullw(  R3,  R16, R23);
+  mullw_( R4,  R15, R24);
+  divd(   R5,  R14, R25);
+  divd_(  R6,  R13, R26);
+  divw(   R7,  R12, R27);
+  divw_(  R8,  R11, R28);
+
+  li(     R3, -4711);
+
+  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+  cmpi(   CCR7, 0, R27, 4711);
+  cmp(    CCR0, 1, R14, R11);
+  cmpli(  CCR5, 1, R17, 45);
+  cmpl(   CCR3, 0, R9,  R10);
+
+  cmpwi(  CCR7, R27, 4711);
+  cmpw(   CCR0, R14, R11);
+  cmplwi( CCR5, R17, 45);
+  cmplw(  CCR3, R9,  R10);
+
+  cmpdi(  CCR7, R27, 4711);
+  cmpd(   CCR0, R14, R11);
+  cmpldi( CCR5, R17, 45);
+  cmpld(  CCR3, R9,  R10);
+
+  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+  andi_(  R4,  R5,  0xff);
+  andis_( R12, R13, 0x7b51);
+  ori(    R1,  R4,  13);
+  oris(   R3,  R5,  177);
+  xori(   R7,  R6,  51);
+  xoris(  R29, R0,  1);
+  andr(   R17, R21, R16);
+  and_(   R3,  R5,  R15);
+  orr(    R2,  R1,  R9);
+  or_(    R17, R15, R11);
+  xorr(   R19, R18, R10);
+  xor_(   R31, R21, R11);
+  nand(   R5,  R7,  R3);
+  nand_(  R3,  R1,  R0);
+  nor(    R2,  R3,  R5);
+  nor_(   R3,  R6,  R8);
+  andc(   R25, R12, R11);
+  andc_(  R24, R22, R21);
+  orc(    R20, R10, R12);
+  orc_(   R22, R2,  R13);
+
+  nop();
+
+  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+  sld(    R5,  R6,  R8);
+  sld_(   R3,  R5,  R9);
+  slw(    R2,  R1,  R10);
+  slw_(   R6,  R26, R16);
+  srd(    R16, R24, R8);
+  srd_(   R21, R14, R7);
+  srw(    R22, R25, R29);
+  srw_(   R5,  R18, R17);
+  srad(   R7,  R11, R0);
+  srad_(  R9,  R13, R1);
+  sraw(   R7,  R15, R2);
+  sraw_(  R4,  R17, R3);
+  sldi(   R3,  R18, 63);
+  sldi_(  R2,  R20, 30);
+  slwi(   R1,  R21, 30);
+  slwi_(  R7,  R23, 8);
+  srdi(   R0,  R19, 2);
+  srdi_(  R12, R24, 5);
+  srwi(   R13, R27, 6);
+  srwi_(  R14, R29, 7);
+  sradi(  R15, R30, 9);
+  sradi_( R16, R31, 19);
+  srawi(  R17, R31, 15);
+  srawi_( R18, R31, 12);
+
+  clrrdi( R3, R30, 5);
+  clrldi( R9, R10, 11);
+
+  rldicr( R19, R20, 13, 15);
+  rldicr_(R20, R20, 16, 14);
+  rldicl( R21, R21, 30, 33);
+  rldicl_(R22, R1,  20, 25);
+  rlwinm( R23, R2,  25, 10, 11);
+  rlwinm_(R24, R3,  12, 13, 14);
+
+  // PPC 1, section 3.3.2 Fixed-Point Load Instructions
+  lwzx(   R3,  R5, R7);
+  lwz(    R11,  0, R1);
+  lwzu(   R31, -4, R11);
+
+  lwax(   R3,  R5, R7);
+  lwa(    R31, -4, R11);
+  lhzx(   R3,  R5, R7);
+  lhz(    R31, -4, R11);
+  lhzu(   R31, -4, R11);
+
+  lhax(   R3,  R5, R7);
+  lha(    R31, -4, R11);
+  lhau(   R11,  0, R1);
+
+  lbzx(   R3,  R5, R7);
+  lbz(    R31, -4, R11);
+  lbzu(   R11,  0, R1);
+
+  ld(     R31, -4, R11);
+  ldx(    R3,  R5, R7);
+  ldu(    R31, -4, R11);
+
+  //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
+  stwx(   R3,  R5, R7);
+  stw(    R31, -4, R11);
+  stwu(   R11,  0, R1);
+
+  sthx(   R3,  R5, R7);
+  sth(    R31, -4, R11);
+  sthu(   R31, -4, R11);
+
+  stbx(   R3,  R5, R7);
+  stb(    R31, -4, R11);
+  stbu(   R31, -4, R11);
+
+  std(    R31, -4, R11);
+  stdx(   R3,  R5, R7);
+  stdu(   R31, -4, R11);
+
+  // PPC 1, section 3.3.13 Move To/From System Register Instructions
+  mtlr(   R3);
+  mflr(   R3);
+  mtctr(  R3);
+  mfctr(  R3);
+  mtcrf(  0xff, R15);
+  mtcr(   R15);
+  mtcrf(  0x03, R15);
+  mtcr(   R15);
+  mfcr(   R15);
+
+  // PPC 1, section 2.4.1 Branch Instructions
+  Label lbl1, lbl2, lbl3;
+  bind(lbl1);
+
+  b(pc());
+  b(pc() - 8);
+  b(lbl1);
+  b(lbl2);
+  b(lbl3);
+
+  bl(pc() - 8);
+  bl(lbl1);
+  bl(lbl2);
+
+  bcl(4, 10, pc() - 8);
+  bcl(4, 10, lbl1);
+  bcl(4, 10, lbl2);
+
+  bclr( 4, 6, 0);
+  bclrl(4, 6, 0);
+
+  bind(lbl2);
+
+  bcctr( 4, 6, 0);
+  bcctrl(4, 6, 0);
+
+  blt(CCR0, lbl2);
+  bgt(CCR1, lbl2);
+  beq(CCR2, lbl2);
+  bso(CCR3, lbl2);
+  bge(CCR4, lbl2);
+  ble(CCR5, lbl2);
+  bne(CCR6, lbl2);
+  bns(CCR7, lbl2);
+
+  bltl(CCR0, lbl2);
+  bgtl(CCR1, lbl2);
+  beql(CCR2, lbl2);
+  bsol(CCR3, lbl2);
+  bgel(CCR4, lbl2);
+  blel(CCR5, lbl2);
+  bnel(CCR6, lbl2);
+  bnsl(CCR7, lbl2);
+  blr();
+
+  sync();
+  icbi( R1, R2);
+  dcbst(R2, R3);
+
+  // FLOATING POINT instructions ppc.
+  // PPC 1, section 4.6.2 Floating-Point Load Instructions
+  lfs( F1, -11, R3);
+  lfsu(F2, 123, R4);
+  lfsx(F3, R5,  R6);
+  lfd( F4, 456, R7);
+  lfdu(F5, 789, R8);
+  lfdx(F6, R10, R11);
+
+  // PPC 1, section 4.6.3 Floating-Point Store Instructions
+  stfs(  F7,  876, R12);
+  stfsu( F8,  543, R13);
+  stfsx( F9,  R14, R15);
+  stfd(  F10, 210, R16);
+  stfdu( F11, 111, R17);
+  stfdx( F12, R18, R19);
+
+  // PPC 1, section 4.6.4 Floating-Point Move Instructions
+  fmr(   F13, F14);
+  fmr_(  F14, F15);
+  fneg(  F16, F17);
+  fneg_( F18, F19);
+  fabs(  F20, F21);
+  fabs_( F22, F23);
+  fnabs( F24, F25);
+  fnabs_(F26, F27);
+
+  // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic
+  // Instructions
+  fadd(  F28, F29, F30);
+  fadd_( F31, F0,  F1);
+  fadds( F2,  F3,  F4);
+  fadds_(F5,  F6,  F7);
+  fsub(  F8,  F9,  F10);
+  fsub_( F11, F12, F13);
+  fsubs( F14, F15, F16);
+  fsubs_(F17, F18, F19);
+  fmul(  F20, F21, F22);
+  fmul_( F23, F24, F25);
+  fmuls( F26, F27, F28);
+  fmuls_(F29, F30, F31);
+  fdiv(  F0,  F1,  F2);
+  fdiv_( F3,  F4,  F5);
+  fdivs( F6,  F7,  F8);
+  fdivs_(F9,  F10, F11);
+
+  // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion
+  // Instructions
+  frsp(  F12, F13);
+  fctid( F14, F15);
+  fctidz(F16, F17);
+  fctiw( F18, F19);
+  fctiwz(F20, F21);
+  fcfid( F22, F23);
+
+  // PPC 1, section 4.6.7 Floating-Point Compare Instructions
+  fcmpu( CCR7, F24, F25);
+
+  tty->print_cr("\ntest_asm disassembly (" PTR_FORMAT " " PTR_FORMAT "):", (intptr_t)code()->insts_begin(), (intptr_t)code()->insts_end());
+  code()->decode();
+}
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,1973 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_ASSEMBLER_PPC_HPP
+#define CPU_PPC_VM_ASSEMBLER_PPC_HPP
+
+#include "asm/register.hpp"
+
+// Address is an abstraction used to represent a memory location
+// as used in assembler instructions.
+// PPC instructions grok either baseReg + indexReg or baseReg + disp.
+// So far we do not use it, as the simplification this class offers
+// buys little on PPC with its simple addressing modes. Use
+// RegisterOrConstant to represent an offset.
+class Address VALUE_OBJ_CLASS_SPEC {
+};
+
+class AddressLiteral VALUE_OBJ_CLASS_SPEC {
+ private:
+  address          _address;
+  RelocationHolder _rspec;
+
+  RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
+    switch (rtype) {
+    case relocInfo::external_word_type:
+      return external_word_Relocation::spec(addr);
+    case relocInfo::internal_word_type:
+      return internal_word_Relocation::spec(addr);
+    case relocInfo::opt_virtual_call_type:
+      return opt_virtual_call_Relocation::spec();
+    case relocInfo::static_call_type:
+      return static_call_Relocation::spec();
+    case relocInfo::runtime_call_type:
+      return runtime_call_Relocation::spec();
+    case relocInfo::none:
+      return RelocationHolder();
+    default:
+      ShouldNotReachHere();
+      return RelocationHolder();
+    }
+  }
+
+ protected:
+  // creation
+  AddressLiteral() : _address(NULL), _rspec(NULL) {}
+
+ public:
+  AddressLiteral(address addr, RelocationHolder const& rspec)
+    : _address(addr),
+      _rspec(rspec) {}
+
+  AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
+    : _address((address) addr),
+      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
+
+  intptr_t value() const { return (intptr_t) _address; }
+
+  const RelocationHolder& rspec() const { return _rspec; }
+};
+
+// Argument is an abstraction used to represent an outgoing
+// actual argument or an incoming formal parameter, whether
+// it resides in memory or in a register, in a manner consistent
+// with the PPC Application Binary Interface, or ABI. This is
+// often referred to as the native or C calling convention.
+
+class Argument VALUE_OBJ_CLASS_SPEC {
+ private:
+  int _number;  // The number of the argument.
+ public:
+  enum {
+    // Only 8 registers may contain integer parameters.
+    n_register_parameters = 8,
+    // Can have up to 8 floating registers.
+    n_float_register_parameters = 8,
+
+    // PPC C calling conventions.
+    // The first eight arguments are passed in int regs if they are int.
+    n_int_register_parameters_c = 8,
+    // The first thirteen float arguments are passed in float regs.
+    n_float_register_parameters_c = 13,
+    // Only the first 8 parameters are not placed on the stack. AIX
+    // disassembly shows that xlC places all float args after argument 8 on
+    // the stack AND in a register. This is not documented, but we follow
+    // this convention, too.
+    n_regs_not_on_stack_c = 8
+  };
+  // creation
+  Argument(int number) : _number(number) {}
+
+  int  number() const { return _number; }
+
+  // Locating register-based arguments:
+  bool is_register() const { return _number < n_register_parameters; }
+
+  Register as_register() const {
+    assert(is_register(), "must be a register argument");
+    return as_Register(number() + R3_ARG1->encoding());
+  }
+};
+
+// A ppc64 function descriptor.
+struct FunctionDescriptor VALUE_OBJ_CLASS_SPEC {
+ private:
+  address _entry;
+  address _toc;
+  address _env;
+
+ public:
+  inline address entry() const { return _entry; }
+  inline address toc()   const { return _toc; }
+  inline address env()   const { return _env; }
+
+  inline void set_entry(address entry) { _entry = entry; }
+  inline void set_toc(  address toc)   { _toc   = toc; }
+  inline void set_env(  address env)   { _env   = env; }
+
+  inline static ByteSize entry_offset() { return byte_offset_of(FunctionDescriptor, _entry); }
+  inline static ByteSize toc_offset()   { return byte_offset_of(FunctionDescriptor, _toc); }
+  inline static ByteSize env_offset()   { return byte_offset_of(FunctionDescriptor, _env); }
+
+  // Friend functions can be called without loading toc and env.
+  enum {
+    friend_toc = 0xcafe,
+    friend_env = 0xc0de
+  };
+
+  inline bool is_friend_function() const {
+    return (toc() == (address) friend_toc) && (env() == (address) friend_env);
+  }
+
+  // Constructor for stack-allocated instances.
+  FunctionDescriptor() {
+    _entry = (address) 0xbad;
+    _toc   = (address) 0xbad;
+    _env   = (address) 0xbad;
+  }
+};
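+
+// Note: with the PPC64 ELF v1 / AIX ABI a C function pointer points to
+// such a descriptor rather than to code; a call through it loads entry()
+// into CTR, toc() into R2 and branches via bctrl.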
+
+class Assembler : public AbstractAssembler {
+ protected:
+  // Displacement routines
+  static void print_instruction(int inst);
+  static int  patched_branch(int dest_pos, int inst, int inst_pos);
+  static int  branch_destination(int inst, int pos);
+
+  friend class AbstractAssembler;
+
+  // Code patchers need various routines like inv_wdisp()
+  friend class NativeInstruction;
+  friend class NativeGeneralJump;
+  friend class Relocation;
+
+ public:
+
+  enum shifts {
+    XO_21_29_SHIFT = 2,
+    XO_21_30_SHIFT = 1,
+    XO_27_29_SHIFT = 2,
+    XO_30_31_SHIFT = 0,
+    SPR_5_9_SHIFT  = 11u, // SPR_5_9 field in bits 11 -- 15
+    SPR_0_4_SHIFT  = 16u, // SPR_0_4 field in bits 16 -- 20
+    RS_SHIFT       = 21u, // RS field in bits 21 -- 25
+    OPCODE_SHIFT   = 26u  // opcode in bits 26 -- 31
+  };
+
+  enum opcdxos_masks {
+    XL_FORM_OPCODE_MASK = (63u << OPCODE_SHIFT) | (1023u << 1),
+    ADDI_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    ADDIS_OPCODE_MASK   = (63u << OPCODE_SHIFT),
+    BXX_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    BCXX_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    // trap instructions
+    TDI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    TWI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    TD_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (1023u << 1),
+    TW_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (1023u << 1),
+    LD_OPCODE_MASK      = (63u << OPCODE_SHIFT) | (3u << XO_30_31_SHIFT), // DS-FORM
+    STD_OPCODE_MASK     = LD_OPCODE_MASK,
+    STDU_OPCODE_MASK    = STD_OPCODE_MASK,
+    STDX_OPCODE_MASK    = (63u << OPCODE_SHIFT) | (1023u << 1),
+    STDUX_OPCODE_MASK   = STDX_OPCODE_MASK,
+    STW_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    STWU_OPCODE_MASK    = STW_OPCODE_MASK,
+    STWX_OPCODE_MASK    = (63u << OPCODE_SHIFT) | (1023u << 1),
+    STWUX_OPCODE_MASK   = STWX_OPCODE_MASK,
+    MTCTR_OPCODE_MASK   = ~(31u << RS_SHIFT),
+    ORI_OPCODE_MASK     = (63u << OPCODE_SHIFT),
+    ORIS_OPCODE_MASK    = (63u << OPCODE_SHIFT),
+    RLDICR_OPCODE_MASK  = (63u << OPCODE_SHIFT) | (7u << XO_27_29_SHIFT)
+  };
+
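+  // Each opcode below ORs the 6-bit primary opcode (PPC bits 0..5, i.e.
+  // the value shifted by OPCODE_SHIFT) with the extended opcode placed in
+  // its form-specific field, e.g. ADD_OPCODE = 31 << OPCODE_SHIFT | 266 << 1
+  // for the XO-FORM add.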
+  enum opcdxos {
+    ADD_OPCODE    = (31u << OPCODE_SHIFT | 266u << 1),
+    ADDC_OPCODE   = (31u << OPCODE_SHIFT |  10u << 1),
+    ADDI_OPCODE   = (14u << OPCODE_SHIFT),
+    ADDIS_OPCODE  = (15u << OPCODE_SHIFT),
+    ADDIC__OPCODE = (13u << OPCODE_SHIFT),
+    ADDE_OPCODE   = (31u << OPCODE_SHIFT | 138u << 1),
+    SUBF_OPCODE   = (31u << OPCODE_SHIFT |  40u << 1),
+    SUBFC_OPCODE  = (31u << OPCODE_SHIFT |   8u << 1),
+    SUBFE_OPCODE  = (31u << OPCODE_SHIFT | 136u << 1),
+    SUBFIC_OPCODE = (8u  << OPCODE_SHIFT),
+    SUBFZE_OPCODE = (31u << OPCODE_SHIFT | 200u << 1),
+    DIVW_OPCODE   = (31u << OPCODE_SHIFT | 491u << 1),
+    MULLW_OPCODE  = (31u << OPCODE_SHIFT | 235u << 1),
+    MULHW_OPCODE  = (31u << OPCODE_SHIFT |  75u << 1),
+    MULHWU_OPCODE = (31u << OPCODE_SHIFT |  11u << 1),
+    MULLI_OPCODE  = (7u  << OPCODE_SHIFT),
+    AND_OPCODE    = (31u << OPCODE_SHIFT |  28u << 1),
+    ANDI_OPCODE   = (28u << OPCODE_SHIFT),
+    ANDIS_OPCODE  = (29u << OPCODE_SHIFT),
+    ANDC_OPCODE   = (31u << OPCODE_SHIFT |  60u << 1),
+    ORC_OPCODE    = (31u << OPCODE_SHIFT | 412u << 1),
+    OR_OPCODE     = (31u << OPCODE_SHIFT | 444u << 1),
+    ORI_OPCODE    = (24u << OPCODE_SHIFT),
+    ORIS_OPCODE   = (25u << OPCODE_SHIFT),
+    XOR_OPCODE    = (31u << OPCODE_SHIFT | 316u << 1),
+    XORI_OPCODE   = (26u << OPCODE_SHIFT),
+    XORIS_OPCODE  = (27u << OPCODE_SHIFT),
+
+    NEG_OPCODE    = (31u << OPCODE_SHIFT | 104u << 1),
+
+    RLWINM_OPCODE = (21u << OPCODE_SHIFT),
+    CLRRWI_OPCODE = RLWINM_OPCODE,
+    CLRLWI_OPCODE = RLWINM_OPCODE,
+
+    RLWIMI_OPCODE = (20u << OPCODE_SHIFT),
+
+    SLW_OPCODE    = (31u << OPCODE_SHIFT |  24u << 1),
+    SLWI_OPCODE   = RLWINM_OPCODE,
+    SRW_OPCODE    = (31u << OPCODE_SHIFT | 536u << 1),
+    SRWI_OPCODE   = RLWINM_OPCODE,
+    SRAW_OPCODE   = (31u << OPCODE_SHIFT | 792u << 1),
+    SRAWI_OPCODE  = (31u << OPCODE_SHIFT | 824u << 1),
+
+    CMP_OPCODE    = (31u << OPCODE_SHIFT |   0u << 1),
+    CMPI_OPCODE   = (11u << OPCODE_SHIFT),
+    CMPL_OPCODE   = (31u << OPCODE_SHIFT |  32u << 1),
+    CMPLI_OPCODE  = (10u << OPCODE_SHIFT),
+
+    ISEL_OPCODE   = (31u << OPCODE_SHIFT |  15u << 1),
+
+    MTLR_OPCODE   = (31u << OPCODE_SHIFT | 467u << 1 | 8 << SPR_0_4_SHIFT),
+    MFLR_OPCODE   = (31u << OPCODE_SHIFT | 339u << 1 | 8 << SPR_0_4_SHIFT),
+
+    MTCRF_OPCODE  = (31u << OPCODE_SHIFT | 144u << 1),
+    MFCR_OPCODE   = (31u << OPCODE_SHIFT | 19u << 1),
+    MCRF_OPCODE   = (19u << OPCODE_SHIFT | 0u << 1),
+
+    // condition register logic instructions
+    CRAND_OPCODE  = (19u << OPCODE_SHIFT | 257u << 1),
+    CRNAND_OPCODE = (19u << OPCODE_SHIFT | 225u << 1),
+    CROR_OPCODE   = (19u << OPCODE_SHIFT | 449u << 1),
+    CRXOR_OPCODE  = (19u << OPCODE_SHIFT | 193u << 1),
+    CRNOR_OPCODE  = (19u << OPCODE_SHIFT |  33u << 1),
+    CREQV_OPCODE  = (19u << OPCODE_SHIFT | 289u << 1),
+    CRANDC_OPCODE = (19u << OPCODE_SHIFT | 129u << 1),
+    CRORC_OPCODE  = (19u << OPCODE_SHIFT | 417u << 1),
+
+    BCLR_OPCODE   = (19u << OPCODE_SHIFT | 16u << 1),
+    BXX_OPCODE    = (18u << OPCODE_SHIFT),
+    BCXX_OPCODE   = (16u << OPCODE_SHIFT),
+
+    // CTR-related opcodes
+    BCCTR_OPCODE  = (19u << OPCODE_SHIFT | 528u << 1),
+    MTCTR_OPCODE  = (31u << OPCODE_SHIFT | 467u << 1 | 9 << SPR_0_4_SHIFT),
+    MFCTR_OPCODE  = (31u << OPCODE_SHIFT | 339u << 1 | 9 << SPR_0_4_SHIFT),
+
+    LWZ_OPCODE   = (32u << OPCODE_SHIFT),
+    LWZX_OPCODE  = (31u << OPCODE_SHIFT |  23u << 1),
+    LWZU_OPCODE  = (33u << OPCODE_SHIFT),
+
+    LHA_OPCODE   = (42u << OPCODE_SHIFT),
+    LHAX_OPCODE  = (31u << OPCODE_SHIFT | 343u << 1),
+    LHAU_OPCODE  = (43u << OPCODE_SHIFT),
+
+    LHZ_OPCODE   = (40u << OPCODE_SHIFT),
+    LHZX_OPCODE  = (31u << OPCODE_SHIFT | 279u << 1),
+    LHZU_OPCODE  = (41u << OPCODE_SHIFT),
+
+    LBZ_OPCODE   = (34u << OPCODE_SHIFT),
+    LBZX_OPCODE  = (31u << OPCODE_SHIFT |  87u << 1),
+    LBZU_OPCODE  = (35u << OPCODE_SHIFT),
+
+    STW_OPCODE   = (36u << OPCODE_SHIFT),
+    STWX_OPCODE  = (31u << OPCODE_SHIFT | 151u << 1),
+    STWU_OPCODE  = (37u << OPCODE_SHIFT),
+    STWUX_OPCODE = (31u << OPCODE_SHIFT | 183u << 1),
+
+    STH_OPCODE   = (44u << OPCODE_SHIFT),
+    STHX_OPCODE  = (31u << OPCODE_SHIFT | 407u << 1),
+    STHU_OPCODE  = (45u << OPCODE_SHIFT),
+
+    STB_OPCODE   = (38u << OPCODE_SHIFT),
+    STBX_OPCODE  = (31u << OPCODE_SHIFT | 215u << 1),
+    STBU_OPCODE  = (39u << OPCODE_SHIFT),
+
+    EXTSB_OPCODE = (31u << OPCODE_SHIFT | 954u << 1),
+    EXTSH_OPCODE = (31u << OPCODE_SHIFT | 922u << 1),
+    EXTSW_OPCODE = (31u << OPCODE_SHIFT | 986u << 1),               // X-FORM
+
+    // 32 bit opcode encodings
+
+    LWA_OPCODE    = (58u << OPCODE_SHIFT |   2u << XO_30_31_SHIFT), // DS-FORM
+    LWAX_OPCODE   = (31u << OPCODE_SHIFT | 341u << XO_21_30_SHIFT), // X-FORM
+
+    CNTLZW_OPCODE = (31u << OPCODE_SHIFT |  26u << XO_21_30_SHIFT), // X-FORM
+
+    // 64 bit opcode encodings
+
+    LD_OPCODE     = (58u << OPCODE_SHIFT |   0u << XO_30_31_SHIFT), // DS-FORM
+    LDU_OPCODE    = (58u << OPCODE_SHIFT |   1u << XO_30_31_SHIFT), // DS-FORM
+    LDX_OPCODE    = (31u << OPCODE_SHIFT |  21u << XO_21_30_SHIFT), // X-FORM
+
+    STD_OPCODE    = (62u << OPCODE_SHIFT |   0u << XO_30_31_SHIFT), // DS-FORM
+    STDU_OPCODE   = (62u << OPCODE_SHIFT |   1u << XO_30_31_SHIFT), // DS-FORM
+    STDUX_OPCODE  = (31u << OPCODE_SHIFT | 181u << 1),                  // X-FORM
+    STDX_OPCODE   = (31u << OPCODE_SHIFT | 149u << XO_21_30_SHIFT), // X-FORM
+
+    RLDICR_OPCODE = (30u << OPCODE_SHIFT |   1u << XO_27_29_SHIFT), // MD-FORM
+    RLDICL_OPCODE = (30u << OPCODE_SHIFT |   0u << XO_27_29_SHIFT), // MD-FORM
+    RLDIC_OPCODE  = (30u << OPCODE_SHIFT |   2u << XO_27_29_SHIFT), // MD-FORM
+    RLDIMI_OPCODE = (30u << OPCODE_SHIFT |   3u << XO_27_29_SHIFT), // MD-FORM
+
+    SRADI_OPCODE  = (31u << OPCODE_SHIFT | 413u << XO_21_29_SHIFT), // XS-FORM
+
+    SLD_OPCODE    = (31u << OPCODE_SHIFT |  27u << 1),              // X-FORM
+    SRD_OPCODE    = (31u << OPCODE_SHIFT | 539u << 1),              // X-FORM
+    SRAD_OPCODE   = (31u << OPCODE_SHIFT | 794u << 1),              // X-FORM
+
+    MULLD_OPCODE  = (31u << OPCODE_SHIFT | 233u << 1),              // XO-FORM
+    MULHD_OPCODE  = (31u << OPCODE_SHIFT |  73u << 1),              // XO-FORM
+    MULHDU_OPCODE = (31u << OPCODE_SHIFT |   9u << 1),              // XO-FORM
+    DIVD_OPCODE   = (31u << OPCODE_SHIFT | 489u << 1),              // XO-FORM
+
+    CNTLZD_OPCODE = (31u << OPCODE_SHIFT |  58u << XO_21_30_SHIFT), // X-FORM
+    NAND_OPCODE   = (31u << OPCODE_SHIFT | 476u << XO_21_30_SHIFT), // X-FORM
+    NOR_OPCODE    = (31u << OPCODE_SHIFT | 124u << XO_21_30_SHIFT), // X-FORM
+
+    // opcodes only used for floating arithmetic
+    FADD_OPCODE   = (63u << OPCODE_SHIFT |  21u << 1),
+    FADDS_OPCODE  = (59u << OPCODE_SHIFT |  21u << 1),
+    FCMPU_OPCODE  = (63u << OPCODE_SHIFT |  00u << 1),
+    FDIV_OPCODE   = (63u << OPCODE_SHIFT |  18u << 1),
+    FDIVS_OPCODE  = (59u << OPCODE_SHIFT |  18u << 1),
+    FMR_OPCODE    = (63u << OPCODE_SHIFT |  72u << 1),
+    // These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
+    // on Power7.  Do not use.
+    // MFFGPR_OPCODE  = (31u << OPCODE_SHIFT | 607u << 1),
+    // MFTGPR_OPCODE  = (31u << OPCODE_SHIFT | 735u << 1),
+    CMPB_OPCODE    = (31u << OPCODE_SHIFT |  508  << 1),
+    POPCNTB_OPCODE = (31u << OPCODE_SHIFT |  122  << 1),
+    POPCNTW_OPCODE = (31u << OPCODE_SHIFT |  378  << 1),
+    POPCNTD_OPCODE = (31u << OPCODE_SHIFT |  506  << 1),
+    FABS_OPCODE    = (63u << OPCODE_SHIFT |  264u << 1),
+    FNABS_OPCODE   = (63u << OPCODE_SHIFT |  136u << 1),
+    FMUL_OPCODE    = (63u << OPCODE_SHIFT |   25u << 1),
+    FMULS_OPCODE   = (59u << OPCODE_SHIFT |   25u << 1),
+    FNEG_OPCODE    = (63u << OPCODE_SHIFT |   40u << 1),
+    FSUB_OPCODE    = (63u << OPCODE_SHIFT |   20u << 1),
+    FSUBS_OPCODE   = (59u << OPCODE_SHIFT |   20u << 1),
+
+    // PPC64-internal FPU conversion opcodes
+    FCFID_OPCODE   = (63u << OPCODE_SHIFT |  846u << 1),
+    FCFIDS_OPCODE  = (59u << OPCODE_SHIFT |  846u << 1),
+    FCTID_OPCODE   = (63u << OPCODE_SHIFT |  814u << 1),
+    FCTIDZ_OPCODE  = (63u << OPCODE_SHIFT |  815u << 1),
+    FCTIW_OPCODE   = (63u << OPCODE_SHIFT |   14u << 1),
+    FCTIWZ_OPCODE  = (63u << OPCODE_SHIFT |   15u << 1),
+    FRSP_OPCODE    = (63u << OPCODE_SHIFT |   12u << 1),
+
+    // WARNING: using fmadd results in a non-compliant VM. Some floating
+    // point TCK tests will fail.
+    FMADD_OPCODE   = (59u << OPCODE_SHIFT |   29u << 1),
+    DMADD_OPCODE   = (63u << OPCODE_SHIFT |   29u << 1),
+    FMSUB_OPCODE   = (59u << OPCODE_SHIFT |   28u << 1),
+    DMSUB_OPCODE   = (63u << OPCODE_SHIFT |   28u << 1),
+    FNMADD_OPCODE  = (59u << OPCODE_SHIFT |   31u << 1),
+    DNMADD_OPCODE  = (63u << OPCODE_SHIFT |   31u << 1),
+    FNMSUB_OPCODE  = (59u << OPCODE_SHIFT |   30u << 1),
+    DNMSUB_OPCODE  = (63u << OPCODE_SHIFT |   30u << 1),
+
+    LFD_OPCODE     = (50u << OPCODE_SHIFT |   00u << 1),
+    LFDU_OPCODE    = (51u << OPCODE_SHIFT |   00u << 1),
+    LFDX_OPCODE    = (31u << OPCODE_SHIFT |  599u << 1),
+    LFS_OPCODE     = (48u << OPCODE_SHIFT |   00u << 1),
+    LFSU_OPCODE    = (49u << OPCODE_SHIFT |   00u << 1),
+    LFSX_OPCODE    = (31u << OPCODE_SHIFT |  535u << 1),
+
+    STFD_OPCODE    = (54u << OPCODE_SHIFT |   00u << 1),
+    STFDU_OPCODE   = (55u << OPCODE_SHIFT |   00u << 1),
+    STFDX_OPCODE   = (31u << OPCODE_SHIFT |  727u << 1),
+    STFS_OPCODE    = (52u << OPCODE_SHIFT |   00u << 1),
+    STFSU_OPCODE   = (53u << OPCODE_SHIFT |   00u << 1),
+    STFSX_OPCODE   = (31u << OPCODE_SHIFT |  663u << 1),
+
+    FSQRT_OPCODE   = (63u << OPCODE_SHIFT |   22u << 1),            // A-FORM
+    FSQRTS_OPCODE  = (59u << OPCODE_SHIFT |   22u << 1),            // A-FORM
+
+    // Vector instruction support for >= Power6
+    // Vector Storage Access
+    LVEBX_OPCODE   = (31u << OPCODE_SHIFT |    7u << 1),
+    LVEHX_OPCODE   = (31u << OPCODE_SHIFT |   39u << 1),
+    LVEWX_OPCODE   = (31u << OPCODE_SHIFT |   71u << 1),
+    LVX_OPCODE     = (31u << OPCODE_SHIFT |  103u << 1),
+    LVXL_OPCODE    = (31u << OPCODE_SHIFT |  359u << 1),
+    STVEBX_OPCODE  = (31u << OPCODE_SHIFT |  135u << 1),
+    STVEHX_OPCODE  = (31u << OPCODE_SHIFT |  167u << 1),
+    STVEWX_OPCODE  = (31u << OPCODE_SHIFT |  199u << 1),
+    STVX_OPCODE    = (31u << OPCODE_SHIFT |  231u << 1),
+    STVXL_OPCODE   = (31u << OPCODE_SHIFT |  487u << 1),
+    LVSL_OPCODE    = (31u << OPCODE_SHIFT |    6u << 1),
+    LVSR_OPCODE    = (31u << OPCODE_SHIFT |   38u << 1),
+
+    // Vector Permute and Formatting
+    VPKPX_OPCODE   = (4u  << OPCODE_SHIFT |  782u     ),
+    VPKSHSS_OPCODE = (4u  << OPCODE_SHIFT |  398u     ),
+    VPKSWSS_OPCODE = (4u  << OPCODE_SHIFT |  462u     ),
+    VPKSHUS_OPCODE = (4u  << OPCODE_SHIFT |  270u     ),
+    VPKSWUS_OPCODE = (4u  << OPCODE_SHIFT |  334u     ),
+    VPKUHUM_OPCODE = (4u  << OPCODE_SHIFT |   14u     ),
+    VPKUWUM_OPCODE = (4u  << OPCODE_SHIFT |   78u     ),
+    VPKUHUS_OPCODE = (4u  << OPCODE_SHIFT |  142u     ),
+    VPKUWUS_OPCODE = (4u  << OPCODE_SHIFT |  206u     ),
+    VUPKHPX_OPCODE = (4u  << OPCODE_SHIFT |  846u     ),
+    VUPKHSB_OPCODE = (4u  << OPCODE_SHIFT |  526u     ),
+    VUPKHSH_OPCODE = (4u  << OPCODE_SHIFT |  590u     ),
+    VUPKLPX_OPCODE = (4u  << OPCODE_SHIFT |  974u     ),
+    VUPKLSB_OPCODE = (4u  << OPCODE_SHIFT |  654u     ),
+    VUPKLSH_OPCODE = (4u  << OPCODE_SHIFT |  718u     ),
+
+    VMRGHB_OPCODE  = (4u  << OPCODE_SHIFT |   12u     ),
+    VMRGHW_OPCODE  = (4u  << OPCODE_SHIFT |  140u     ),
+    VMRGHH_OPCODE  = (4u  << OPCODE_SHIFT |   76u     ),
+    VMRGLB_OPCODE  = (4u  << OPCODE_SHIFT |  268u     ),
+    VMRGLW_OPCODE  = (4u  << OPCODE_SHIFT |  396u     ),
+    VMRGLH_OPCODE  = (4u  << OPCODE_SHIFT |  332u     ),
+
+    VSPLT_OPCODE   = (4u  << OPCODE_SHIFT |  524u     ),
+    VSPLTH_OPCODE  = (4u  << OPCODE_SHIFT |  588u     ),
+    VSPLTW_OPCODE  = (4u  << OPCODE_SHIFT |  652u     ),
+    VSPLTISB_OPCODE= (4u  << OPCODE_SHIFT |  780u     ),
+    VSPLTISH_OPCODE= (4u  << OPCODE_SHIFT |  844u     ),
+    VSPLTISW_OPCODE= (4u  << OPCODE_SHIFT |  908u     ),
+
+    VPERM_OPCODE   = (4u  << OPCODE_SHIFT |   43u     ),
+    VSEL_OPCODE    = (4u  << OPCODE_SHIFT |   42u     ),
+
+    VSL_OPCODE     = (4u  << OPCODE_SHIFT |  452u     ),
+    VSLDOI_OPCODE  = (4u  << OPCODE_SHIFT |   44u     ),
+    VSLO_OPCODE    = (4u  << OPCODE_SHIFT | 1036u     ),
+    VSR_OPCODE     = (4u  << OPCODE_SHIFT |  708u     ),
+    VSRO_OPCODE    = (4u  << OPCODE_SHIFT | 1100u     ),
+
+    // Vector Integer
+    VADDCUW_OPCODE = (4u  << OPCODE_SHIFT |  384u     ),
+    VADDSHS_OPCODE = (4u  << OPCODE_SHIFT |  832u     ),
+    VADDSBS_OPCODE = (4u  << OPCODE_SHIFT |  768u     ),
+    VADDSWS_OPCODE = (4u  << OPCODE_SHIFT |  896u     ),
+    VADDUBM_OPCODE = (4u  << OPCODE_SHIFT |    0u     ),
+    VADDUWM_OPCODE = (4u  << OPCODE_SHIFT |  128u     ),
+    VADDUHM_OPCODE = (4u  << OPCODE_SHIFT |   64u     ),
+    VADDUBS_OPCODE = (4u  << OPCODE_SHIFT |  512u     ),
+    VADDUWS_OPCODE = (4u  << OPCODE_SHIFT |  640u     ),
+    VADDUHS_OPCODE = (4u  << OPCODE_SHIFT |  576u     ),
+    VSUBCUW_OPCODE = (4u  << OPCODE_SHIFT | 1408u     ),
+    VSUBSHS_OPCODE = (4u  << OPCODE_SHIFT | 1856u     ),
+    VSUBSBS_OPCODE = (4u  << OPCODE_SHIFT | 1792u     ),
+    VSUBSWS_OPCODE = (4u  << OPCODE_SHIFT | 1920u     ),
+    VSUBUBM_OPCODE = (4u  << OPCODE_SHIFT | 1024u     ),
+    VSUBUWM_OPCODE = (4u  << OPCODE_SHIFT | 1152u     ),
+    VSUBUHM_OPCODE = (4u  << OPCODE_SHIFT | 1088u     ),
+    VSUBUBS_OPCODE = (4u  << OPCODE_SHIFT | 1536u     ),
+    VSUBUWS_OPCODE = (4u  << OPCODE_SHIFT | 1664u     ),
+    VSUBUHS_OPCODE = (4u  << OPCODE_SHIFT | 1600u     ),
+
+    VMULESB_OPCODE = (4u  << OPCODE_SHIFT |  776u     ),
+    VMULEUB_OPCODE = (4u  << OPCODE_SHIFT |  520u     ),
+    VMULESH_OPCODE = (4u  << OPCODE_SHIFT |  840u     ),
+    VMULEUH_OPCODE = (4u  << OPCODE_SHIFT |  584u     ),
+    VMULOSB_OPCODE = (4u  << OPCODE_SHIFT |  264u     ),
+    VMULOUB_OPCODE = (4u  << OPCODE_SHIFT |    8u     ),
+    VMULOSH_OPCODE = (4u  << OPCODE_SHIFT |  328u     ),
+    VMULOUH_OPCODE = (4u  << OPCODE_SHIFT |   72u     ),
+    VMHADDSHS_OPCODE=(4u  << OPCODE_SHIFT |   32u     ),
+    VMHRADDSHS_OPCODE=(4u << OPCODE_SHIFT |   33u     ),
+    VMLADDUHM_OPCODE=(4u  << OPCODE_SHIFT |   34u     ),
+    VMSUBUHM_OPCODE= (4u  << OPCODE_SHIFT |   36u     ),
+    VMSUMMBM_OPCODE= (4u  << OPCODE_SHIFT |   37u     ),
+    VMSUMSHM_OPCODE= (4u  << OPCODE_SHIFT |   40u     ),
+    VMSUMSHS_OPCODE= (4u  << OPCODE_SHIFT |   41u     ),
+    VMSUMUHM_OPCODE= (4u  << OPCODE_SHIFT |   38u     ),
+    VMSUMUHS_OPCODE= (4u  << OPCODE_SHIFT |   39u     ),
+
+    VSUMSWS_OPCODE = (4u  << OPCODE_SHIFT | 1928u     ),
+    VSUM2SWS_OPCODE= (4u  << OPCODE_SHIFT | 1672u     ),
+    VSUM4SBS_OPCODE= (4u  << OPCODE_SHIFT | 1800u     ),
+    VSUM4UBS_OPCODE= (4u  << OPCODE_SHIFT | 1544u     ),
+    VSUM4SHS_OPCODE= (4u  << OPCODE_SHIFT | 1608u     ),
+
+    VAVGSB_OPCODE  = (4u  << OPCODE_SHIFT | 1282u     ),
+    VAVGSW_OPCODE  = (4u  << OPCODE_SHIFT | 1410u     ),
+    VAVGSH_OPCODE  = (4u  << OPCODE_SHIFT | 1346u     ),
+    VAVGUB_OPCODE  = (4u  << OPCODE_SHIFT | 1026u     ),
+    VAVGUW_OPCODE  = (4u  << OPCODE_SHIFT | 1154u     ),
+    VAVGUH_OPCODE  = (4u  << OPCODE_SHIFT | 1090u     ),
+
+    VMAXSB_OPCODE  = (4u  << OPCODE_SHIFT |  258u     ),
+    VMAXSW_OPCODE  = (4u  << OPCODE_SHIFT |  386u     ),
+    VMAXSH_OPCODE  = (4u  << OPCODE_SHIFT |  322u     ),
+    VMAXUB_OPCODE  = (4u  << OPCODE_SHIFT |    2u     ),
+    VMAXUW_OPCODE  = (4u  << OPCODE_SHIFT |  130u     ),
+    VMAXUH_OPCODE  = (4u  << OPCODE_SHIFT |   66u     ),
+    VMINSB_OPCODE  = (4u  << OPCODE_SHIFT |  770u     ),
+    VMINSW_OPCODE  = (4u  << OPCODE_SHIFT |  898u     ),
+    VMINSH_OPCODE  = (4u  << OPCODE_SHIFT |  834u     ),
+    VMINUB_OPCODE  = (4u  << OPCODE_SHIFT |  514u     ),
+    VMINUW_OPCODE  = (4u  << OPCODE_SHIFT |  642u     ),
+    VMINUH_OPCODE  = (4u  << OPCODE_SHIFT |  578u     ),
+
+    VCMPEQUB_OPCODE= (4u  << OPCODE_SHIFT |    6u     ),
+    VCMPEQUH_OPCODE= (4u  << OPCODE_SHIFT |   70u     ),
+    VCMPEQUW_OPCODE= (4u  << OPCODE_SHIFT |  134u     ),
+    VCMPGTSH_OPCODE= (4u  << OPCODE_SHIFT |  838u     ),
+    VCMPGTSB_OPCODE= (4u  << OPCODE_SHIFT |  774u     ),
+    VCMPGTSW_OPCODE= (4u  << OPCODE_SHIFT |  902u     ),
+    VCMPGTUB_OPCODE= (4u  << OPCODE_SHIFT |  518u     ),
+    VCMPGTUH_OPCODE= (4u  << OPCODE_SHIFT |  582u     ),
+    VCMPGTUW_OPCODE= (4u  << OPCODE_SHIFT |  646u     ),
+
+    VAND_OPCODE    = (4u  << OPCODE_SHIFT | 1028u     ),
+    VANDC_OPCODE   = (4u  << OPCODE_SHIFT | 1092u     ),
+    VNOR_OPCODE    = (4u  << OPCODE_SHIFT | 1284u     ),
+    VOR_OPCODE     = (4u  << OPCODE_SHIFT | 1156u     ),
+    VXOR_OPCODE    = (4u  << OPCODE_SHIFT | 1220u     ),
+    VRLB_OPCODE    = (4u  << OPCODE_SHIFT |    4u     ),
+    VRLW_OPCODE    = (4u  << OPCODE_SHIFT |  132u     ),
+    VRLH_OPCODE    = (4u  << OPCODE_SHIFT |   68u     ),
+    VSLB_OPCODE    = (4u  << OPCODE_SHIFT |  260u     ),
+    VSLW_OPCODE    = (4u  << OPCODE_SHIFT |  388u     ),
+    VSLH_OPCODE    = (4u  << OPCODE_SHIFT |  324u     ),
+    VSRB_OPCODE    = (4u  << OPCODE_SHIFT |  516u     ),
+    VSRW_OPCODE    = (4u  << OPCODE_SHIFT |  644u     ),
+    VSRH_OPCODE    = (4u  << OPCODE_SHIFT |  580u     ),
+    VSRAB_OPCODE   = (4u  << OPCODE_SHIFT |  772u     ),
+    VSRAW_OPCODE   = (4u  << OPCODE_SHIFT |  900u     ),
+    VSRAH_OPCODE   = (4u  << OPCODE_SHIFT |  836u     ),
+
+    // Vector Floating-Point
+    // not implemented yet
+
+    // Vector Status and Control
+    MTVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1604u     ),
+    MFVSCR_OPCODE  = (4u  << OPCODE_SHIFT | 1540u     ),
+
+    // Icache and dcache related instructions
+    DCBA_OPCODE    = (31u << OPCODE_SHIFT |  758u << 1),
+    DCBZ_OPCODE    = (31u << OPCODE_SHIFT | 1014u << 1),
+    DCBST_OPCODE   = (31u << OPCODE_SHIFT |   54u << 1),
+    DCBF_OPCODE    = (31u << OPCODE_SHIFT |   86u << 1),
+
+    DCBT_OPCODE    = (31u << OPCODE_SHIFT |  278u << 1),
+    DCBTST_OPCODE  = (31u << OPCODE_SHIFT |  246u << 1),
+    ICBI_OPCODE    = (31u << OPCODE_SHIFT |  982u << 1),
+
+    // Instruction synchronization
+    ISYNC_OPCODE   = (19u << OPCODE_SHIFT |  150u << 1),
+    // Memory barriers
+    SYNC_OPCODE    = (31u << OPCODE_SHIFT |  598u << 1),
+    EIEIO_OPCODE   = (31u << OPCODE_SHIFT |  854u << 1),
+
+    // Trap instructions
+    TDI_OPCODE     = (2u  << OPCODE_SHIFT),
+    TWI_OPCODE     = (3u  << OPCODE_SHIFT),
+    TD_OPCODE      = (31u << OPCODE_SHIFT |   68u << 1),
+    TW_OPCODE      = (31u << OPCODE_SHIFT |    4u << 1),
+
+    // Atomics.
+    LWARX_OPCODE   = (31u << OPCODE_SHIFT |   20u << 1),
+    LDARX_OPCODE   = (31u << OPCODE_SHIFT |   84u << 1),
+    STWCX_OPCODE   = (31u << OPCODE_SHIFT |  150u << 1),
+    STDCX_OPCODE   = (31u << OPCODE_SHIFT |  214u << 1)
+
+  };
+
+  // Trap instructions TO bits
+  enum trap_to_bits {
+    // single bits
+    traptoLessThanSigned      = 1 << 4, // 0, left end
+    traptoGreaterThanSigned   = 1 << 3,
+    traptoEqual               = 1 << 2,
+    traptoLessThanUnsigned    = 1 << 1,
+    traptoGreaterThanUnsigned = 1 << 0, // 4, right end
+
+    // compound ones
+    traptoUnconditional       = (traptoLessThanSigned |
+                                 traptoGreaterThanSigned |
+                                 traptoEqual |
+                                 traptoLessThanUnsigned |
+                                 traptoGreaterThanUnsigned)
+  };
+
+  // Branch hints BH field
+  enum branch_hint_bh {
+    // bclr cases:
+    bhintbhBCLRisReturn            = 0,
+    bhintbhBCLRisNotReturnButSame  = 1,
+    bhintbhBCLRisNotPredictable    = 3,
+
+    // bcctr cases:
+    bhintbhBCCTRisNotReturnButSame = 0,
+    bhintbhBCCTRisNotPredictable   = 3
+  };
+
+  // Branch prediction hints AT field
+  enum branch_hint_at {
+    bhintatNoHint     = 0,  // at=00
+    bhintatIsNotTaken = 2,  // at=10
+    bhintatIsTaken    = 3   // at=11
+  };
+
+  // Branch prediction hints
+  enum branch_hint_concept {
+    // Use the same encoding as branch_hint_at to simplify the code.
+    bhintNoHint       = bhintatNoHint,
+    bhintIsNotTaken   = bhintatIsNotTaken,
+    bhintIsTaken      = bhintatIsTaken
+  };
+
+  // Used in BO field of branch instruction.
+  enum branch_condition {
+    bcondCRbiIs0      =  4, // bo=001at
+    bcondCRbiIs1      = 12, // bo=011at
+    bcondAlways       = 20  // bo=10100
+  };
+
+  // Branch condition with combined prediction hints.
+  enum branch_condition_with_hint {
+    bcondCRbiIs0_bhintNoHint     = bcondCRbiIs0 | bhintatNoHint,
+    bcondCRbiIs0_bhintIsNotTaken = bcondCRbiIs0 | bhintatIsNotTaken,
+    bcondCRbiIs0_bhintIsTaken    = bcondCRbiIs0 | bhintatIsTaken,
+    bcondCRbiIs1_bhintNoHint     = bcondCRbiIs1 | bhintatNoHint,
+    bcondCRbiIs1_bhintIsNotTaken = bcondCRbiIs1 | bhintatIsNotTaken,
+    bcondCRbiIs1_bhintIsTaken    = bcondCRbiIs1 | bhintatIsTaken
+  };
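+
+  // The bottom two bits of bcondCRbiIs0/bcondCRbiIs1 are the 'at'
+  // prediction hint (bo=001at/011at), which is why the values above can
+  // simply OR condition and hint together.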
+
+  // Elemental Memory Barriers (>=Power 8)
+  enum Elemental_Membar_mask_bits {
+    StoreStore = 1 << 0,
+    StoreLoad  = 1 << 1,
+    LoadStore  = 1 << 2,
+    LoadLoad   = 1 << 3
+  };
+
+  // Branch prediction hints.
+  inline static int add_bhint_to_boint(const int bhint, const int boint) {
+    switch (boint) {
+      case bcondCRbiIs0:
+      case bcondCRbiIs1:
+        // branch_hint and branch_hint_at have same encodings
+        assert(   (int)bhintNoHint     == (int)bhintatNoHint
+               && (int)bhintIsNotTaken == (int)bhintatIsNotTaken
+               && (int)bhintIsTaken    == (int)bhintatIsTaken,
+               "wrong encodings");
+        assert((bhint & 0x03) == bhint, "wrong encodings");
+        return (boint & ~0x03) | bhint;
+      case bcondAlways:
+        // no branch_hint
+        return boint;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
+
+  // Extract bcond from boint.
+  inline static int inv_boint_bcond(const int boint) {
+    int r_bcond = boint & ~0x03;
+    assert(r_bcond == bcondCRbiIs0 ||
+           r_bcond == bcondCRbiIs1 ||
+           r_bcond == bcondAlways,
+           "bad branch condition");
+    return r_bcond;
+  }
+
+  // Extract bhint from boint.
+  inline static int inv_boint_bhint(const int boint) {
+    int r_bhint = boint & 0x03;
+    assert(r_bhint == bhintatNoHint ||
+           r_bhint == bhintatIsNotTaken ||
+           r_bhint == bhintatIsTaken,
+           "bad branch hint");
+    return r_bhint;
+  }
+
+  // Calculate opposite of given bcond.
+  inline static int opposite_bcond(const int bcond) {
+    switch (bcond) {
+      case bcondCRbiIs0:
+        return bcondCRbiIs1;
+      case bcondCRbiIs1:
+        return bcondCRbiIs0;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
+
+  // Calculate opposite of given bhint.
+  inline static int opposite_bhint(const int bhint) {
+    switch (bhint) {
+      case bhintatNoHint:
+        return bhintatNoHint;
+      case bhintatIsNotTaken:
+        return bhintatIsTaken;
+      case bhintatIsTaken:
+        return bhintatIsNotTaken;
+      default:
+        ShouldNotReachHere();
+        return 0;
+    }
+  }
+
+  // PPC branch instructions
+  enum ppcops {
+    b_op    = 18,
+    bc_op   = 16,
+    bcr_op  = 19
+  };
+
+  enum Condition {
+    negative         = 0,
+    less             = 0,
+    positive         = 1,
+    greater          = 1,
+    zero             = 2,
+    equal            = 2,
+    summary_overflow = 3,
+  };
+
+ public:
+  // Helper functions for groups of instructions
+
+  enum Predict { pt = 1, pn = 0 }; // pt = predict taken
+
+  // instruction must start at passed address
+  static int instr_len(unsigned char *instr) { return BytesPerInstWord; }
+
+  // instruction must be left-justified in argument
+  static int instr_len(unsigned long instr)  { return BytesPerInstWord; }
+
+  // longest instructions
+  static int instr_maxlen() { return BytesPerInstWord; }
+
+  // Test if x is within signed immediate range for nbits.
+  static bool is_simm(int x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 32, "out of bounds");
+    const int   min      = -( ((int)1) << nbits-1 );
+    const int   maxplus1 =  ( ((int)1) << nbits-1 );
+    return min <= x && x < maxplus1;
+  }
+
+  static bool is_simm(jlong x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 64, "out of bounds");
+    const jlong min      = -( ((jlong)1) << nbits-1 );
+    const jlong maxplus1 =  ( ((jlong)1) << nbits-1 );
+    return min <= x && x < maxplus1;
+  }
+
+  // Test if x is within unsigned immediate range for nbits
+  static bool is_uimm(int x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 32, "out of bounds");
+    const int   maxplus1 = ( ((int)1) << nbits );
+    return 0 <= x && x < maxplus1;
+  }
+
+  static bool is_uimm(jlong x, unsigned int nbits) {
+    assert(0 < nbits && nbits < 64, "out of bounds");
+    const jlong maxplus1 =  ( ((jlong)1) << nbits );
+    return 0 <= x && x < maxplus1;
+  }
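+
+  // E.g. is_simm(-32768, 16) and is_uimm(65535, 16) hold, while
+  // is_simm(32768, 16) and is_uimm(-1, 16) do not.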
+
+ protected:
+  // helpers
+
+  // X is supposed to fit in a field "nbits" wide
+  // and be sign-extended. Check the range.
+  static void assert_signed_range(intptr_t x, int nbits) {
+    assert(nbits == 32 || (-(1 << nbits-1) <= x && x < (1 << nbits-1)),
+           "value out of range");
+  }
+
+  static void assert_signed_word_disp_range(intptr_t x, int nbits) {
+    assert((x & 3) == 0, "not word aligned");
+    assert_signed_range(x, nbits + 2);
+  }
+
+  static void assert_unsigned_const(int x, int nbits) {
+    assert(juint(x) < juint(1 << nbits), "unsigned constant out of range");
+  }
+
+  static int fmask(juint hi_bit, juint lo_bit) {
+    assert(hi_bit >= lo_bit && hi_bit < 32, "bad bits");
+    return (1 << ( hi_bit-lo_bit + 1 )) - 1;
+  }
+
+  // inverse of u_field
+  static int inv_u_field(int x, int hi_bit, int lo_bit) {
+    juint r = juint(x) >> lo_bit;
+    r &= fmask(hi_bit, lo_bit);
+    return int(r);
+  }
+
+  // signed version: extract from field and sign-extend
+  static int inv_s_field_ppc(int x, int hi_bit, int lo_bit) {
+    x = x << (31-hi_bit);
+    x = x >> (31-hi_bit+lo_bit);
+    return x;
+  }
+
+  static int u_field(int x, int hi_bit, int lo_bit) {
+    assert((x & ~fmask(hi_bit, lo_bit)) == 0, "value out of range");
+    int r = x << lo_bit;
+    assert(inv_u_field(r, hi_bit, lo_bit) == x, "just checking");
+    return r;
+  }
+
+  // Same as u_field for signed values
+  static int s_field(int x, int hi_bit, int lo_bit) {
+    int nbits = hi_bit - lo_bit + 1;
+    assert(nbits == 32 || (-(1 << nbits-1) <= x && x < (1 << nbits-1)),
+      "value out of range");
+    x &= fmask(hi_bit, lo_bit);
+    int r = x << lo_bit;
+    return r;
+  }
+
+  // inv_op for ppc instructions
+  static int inv_op_ppc(int x) { return inv_u_field(x, 31, 26); }
+
+  // Determine target address from li, bd field of branch instruction.
+  static intptr_t inv_li_field(int x) {
+    intptr_t r = inv_s_field_ppc(x, 25, 2);
+    r = (r << 2);
+    return r;
+  }
+  static intptr_t inv_bd_field(int x, intptr_t pos) {
+    intptr_t r = inv_s_field_ppc(x, 15, 2);
+    r = (r << 2) + pos;
+    return r;
+  }
+
+  #define inv_opp_u_field(x, hi_bit, lo_bit) inv_u_field(x, 31-(lo_bit), 31-(hi_bit))
+  #define inv_opp_s_field(x, hi_bit, lo_bit) inv_s_field_ppc(x, 31-(lo_bit), 31-(hi_bit))
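+  // PPC numbers instruction bits from the left (bit 0 is the MSB); the
+  // 31-(bit) mirroring in these macros converts such field positions to
+  // the right-counting convention used by u_field/s_field above.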
+  // Extract instruction fields from instruction words.
+ public:
+  static int inv_ra_field(int x)  { return inv_opp_u_field(x, 15, 11); }
+  static int inv_rb_field(int x)  { return inv_opp_u_field(x, 20, 16); }
+  static int inv_rt_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  static int inv_rta_field(int x) { return inv_opp_u_field(x, 15, 11); }
+  static int inv_rs_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  // Ds uses opp_s_field(x, 31, 16), but lowest 2 bits must be 0.
+  // Inv_ds_field uses range (x, 29, 16) but shifts by 2 to ensure that lowest bits are 0.
+  static int inv_ds_field(int x)  { return inv_opp_s_field(x, 29, 16) << 2; }
+  static int inv_d1_field(int x)  { return inv_opp_s_field(x, 31, 16); }
+  static int inv_si_field(int x)  { return inv_opp_s_field(x, 31, 16); }
+  static int inv_to_field(int x)  { return inv_opp_u_field(x, 10, 6);  }
+  static int inv_lk_field(int x)  { return inv_opp_u_field(x, 31, 31); }
+  static int inv_bo_field(int x)  { return inv_opp_u_field(x, 10,  6); }
+  static int inv_bi_field(int x)  { return inv_opp_u_field(x, 15, 11); }
+
+  #define opp_u_field(x, hi_bit, lo_bit) u_field(x, 31-(lo_bit), 31-(hi_bit))
+  #define opp_s_field(x, hi_bit, lo_bit) s_field(x, 31-(lo_bit), 31-(hi_bit))
+
+  // instruction fields
+  static int aa(       int         x)  { return  opp_u_field(x,             30, 30); }
+  static int ba(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int bb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  static int bc(       int         x)  { return  opp_u_field(x,             25, 21); }
+  static int bd(       int         x)  { return  opp_s_field(x,             29, 16); }
+  static int bf( ConditionRegister cr) { return  bf(cr->encoding()); }
+  static int bf(       int         x)  { return  opp_u_field(x,              8,  6); }
+  static int bfa(ConditionRegister cr) { return  bfa(cr->encoding()); }
+  static int bfa(      int         x)  { return  opp_u_field(x,             13, 11); }
+  static int bh(       int         x)  { return  opp_u_field(x,             20, 19); }
+  static int bi(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int bi0(ConditionRegister cr, Condition c) { return (cr->encoding() << 2) | c; }
+  static int bo(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int bt(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int d1(       int         x)  { return  opp_s_field(x,             31, 16); }
+  static int ds(       int         x)  { assert((x & 0x3) == 0, "unaligned offset"); return opp_s_field(x, 31, 16); }
+  static int eh(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int flm(      int         x)  { return  opp_u_field(x,             14,  7); }
+  static int fra(    FloatRegister r)  { return  fra(r->encoding());}
+  static int frb(    FloatRegister r)  { return  frb(r->encoding());}
+  static int frc(    FloatRegister r)  { return  frc(r->encoding());}
+  static int frs(    FloatRegister r)  { return  frs(r->encoding());}
+  static int frt(    FloatRegister r)  { return  frt(r->encoding());}
+  static int fra(      int         x)  { return  opp_u_field(x,             15, 11); }
+  static int frb(      int         x)  { return  opp_u_field(x,             20, 16); }
+  static int frc(      int         x)  { return  opp_u_field(x,             25, 21); }
+  static int frs(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int frt(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int fxm(      int         x)  { return  opp_u_field(x,             19, 12); }
+  static int l10(      int         x)  { return  opp_u_field(x,             10, 10); }
+  static int l15(      int         x)  { return  opp_u_field(x,             15, 15); }
+  static int l910(     int         x)  { return  opp_u_field(x,             10,  9); }
+  static int e1215(    int         x)  { return  opp_u_field(x,             15, 12); }
+  static int lev(      int         x)  { return  opp_u_field(x,             26, 20); }
+  static int li(       int         x)  { return  opp_s_field(x,             29,  6); }
+  static int lk(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int mb2125(   int         x)  { return  opp_u_field(x,             25, 21); }
+  static int me2630(   int         x)  { return  opp_u_field(x,             30, 26); }
+  static int mb2126(   int         x)  { return  opp_u_field(((x & 0x1f) << 1) | ((x & 0x20) >> 5), 26, 21); }
+  static int me2126(   int         x)  { return  mb2126(x); }
+  static int nb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  //static int opcd(   int         x)  { return  opp_u_field(x,              5,  0); } // is contained in our opcodes
+  static int oe(       int         x)  { return  opp_u_field(x,             21, 21); }
+  static int ra(       Register    r)  { return  ra(r->encoding()); }
+  static int ra(       int         x)  { return  opp_u_field(x,             15, 11); }
+  static int rb(       Register    r)  { return  rb(r->encoding()); }
+  static int rb(       int         x)  { return  opp_u_field(x,             20, 16); }
+  static int rc(       int         x)  { return  opp_u_field(x,             31, 31); }
+  static int rs(       Register    r)  { return  rs(r->encoding()); }
+  static int rs(       int         x)  { return  opp_u_field(x,             10,  6); }
+  // R0 must not be used as a base register in memory accesses: in that
+  // position it reads as the constant 0, not as the register's contents.
+  static int ra0mem(   Register    r)  { assert(r != R0, "cannot use register R0 in memory access"); return ra(r); }
+  static int ra0mem(   int         x)  { assert(x != 0,  "cannot use register 0 in memory access");  return ra(x); }
+
+  // Register r is the target.
+  static int rt(       Register    r)  { return rs(r); }
+  static int rt(       int         x)  { return rs(x); }
+  static int rta(      Register    r)  { return ra(r); }
+  static int rta0mem(  Register    r)  { rta(r); return ra0mem(r); }
+
+  static int sh1620(   int         x)  { return  opp_u_field(x,             20, 16); }
+  static int sh30(     int         x)  { return  opp_u_field(x,             30, 30); }
+  static int sh162030( int         x)  { return  sh1620(x & 0x1f) | sh30((x & 0x20) >> 5); }
+  static int si(       int         x)  { return  opp_s_field(x,             31, 16); }
+  static int spr(      int         x)  { return  opp_u_field(x,             20, 11); }
+  static int sr(       int         x)  { return  opp_u_field(x,             15, 12); }
+  static int tbr(      int         x)  { return  opp_u_field(x,             20, 11); }
+  static int th(       int         x)  { return  opp_u_field(x,             10,  7); }
+  static int thct(     int         x)  { assert((x&8) == 0, "must be valid cache specification");  return th(x); }
+  static int thds(     int         x)  { assert((x&8) == 8, "must be valid stream specification"); return th(x); }
+  static int to(       int         x)  { return  opp_u_field(x,             10,  6); }
+  static int u(        int         x)  { return  opp_u_field(x,             19, 16); }
+  static int ui(       int         x)  { return  opp_u_field(x,             31, 16); }
+
+  // Support vector instructions for >= Power6.
+  static int vra(      int         x)  { return  opp_u_field(x,             15, 11); }
+  static int vrb(      int         x)  { return  opp_u_field(x,             20, 16); }
+  static int vrc(      int         x)  { return  opp_u_field(x,             25, 21); }
+  static int vrs(      int         x)  { return  opp_u_field(x,             10,  6); }
+  static int vrt(      int         x)  { return  opp_u_field(x,             10,  6); }
+
+  static int vra(   VectorRegister r)  { return  vra(r->encoding());}
+  static int vrb(   VectorRegister r)  { return  vrb(r->encoding());}
+  static int vrc(   VectorRegister r)  { return  vrc(r->encoding());}
+  static int vrs(   VectorRegister r)  { return  vrs(r->encoding());}
+  static int vrt(   VectorRegister r)  { return  vrt(r->encoding());}
+
+  static int vsplt_uim( int        x)  { return  opp_u_field(x,             15, 12); } // for vsplt* instructions
+  static int vsplti_sim(int        x)  { return  opp_u_field(x,             15, 11); } // for vsplti* instructions
+  static int vsldoi_shb(int        x)  { return  opp_u_field(x,             25, 22); } // for vsldoi instruction
+  static int vcmp_rc(   int        x)  { return  opp_u_field(x,             21, 21); } // for vcmp* instructions
+
+  //static int xo1(     int        x)  { return  opp_u_field(x,             29, 21); }// is contained in our opcodes
+  //static int xo2(     int        x)  { return  opp_u_field(x,             30, 21); }// is contained in our opcodes
+  //static int xo3(     int        x)  { return  opp_u_field(x,             30, 22); }// is contained in our opcodes
+  //static int xo4(     int        x)  { return  opp_u_field(x,             30, 26); }// is contained in our opcodes
+  //static int xo5(     int        x)  { return  opp_u_field(x,             29, 27); }// is contained in our opcodes
+  //static int xo6(     int        x)  { return  opp_u_field(x,             30, 27); }// is contained in our opcodes
+  //static int xo7(     int        x)  { return  opp_u_field(x,             31, 30); }// is contained in our opcodes
+
+ protected:
+  // Compute the relative branch displacement, in instruction words.
+  static intptr_t disp(intptr_t x, intptr_t off) {
+    int xx = x - off;
+    xx = xx >> 2;
+    return xx;
+  }
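+  // Illustrative example: disp(0x100, 0xF0) == 4, i.e. the target lies four
+  // 4-byte instructions past the branch.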
+
+ public:
+  // signed immediate, in low bits, nbits long
+  static int simm(int x, int nbits) {
+    assert_signed_range(x, nbits);
+    return x & ((1 << nbits) - 1);
+  }
+
+  // unsigned immediate, in low bits, nbits long
+  static int uimm(int x, int nbits) {
+    assert_unsigned_const(x, nbits);
+    return x & ((1 << nbits) - 1);
+  }
+
+  static void set_imm(int* instr, short s) {
+    short* p = ((short *)instr) + 1;
+    *p = s;
+  }
+
+  static int get_imm(address a, int instruction_number) {
+    short imm;
+    short *p =((short *)a)+2*instruction_number+1;
+    imm = *p;
+    return (int)imm;
+  }
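+  // Note (illustrative): set_imm and get_imm patch/read the 16-bit immediate
+  // halfword of a 4-byte instruction; the "+ 1" halfword offset selects the
+  // low halfword and thus assumes a big-endian instruction image.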
+
+  static inline int hi16_signed(  int x) { return (int)(int16_t)(x >> 16); }
+  static inline int lo16_unsigned(int x) { return x & 0xffff; }
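+  // Illustrative example: for x = 0x12345678, hi16_signed(x) == 0x1234 and
+  // lo16_unsigned(x) == 0x5678; lis(d, hi16_signed(x)) followed by
+  // ori(d, d, lo16_unsigned(x)) rebuilds x. Because ori does not sign-extend
+  // its immediate, the high half needs no +1 correction (an addi-based pair
+  // would need one when bit 15 of x is set).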
+
+ protected:
+
+  // Extract the top 32 bits of a 64-bit word.
+  static int32_t hi32(int64_t x) {
+    int32_t r = int32_t((uint64_t)x >> 32);
+    return r;
+  }
+
+ public:
+
+  static inline unsigned int align_addr(unsigned int addr, unsigned int a) {
+    return ((addr + (a - 1)) & ~(a - 1));
+  }
+
+  static inline bool is_aligned(unsigned int addr, unsigned int a) {
+    return (0 == addr % a);
+  }
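+  // Illustrative examples: align_addr(13, 8) rounds up to 16 (a must be a
+  // power of two for the mask to work), while is_aligned(16, 8) is true and
+  // is_aligned(13, 8) is false; is_aligned uses modulo, so any a != 0 works.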
+
+  void flush() {
+    AbstractAssembler::flush();
+  }
+
+  inline void emit_int32(int);  // shadows AbstractAssembler::emit_int32
+  inline void emit_data(int);
+  inline void emit_data(int, RelocationHolder const&);
+  inline void emit_data(int, relocInfo::relocType rtype);
+
+  // Emit an address.
+  inline address emit_addr(const address addr = NULL);
+
+  // Emit a function descriptor with the specified entry point, TOC,
+  // and ENV. If the entry point is NULL, the descriptor's entry point
+  // will point just past the descriptor itself.
+  // Use values from friend functions as defaults.
+  inline address emit_fd(address entry = NULL,
+                         address toc = (address) FunctionDescriptor::friend_toc,
+                         address env = (address) FunctionDescriptor::friend_env);
+
+  /////////////////////////////////////////////////////////////////////////////////////
+  // PPC instructions
+  /////////////////////////////////////////////////////////////////////////////////////
+
+  // Memory instructions use r0 as a hard-coded 0, e.g. to simulate loading
+  // immediates. The normal instruction encoders enforce that r0 is not
+  // passed to them. Use either the extended-mnemonic encoders or the
+  // special ra0 versions.
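+  // For example (illustrative, not in the original source): addi(d, R0, 17)
+  // would fail the encoder's assert; use li(d, 17), which deliberately
+  // encodes ra = 0 to mean "load immediate", or the addi_r0ok variant.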
+
+  // Issue an illegal instruction.
+  inline void illtrap();
+  static inline bool is_illtrap(int x);
+
+  // PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+  inline void addi( Register d, Register a, int si16);
+  inline void addis(Register d, Register a, int si16);
+ private:
+  inline void addi_r0ok( Register d, Register a, int si16);
+  inline void addis_r0ok(Register d, Register a, int si16);
+ public:
+  inline void addic_( Register d, Register a, int si16);
+  inline void subfic( Register d, Register a, int si16);
+  inline void add(    Register d, Register a, Register b);
+  inline void add_(   Register d, Register a, Register b);
+  inline void subf(   Register d, Register a, Register b);  // d = b - a    "Sub_from", as in ppc spec.
+  inline void sub(    Register d, Register a, Register b);  // d = a - b    Swap operands of subf for readability.
+  inline void subf_(  Register d, Register a, Register b);
+  inline void addc(   Register d, Register a, Register b);
+  inline void addc_(  Register d, Register a, Register b);
+  inline void subfc(  Register d, Register a, Register b);
+  inline void subfc_( Register d, Register a, Register b);
+  inline void adde(   Register d, Register a, Register b);
+  inline void adde_(  Register d, Register a, Register b);
+  inline void subfe(  Register d, Register a, Register b);
+  inline void subfe_( Register d, Register a, Register b);
+  inline void neg(    Register d, Register a);
+  inline void neg_(   Register d, Register a);
+  inline void mulli(  Register d, Register a, int si16);
+  inline void mulld(  Register d, Register a, Register b);
+  inline void mulld_( Register d, Register a, Register b);
+  inline void mullw(  Register d, Register a, Register b);
+  inline void mullw_( Register d, Register a, Register b);
+  inline void mulhw(  Register d, Register a, Register b);
+  inline void mulhw_( Register d, Register a, Register b);
+  inline void mulhd(  Register d, Register a, Register b);
+  inline void mulhd_( Register d, Register a, Register b);
+  inline void mulhdu( Register d, Register a, Register b);
+  inline void mulhdu_(Register d, Register a, Register b);
+  inline void divd(   Register d, Register a, Register b);
+  inline void divd_(  Register d, Register a, Register b);
+  inline void divw(   Register d, Register a, Register b);
+  inline void divw_(  Register d, Register a, Register b);
+
+  // extended mnemonics
+  inline void li(   Register d, int si16);
+  inline void lis(  Register d, int si16);
+  inline void addir(Register d, int si16, Register a);
+
+  static bool is_addi(int x) {
+     return ADDI_OPCODE == (x & ADDI_OPCODE_MASK);
+  }
+  static bool is_addis(int x) {
+     return ADDIS_OPCODE == (x & ADDIS_OPCODE_MASK);
+  }
+  static bool is_bxx(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK);
+  }
+  static bool is_b(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK) && inv_lk_field(x) == 0;
+  }
+  static bool is_bl(int x) {
+     return BXX_OPCODE == (x & BXX_OPCODE_MASK) && inv_lk_field(x) == 1;
+  }
+  static bool is_bcxx(int x) {
+     return BCXX_OPCODE == (x & BCXX_OPCODE_MASK);
+  }
+  static bool is_bxx_or_bcxx(int x) {
+     return is_bxx(x) || is_bcxx(x);
+  }
+  static bool is_bctrl(int x) {
+     return x == 0x4e800421;
+  }
+  static bool is_bctr(int x) {
+     return x == 0x4e800420;
+  }
+  static bool is_bclr(int x) {
+     return BCLR_OPCODE == (x & XL_FORM_OPCODE_MASK);
+  }
+  static bool is_li(int x) {
+     return is_addi(x) && inv_ra_field(x)==0;
+  }
+  static bool is_lis(int x) {
+     return is_addis(x) && inv_ra_field(x)==0;
+  }
+  static bool is_mtctr(int x) {
+     return MTCTR_OPCODE == (x & MTCTR_OPCODE_MASK);
+  }
+  static bool is_ld(int x) {
+     return LD_OPCODE == (x & LD_OPCODE_MASK);
+  }
+  static bool is_std(int x) {
+     return STD_OPCODE == (x & STD_OPCODE_MASK);
+  }
+  static bool is_stdu(int x) {
+     return STDU_OPCODE == (x & STDU_OPCODE_MASK);
+  }
+  static bool is_stdx(int x) {
+     return STDX_OPCODE == (x & STDX_OPCODE_MASK);
+  }
+  static bool is_stdux(int x) {
+     return STDUX_OPCODE == (x & STDUX_OPCODE_MASK);
+  }
+  static bool is_stwx(int x) {
+     return STWX_OPCODE == (x & STWX_OPCODE_MASK);
+  }
+  static bool is_stwux(int x) {
+     return STWUX_OPCODE == (x & STWUX_OPCODE_MASK);
+  }
+  static bool is_stw(int x) {
+     return STW_OPCODE == (x & STW_OPCODE_MASK);
+  }
+  static bool is_stwu(int x) {
+     return STWU_OPCODE == (x & STWU_OPCODE_MASK);
+  }
+  static bool is_ori(int x) {
+     return ORI_OPCODE == (x & ORI_OPCODE_MASK);
+  }
+  static bool is_oris(int x) {
+     return ORIS_OPCODE == (x & ORIS_OPCODE_MASK);
+  }
+  static bool is_rldicr(int x) {
+     return (RLDICR_OPCODE == (x & RLDICR_OPCODE_MASK));
+  }
+  static bool is_nop(int x) {
+    return x == 0x60000000;
+  }
+  // endgroup opcode for Power6
+  static bool is_endgroup(int x) {
+    return is_ori(x) && inv_ra_field(x) == 1 && inv_rs_field(x) == 1 && inv_d1_field(x) == 0;
+  }
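+  // Illustrative encodings: 0x60000000 is "ori r0, r0, 0", the canonical
+  // PPC nop; is_endgroup matches "ori r1, r1, 0", which Power6 dispatch
+  // treats as a group terminator.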
+
+
+ private:
+  // PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+  inline void cmpi( ConditionRegister bf, int l, Register a, int si16);
+  inline void cmp(  ConditionRegister bf, int l, Register a, Register b);
+  inline void cmpli(ConditionRegister bf, int l, Register a, int ui16);
+  inline void cmpl( ConditionRegister bf, int l, Register a, Register b);
+
+ public:
+  // extended mnemonics of Compare Instructions
+  inline void cmpwi( ConditionRegister crx, Register a, int si16);
+  inline void cmpdi( ConditionRegister crx, Register a, int si16);
+  inline void cmpw(  ConditionRegister crx, Register a, Register b);
+  inline void cmpd(  ConditionRegister crx, Register a, Register b);
+  inline void cmplwi(ConditionRegister crx, Register a, int ui16);
+  inline void cmpldi(ConditionRegister crx, Register a, int ui16);
+  inline void cmplw( ConditionRegister crx, Register a, Register b);
+  inline void cmpld( ConditionRegister crx, Register a, Register b);
+
+  inline void isel(   Register d, Register a, Register b, int bc);
+  // Convenience version taking a condition register, a condition code, and an invert flag. Omit b to keep the old value.
+  inline void isel(   Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b = noreg);
+  // Set d = 0 if (cr.cc) equals 1, otherwise d = b.
+  inline void isel_0( Register d, ConditionRegister cr, Condition cc, Register b = noreg);
+
+  // PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+         void andi(   Register a, Register s, int ui16);   // optimized version
+  inline void andi_(  Register a, Register s, int ui16);
+  inline void andis_( Register a, Register s, int ui16);
+  inline void ori(    Register a, Register s, int ui16);
+  inline void oris(   Register a, Register s, int ui16);
+  inline void xori(   Register a, Register s, int ui16);
+  inline void xoris(  Register a, Register s, int ui16);
+  inline void andr(   Register a, Register s, Register b);  // suffixed by 'r' as 'and' is C++ keyword
+  inline void and_(   Register a, Register s, Register b);
+  // Turn or0(rx,rx,rx) into a nop so that we do not accidentally emit an
+  // SMT-priority change instruction (see SMT instructions below).
+  inline void or_unchecked(Register a, Register s, Register b);
+  inline void orr(    Register a, Register s, Register b);  // suffixed by 'r' as 'or' is C++ keyword
+  inline void or_(    Register a, Register s, Register b);
+  inline void xorr(   Register a, Register s, Register b);  // suffixed by 'r' as 'xor' is C++ keyword
+  inline void xor_(   Register a, Register s, Register b);
+  inline void nand(   Register a, Register s, Register b);
+  inline void nand_(  Register a, Register s, Register b);
+  inline void nor(    Register a, Register s, Register b);
+  inline void nor_(   Register a, Register s, Register b);
+  inline void andc(   Register a, Register s, Register b);
+  inline void andc_(  Register a, Register s, Register b);
+  inline void orc(    Register a, Register s, Register b);
+  inline void orc_(   Register a, Register s, Register b);
+  inline void extsb(  Register a, Register s);
+  inline void extsh(  Register a, Register s);
+  inline void extsw(  Register a, Register s);
+
+  // extended mnemonics
+  inline void nop();
+  // NOP for FP and BR units (different versions to allow them to be in one group)
+  inline void fpnop0();
+  inline void fpnop1();
+  inline void brnop0();
+  inline void brnop1();
+  inline void brnop2();
+
+  inline void mr(      Register d, Register s);
+  inline void ori_opt( Register d, int ui16);
+  inline void oris_opt(Register d, int ui16);
+
+  // endgroup opcode for Power6
+  inline void endgroup();
+
+  // count instructions
+  inline void cntlzw(  Register a, Register s);
+  inline void cntlzw_( Register a, Register s);
+  inline void cntlzd(  Register a, Register s);
+  inline void cntlzd_( Register a, Register s);
+
+  // PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+  inline void sld(     Register a, Register s, Register b);
+  inline void sld_(    Register a, Register s, Register b);
+  inline void slw(     Register a, Register s, Register b);
+  inline void slw_(    Register a, Register s, Register b);
+  inline void srd(     Register a, Register s, Register b);
+  inline void srd_(    Register a, Register s, Register b);
+  inline void srw(     Register a, Register s, Register b);
+  inline void srw_(    Register a, Register s, Register b);
+  inline void srad(    Register a, Register s, Register b);
+  inline void srad_(   Register a, Register s, Register b);
+  inline void sraw(    Register a, Register s, Register b);
+  inline void sraw_(   Register a, Register s, Register b);
+  inline void sradi(   Register a, Register s, int sh6);
+  inline void sradi_(  Register a, Register s, int sh6);
+  inline void srawi(   Register a, Register s, int sh5);
+  inline void srawi_(  Register a, Register s, int sh5);
+
+  // extended mnemonics for Shift Instructions
+  inline void sldi(    Register a, Register s, int sh6);
+  inline void sldi_(   Register a, Register s, int sh6);
+  inline void slwi(    Register a, Register s, int sh5);
+  inline void slwi_(   Register a, Register s, int sh5);
+  inline void srdi(    Register a, Register s, int sh6);
+  inline void srdi_(   Register a, Register s, int sh6);
+  inline void srwi(    Register a, Register s, int sh5);
+  inline void srwi_(   Register a, Register s, int sh5);
+
+  inline void clrrdi(  Register a, Register s, int ui6);
+  inline void clrrdi_( Register a, Register s, int ui6);
+  inline void clrldi(  Register a, Register s, int ui6);
+  inline void clrldi_( Register a, Register s, int ui6);
+  inline void clrlsldi(Register a, Register s, int clrl6, int shl6);
+  inline void clrlsldi_(Register a, Register s, int clrl6, int shl6);
+  inline void extrdi(  Register a, Register s, int n, int b);
+  // testbit with condition register
+  inline void testbitdi(ConditionRegister cr, Register a, Register s, int ui6);
+
+  // rotate instructions
+  inline void rotldi(  Register a, Register s, int n);
+  inline void rotrdi(  Register a, Register s, int n);
+  inline void rotlwi(  Register a, Register s, int n);
+  inline void rotrwi(  Register a, Register s, int n);
+
+  // Rotate Instructions
+  inline void rldic(   Register a, Register s, int sh6, int mb6);
+  inline void rldic_(  Register a, Register s, int sh6, int mb6);
+  inline void rldicr(  Register a, Register s, int sh6, int mb6);
+  inline void rldicr_( Register a, Register s, int sh6, int mb6);
+  inline void rldicl(  Register a, Register s, int sh6, int mb6);
+  inline void rldicl_( Register a, Register s, int sh6, int mb6);
+  inline void rlwinm(  Register a, Register s, int sh5, int mb5, int me5);
+  inline void rlwinm_( Register a, Register s, int sh5, int mb5, int me5);
+  inline void rldimi(  Register a, Register s, int sh6, int mb6);
+  inline void rldimi_( Register a, Register s, int sh6, int mb6);
+  inline void rlwimi(  Register a, Register s, int sh5, int mb5, int me5);
+  inline void insrdi(  Register a, Register s, int n,   int b);
+  inline void insrwi(  Register a, Register s, int n,   int b);
+
+  // PPC 1, section 3.3.2 Fixed-Point Load Instructions
+  // 4 bytes
+  inline void lwzx( Register d, Register s1, Register s2);
+  inline void lwz(  Register d, int si16,    Register s1);
+  inline void lwzu( Register d, int si16,    Register s1);
+
+  // 4 bytes
+  inline void lwax( Register d, Register s1, Register s2);
+  inline void lwa(  Register d, int si16,    Register s1);
+
+  // 2 bytes
+  inline void lhzx( Register d, Register s1, Register s2);
+  inline void lhz(  Register d, int si16,    Register s1);
+  inline void lhzu( Register d, int si16,    Register s1);
+
+  // 2 bytes
+  inline void lhax( Register d, Register s1, Register s2);
+  inline void lha(  Register d, int si16,    Register s1);
+  inline void lhau( Register d, int si16,    Register s1);
+
+  // 1 byte
+  inline void lbzx( Register d, Register s1, Register s2);
+  inline void lbz(  Register d, int si16,    Register s1);
+  inline void lbzu( Register d, int si16,    Register s1);
+
+  // 8 bytes
+  inline void ldx(  Register d, Register s1, Register s2);
+  inline void ld(   Register d, int si16,    Register s1);
+  inline void ldu(  Register d, int si16,    Register s1);
+
+  //  PPC 1, section 3.3.3 Fixed-Point Store Instructions
+  inline void stwx( Register d, Register s1, Register s2);
+  inline void stw(  Register d, int si16,    Register s1);
+  inline void stwu( Register d, int si16,    Register s1);
+
+  inline void sthx( Register d, Register s1, Register s2);
+  inline void sth(  Register d, int si16,    Register s1);
+  inline void sthu( Register d, int si16,    Register s1);
+
+  inline void stbx( Register d, Register s1, Register s2);
+  inline void stb(  Register d, int si16,    Register s1);
+  inline void stbu( Register d, int si16,    Register s1);
+
+  inline void stdx( Register d, Register s1, Register s2);
+  inline void std(  Register d, int si16,    Register s1);
+  inline void stdu( Register d, int si16,    Register s1);
+  inline void stdux(Register s, Register a,  Register b);
+
+  // PPC 1, section 3.3.13 Move To/From System Register Instructions
+  inline void mtlr( Register s1);
+  inline void mflr( Register d);
+  inline void mtctr(Register s1);
+  inline void mfctr(Register d);
+  inline void mtcrf(int fxm, Register s);
+  inline void mfcr( Register d);
+  inline void mcrf( ConditionRegister crd, ConditionRegister cra);
+  inline void mtcr( Register s);
+
+  // PPC 1, section 2.4.1 Branch Instructions
+  inline void b(  address a, relocInfo::relocType rt = relocInfo::none);
+  inline void b(  Label& L);
+  inline void bl( address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bl( Label& L);
+  inline void bc( int boint, int biint, address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bc( int boint, int biint, Label& L);
+  inline void bcl(int boint, int biint, address a, relocInfo::relocType rt = relocInfo::none);
+  inline void bcl(int boint, int biint, Label& L);
+
+  inline void bclr(  int boint, int biint, int bhint, relocInfo::relocType rt = relocInfo::none);
+  inline void bclrl( int boint, int biint, int bhint, relocInfo::relocType rt = relocInfo::none);
+  inline void bcctr( int boint, int biint, int bhint = bhintbhBCCTRisNotReturnButSame,
+                         relocInfo::relocType rt = relocInfo::none);
+  inline void bcctrl(int boint, int biint, int bhint = bhintbhBCLRisReturn,
+                         relocInfo::relocType rt = relocInfo::none);
+
+  // helper function for b, bcxx
+  inline bool is_within_range_of_b(address a, address pc);
+  inline bool is_within_range_of_bcxx(address a, address pc);
+
+  // get the destination of a bxx branch (b, bl, ba, bla)
+  static inline address  bxx_destination(address baddr);
+  static inline address  bxx_destination(int instr, address pc);
+  static inline intptr_t bxx_destination_offset(int instr, intptr_t bxx_pos);
+
+  // extended mnemonics for branch instructions
+  inline void blt(ConditionRegister crx, Label& L);
+  inline void bgt(ConditionRegister crx, Label& L);
+  inline void beq(ConditionRegister crx, Label& L);
+  inline void bso(ConditionRegister crx, Label& L);
+  inline void bge(ConditionRegister crx, Label& L);
+  inline void ble(ConditionRegister crx, Label& L);
+  inline void bne(ConditionRegister crx, Label& L);
+  inline void bns(ConditionRegister crx, Label& L);
+
+  // Branch instructions with static prediction hints.
+  inline void blt_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bgt_predict_taken(    ConditionRegister crx, Label& L);
+  inline void beq_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bso_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bge_predict_taken(    ConditionRegister crx, Label& L);
+  inline void ble_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bne_predict_taken(    ConditionRegister crx, Label& L);
+  inline void bns_predict_taken(    ConditionRegister crx, Label& L);
+  inline void blt_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bgt_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void beq_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bso_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bge_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void ble_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bne_predict_not_taken(ConditionRegister crx, Label& L);
+  inline void bns_predict_not_taken(ConditionRegister crx, Label& L);
+
+  // for use in conjunction with testbitdi:
+  inline void btrue( ConditionRegister crx, Label& L);
+  inline void bfalse(ConditionRegister crx, Label& L);
+
+  inline void bltl(ConditionRegister crx, Label& L);
+  inline void bgtl(ConditionRegister crx, Label& L);
+  inline void beql(ConditionRegister crx, Label& L);
+  inline void bsol(ConditionRegister crx, Label& L);
+  inline void bgel(ConditionRegister crx, Label& L);
+  inline void blel(ConditionRegister crx, Label& L);
+  inline void bnel(ConditionRegister crx, Label& L);
+  inline void bnsl(ConditionRegister crx, Label& L);
+
+  // extended mnemonics for Branch Instructions via LR
+  // We use `blr' for returns.
+  inline void blr(relocInfo::relocType rt = relocInfo::none);
+
+  // extended mnemonics for Branch Instructions with CTR
+  // bdnz means `decrement CTR and jump to L if CTR is not zero'
+  inline void bdnz(Label& L);
+  // Decrement and branch if result is zero.
+  inline void bdz(Label& L);
+  // we use `bctr[l]' for jumps/calls in function descriptor glue
+  // code, e.g. calls to runtime functions
+  inline void bctr( relocInfo::relocType rt = relocInfo::none);
+  inline void bctrl(relocInfo::relocType rt = relocInfo::none);
+  // conditional jumps/branches via CTR
+  inline void beqctr( ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void beqctrl(ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void bnectr( ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+  inline void bnectrl(ConditionRegister crx, relocInfo::relocType rt = relocInfo::none);
+
+  // condition register logic instructions
+  inline void crand( int d, int s1, int s2);
+  inline void crnand(int d, int s1, int s2);
+  inline void cror(  int d, int s1, int s2);
+  inline void crxor( int d, int s1, int s2);
+  inline void crnor( int d, int s1, int s2);
+  inline void creqv( int d, int s1, int s2);
+  inline void crandc(int d, int s1, int s2);
+  inline void crorc( int d, int s1, int s2);
+
+  // icache and dcache related instructions
+  inline void icbi(  Register s1, Register s2);
+  //inline void dcba(Register s1, Register s2); // Instruction for embedded processor only.
+  inline void dcbz(  Register s1, Register s2);
+  inline void dcbst( Register s1, Register s2);
+  inline void dcbf(  Register s1, Register s2);
+
+  enum ct_cache_specification {
+    ct_primary_cache   = 0,
+    ct_secondary_cache = 2
+  };
+  // dcache read hint
+  inline void dcbt(    Register s1, Register s2);
+  inline void dcbtct(  Register s1, Register s2, int ct);
+  inline void dcbtds(  Register s1, Register s2, int ds);
+  // dcache write hint
+  inline void dcbtst(  Register s1, Register s2);
+  inline void dcbtstct(Register s1, Register s2, int ct);
+
+  //  machine barrier instructions:
+  //
+  //  - sync    two-way memory barrier, aka fence
+  //  - lwsync  orders  Store|Store,
+  //                     Load|Store,
+  //                     Load|Load,
+  //            but not Store|Load
+  //  - eieio   orders memory accesses for device memory (only)
+  //  - isync   invalidates speculatively executed instructions
+  //            From the Power ISA 2.06 documentation:
+  //             "[...] an isync instruction prevents the execution of
+  //            instructions following the isync until instructions
+  //            preceding the isync have completed, [...]"
+  //            From IBM's AIX assembler reference:
+  //             "The isync [...] instructions causes the processor to
+  //            refetch any instructions that might have been fetched
+  //            prior to the isync instruction. The instruction isync
+  //            causes the processor to wait for all previous instructions
+  //            to complete. Then any instructions already fetched are
+  //            discarded and instruction processing continues in the
+  //            environment established by the previous instructions."
+  //
+  //  semantic barrier instructions:
+  //  (as defined in orderAccess.hpp)
+  //
+  //  - release  orders Store|Store,       (maps to lwsync)
+  //                     Load|Store
+  //  - acquire  orders  Load|Store,       (maps to lwsync)
+  //                     Load|Load
+  //  - fence    orders Store|Store,       (maps to sync)
+  //                     Load|Store,
+  //                     Load|Load,
+  //                    Store|Load
+  //
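+  //  An illustrative mapping of the semantic barriers onto code (a sketch
+  //  following the table above, not a definitive recipe):
+  //    release store:  lwsync(); stw(...);
+  //    acquire load:   lwz(...); lwsync();   // or load + twi_0 + isync
+  //    full fence:     sync();
+  //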
+ private:
+  inline void sync(int l);
+ public:
+  inline void sync();
+  inline void lwsync();
+  inline void ptesync();
+  inline void eieio();
+  inline void isync();
+  inline void elemental_membar(int e); // Elemental Memory Barriers (>=Power 8)
+
+  // atomics
+  inline void lwarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+  inline void ldarx_unchecked(Register d, Register a, Register b, int eh1 = 0);
+  inline bool lxarx_hint_exclusive_access();
+  inline void lwarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
+  inline void ldarx(  Register d, Register a, Register b, bool hint_exclusive_access = false);
+  inline void stwcx_( Register s, Register a, Register b);
+  inline void stdcx_( Register s, Register a, Register b);
+
+  // Instructions for adjusting thread priority for simultaneous
+  // multithreading (SMT) on Power5.
+ private:
+  inline void smt_prio_very_low();
+  inline void smt_prio_medium_high();
+  inline void smt_prio_high();
+
+ public:
+  inline void smt_prio_low();
+  inline void smt_prio_medium_low();
+  inline void smt_prio_medium();
+
+  // trap instructions
+  inline void twi_0(Register a); // for load with acquire semantics use load+twi_0+isync (trap can't occur)
+  // NOT FOR DIRECT USE!!
+ protected:
+  inline void tdi_unchecked(int tobits, Register a, int si16);
+  inline void twi_unchecked(int tobits, Register a, int si16);
+  inline void tdi(          int tobits, Register a, int si16);   // asserts UseSIGTRAP
+  inline void twi(          int tobits, Register a, int si16);   // asserts UseSIGTRAP
+  inline void td(           int tobits, Register a, Register b); // asserts UseSIGTRAP
+  inline void tw(           int tobits, Register a, Register b); // asserts UseSIGTRAP
+
+  static bool is_tdi(int x, int tobits, int ra, int si16) {
+     return (TDI_OPCODE == (x & TDI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (si16 == inv_si_field(x));
+  }
+
+  static bool is_twi(int x, int tobits, int ra, int si16) {
+     return (TWI_OPCODE == (x & TWI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (si16 == inv_si_field(x));
+  }
+
+  static bool is_twi(int x, int tobits, int ra) {
+     return (TWI_OPCODE == (x & TWI_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x));
+  }
+
+  static bool is_td(int x, int tobits, int ra, int rb) {
+     return (TD_OPCODE == (x & TD_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (rb == -1/*any reg*/ || rb == inv_rb_field(x));
+  }
+
+  static bool is_tw(int x, int tobits, int ra, int rb) {
+     return (TW_OPCODE == (x & TW_OPCODE_MASK))
+         && (tobits == inv_to_field(x))
+         && (ra == -1/*any reg*/ || ra == inv_ra_field(x))
+         && (rb == -1/*any reg*/ || rb == inv_rb_field(x));
+  }
+
+ public:
+  // PPC floating point instructions
+  // PPC 1, section 4.6.2 Floating-Point Load Instructions
+  inline void lfs(  FloatRegister d, int si16,   Register a);
+  inline void lfsu( FloatRegister d, int si16,   Register a);
+  inline void lfsx( FloatRegister d, Register a, Register b);
+  inline void lfd(  FloatRegister d, int si16,   Register a);
+  inline void lfdu( FloatRegister d, int si16,   Register a);
+  inline void lfdx( FloatRegister d, Register a, Register b);
+
+  // PPC 1, section 4.6.3 Floating-Point Store Instructions
+  inline void stfs(  FloatRegister s, int si16,   Register a);
+  inline void stfsu( FloatRegister s, int si16,   Register a);
+  inline void stfsx( FloatRegister s, Register a, Register b);
+  inline void stfd(  FloatRegister s, int si16,   Register a);
+  inline void stfdu( FloatRegister s, int si16,   Register a);
+  inline void stfdx( FloatRegister s, Register a, Register b);
+
+  // PPC 1, section 4.6.4 Floating-Point Move Instructions
+  inline void fmr(  FloatRegister d, FloatRegister b);
+  inline void fmr_( FloatRegister d, FloatRegister b);
+
+  //  inline void mffgpr( FloatRegister d, Register b);
+  //  inline void mftgpr( Register d, FloatRegister b);
+  inline void cmpb(   Register a, Register s, Register b);
+  inline void popcntb(Register a, Register s);
+  inline void popcntw(Register a, Register s);
+  inline void popcntd(Register a, Register s);
+
+  inline void fneg(  FloatRegister d, FloatRegister b);
+  inline void fneg_( FloatRegister d, FloatRegister b);
+  inline void fabs(  FloatRegister d, FloatRegister b);
+  inline void fabs_( FloatRegister d, FloatRegister b);
+  inline void fnabs( FloatRegister d, FloatRegister b);
+  inline void fnabs_(FloatRegister d, FloatRegister b);
+
+  // PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
+  inline void fadd(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadd_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadds( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fadds_(FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsub(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsub_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsubs( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fsubs_(FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fmul(  FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmul_( FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmuls( FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fmuls_(FloatRegister d, FloatRegister a, FloatRegister c);
+  inline void fdiv(  FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdiv_( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdivs( FloatRegister d, FloatRegister a, FloatRegister b);
+  inline void fdivs_(FloatRegister d, FloatRegister a, FloatRegister b);
+
+  // PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
+  inline void frsp(  FloatRegister d, FloatRegister b);
+  inline void fctid( FloatRegister d, FloatRegister b);
+  inline void fctidz(FloatRegister d, FloatRegister b);
+  inline void fctiw( FloatRegister d, FloatRegister b);
+  inline void fctiwz(FloatRegister d, FloatRegister b);
+  inline void fcfid( FloatRegister d, FloatRegister b);
+  inline void fcfids(FloatRegister d, FloatRegister b);
+
+  // PPC 1, section 4.6.7 Floating-Point Compare Instructions
+  inline void fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b);
+
+  inline void fsqrt( FloatRegister d, FloatRegister b);
+  inline void fsqrts(FloatRegister d, FloatRegister b);
+
+  // Vector instructions for >= Power6.
+  inline void lvebx(    VectorRegister d, Register s1, Register s2);
+  inline void lvehx(    VectorRegister d, Register s1, Register s2);
+  inline void lvewx(    VectorRegister d, Register s1, Register s2);
+  inline void lvx(      VectorRegister d, Register s1, Register s2);
+  inline void lvxl(     VectorRegister d, Register s1, Register s2);
+  inline void stvebx(   VectorRegister d, Register s1, Register s2);
+  inline void stvehx(   VectorRegister d, Register s1, Register s2);
+  inline void stvewx(   VectorRegister d, Register s1, Register s2);
+  inline void stvx(     VectorRegister d, Register s1, Register s2);
+  inline void stvxl(    VectorRegister d, Register s1, Register s2);
+  inline void lvsl(     VectorRegister d, Register s1, Register s2);
+  inline void lvsr(     VectorRegister d, Register s1, Register s2);
+  inline void vpkpx(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkshss(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkswss(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkshus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkswus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuhum(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuwum(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuhus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vpkuwus(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vupkhpx(  VectorRegister d, VectorRegister b);
+  inline void vupkhsb(  VectorRegister d, VectorRegister b);
+  inline void vupkhsh(  VectorRegister d, VectorRegister b);
+  inline void vupklpx(  VectorRegister d, VectorRegister b);
+  inline void vupklsb(  VectorRegister d, VectorRegister b);
+  inline void vupklsh(  VectorRegister d, VectorRegister b);
+  inline void vmrghb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrghw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrghh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmrglh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsplt(    VectorRegister d, int ui4,          VectorRegister b);
+  inline void vsplth(   VectorRegister d, int ui3,          VectorRegister b);
+  inline void vspltw(   VectorRegister d, int ui2,          VectorRegister b);
+  inline void vspltisb( VectorRegister d, int si5);
+  inline void vspltish( VectorRegister d, int si5);
+  inline void vspltisw( VectorRegister d, int si5);
+  inline void vperm(    VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsel(     VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsl(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsldoi(   VectorRegister d, VectorRegister a, VectorRegister b, int si4);
+  inline void vslo(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsr(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsro(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddcuw(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddshs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddsbs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddubm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduwm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduhm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vaddubs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vadduhs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubcuw(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubshs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubsbs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsububm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuwm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuhm(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsububs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsubuhs(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulesb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuleub(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulesh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuleuh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulosb(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmuloub(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulosh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmulouh(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmhaddshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmhraddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmladduhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsubuhm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsummbm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumshm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumshs( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumuhm( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vmsumuhs( VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c);
+  inline void vsumsws(  VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum2sws( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4sbs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4ubs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsum4shs( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavgub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavguw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vavguh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxuw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vmaxuh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsb(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminsh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminub(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminuw(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vminuh(   VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequb( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsb( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtub( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuh( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuw( VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequb_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpequw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsb_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtsw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtub_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuh_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vcmpgtuw_(VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vand(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vandc(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vnor(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vor(      VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vxor(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vrlh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vslb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vslw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vslh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrb(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrw(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrh(     VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrab(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsraw(    VectorRegister d, VectorRegister a, VectorRegister b);
+  inline void vsrah(    VectorRegister d, VectorRegister a, VectorRegister b);
+  // Vector Floating-Point not implemented yet
+  inline void mtvscr(   VectorRegister b);
+  inline void mfvscr(   VectorRegister d);
+
+  // The following encoders use r0 as the second operand. These
+  // instructions read r0 as '0'.
+  inline void lwzx( Register d, Register s2);
+  inline void lwz(  Register d, int si16);
+  inline void lwax( Register d, Register s2);
+  inline void lwa(  Register d, int si16);
+  inline void lhzx( Register d, Register s2);
+  inline void lhz(  Register d, int si16);
+  inline void lhax( Register d, Register s2);
+  inline void lha(  Register d, int si16);
+  inline void lbzx( Register d, Register s2);
+  inline void lbz(  Register d, int si16);
+  inline void ldx(  Register d, Register s2);
+  inline void ld(   Register d, int si16);
+  inline void stwx( Register d, Register s2);
+  inline void stw(  Register d, int si16);
+  inline void sthx( Register d, Register s2);
+  inline void sth(  Register d, int si16);
+  inline void stbx( Register d, Register s2);
+  inline void stb(  Register d, int si16);
+  inline void stdx( Register d, Register s2);
+  inline void std(  Register d, int si16);
+
+  // PPC 2, section 3.2.1 Instruction Cache Instructions
+  inline void icbi(    Register s2);
+  // PPC 2, section 3.2.2 Data Cache Instructions
+  //inline void dcba(  Register s2); // Instruction for embedded processor only.
+  inline void dcbz(    Register s2);
+  inline void dcbst(   Register s2);
+  inline void dcbf(    Register s2);
+  // dcache read hint
+  inline void dcbt(    Register s2);
+  inline void dcbtct(  Register s2, int ct);
+  inline void dcbtds(  Register s2, int ds);
+  // dcache write hint
+  inline void dcbtst(  Register s2);
+  inline void dcbtstct(Register s2, int ct);
+
+  // Atomics: use ra0mem to disallow R0 as base.
+  inline void lwarx_unchecked(Register d, Register b, int eh1);
+  inline void ldarx_unchecked(Register d, Register b, int eh1);
+  inline void lwarx( Register d, Register b, bool hint_exclusive_access);
+  inline void ldarx( Register d, Register b, bool hint_exclusive_access);
+  inline void stwcx_(Register s, Register b);
+  inline void stdcx_(Register s, Register b);
+  inline void lfs(   FloatRegister d, int si16);
+  inline void lfsx(  FloatRegister d, Register b);
+  inline void lfd(   FloatRegister d, int si16);
+  inline void lfdx(  FloatRegister d, Register b);
+  inline void stfs(  FloatRegister s, int si16);
+  inline void stfsx( FloatRegister s, Register b);
+  inline void stfd(  FloatRegister s, int si16);
+  inline void stfdx( FloatRegister s, Register b);
+  inline void lvebx( VectorRegister d, Register s2);
+  inline void lvehx( VectorRegister d, Register s2);
+  inline void lvewx( VectorRegister d, Register s2);
+  inline void lvx(   VectorRegister d, Register s2);
+  inline void lvxl(  VectorRegister d, Register s2);
+  inline void stvebx(VectorRegister d, Register s2);
+  inline void stvehx(VectorRegister d, Register s2);
+  inline void stvewx(VectorRegister d, Register s2);
+  inline void stvx(  VectorRegister d, Register s2);
+  inline void stvxl( VectorRegister d, Register s2);
+  inline void lvsl(  VectorRegister d, Register s2);
+  inline void lvsr(  VectorRegister d, Register s2);
+
+  // RegisterOrConstant versions.
+  // These emitters choose between the two-register form and the
+  // register-plus-immediate form, depending on the contents of roc.
+  // If the constant is not encodable as an immediate, instructions to
+  // load the constant are emitted beforehand. Store instructions need a
+  // tmp register if the constant is not encodable as an immediate.
+  // The emitted size is unpredictable.
+  void ld(  Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lwa( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lwz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lha( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lhz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void lbz( Register d, RegisterOrConstant roc, Register s1 = noreg);
+  void std( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void stw( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void sth( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void stb( Register d, RegisterOrConstant roc, Register s1 = noreg, Register tmp = noreg);
+  void add( Register d, RegisterOrConstant roc, Register s1);
+  void subf(Register d, RegisterOrConstant roc, Register s1);
+  void cmpd(ConditionRegister d, RegisterOrConstant roc, Register s1);
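+  // Illustrative usage (hypothetical operands): ld(R3, RegisterOrConstant(24), R4)
+  // emits "ld r3, 24(r4)"; if roc holds a register, the indexed form ldx is
+  // emitted instead; a constant that does not fit the 16-bit displacement is
+  // materialized into a register first (into tmp for the store variants).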
+
+
+  // Emit several instructions to load a 64-bit constant. This issues a fixed
+  // instruction pattern so that the constant can be patched later on.
+  enum {
+    load_const_size = 5 * BytesPerInstWord
+  };
+         void load_const(Register d, long a,            Register tmp = noreg);
+  inline void load_const(Register d, void* a,           Register tmp = noreg);
+  inline void load_const(Register d, Label& L,          Register tmp = noreg);
+  inline void load_const(Register d, AddressLiteral& a, Register tmp = noreg);
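+  // An illustrative expansion (one possible pattern, assuming no tmp):
+  //   lis(d, hi16 of the upper word); ori(d, d, lo16 of the upper word);
+  //   sldi(d, d, 32); oris(d, d, hi16 of the lower word); ori(d, d, lo16);
+  // -- five instructions, matching load_const_size above.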
+
+  // Load a 64-bit constant, optimized, not identifiable.
+  // Tmp can be used to increase ILP. Set return_simm16_rest = true to get a
+  // 16-bit immediate offset. This is useful if the offset can be encoded in
+  // a succeeding instruction.
+         int load_const_optimized(Register d, long a,  Register tmp = noreg, bool return_simm16_rest = false);
+  inline int load_const_optimized(Register d, void* a, Register tmp = noreg, bool return_simm16_rest = false) {
+    return load_const_optimized(d, (long)(unsigned long)a, tmp, return_simm16_rest);
+  }
+
+  // Creation
+  Assembler(CodeBuffer* code) : AbstractAssembler(code) {
+#ifdef CHECK_DELAY
+    delay_state = no_delay;
+#endif
+  }
+
+  // Testing
+#ifndef PRODUCT
+  void test_asm();
+#endif
+};
+
+
+#endif // CPU_PPC_VM_ASSEMBLER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,813 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
+#define CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
+
+#include "asm/assembler.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "code/codeCache.hpp"
+
+inline void Assembler::emit_int32(int x) {
+  AbstractAssembler::emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x) {
+  emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x, relocInfo::relocType rtype) {
+  relocate(rtype);
+  emit_int32(x);
+}
+
+inline void Assembler::emit_data(int x, RelocationHolder const& rspec) {
+  relocate(rspec);
+  emit_int32(x);
+}
+
+// Emit an address
+inline address Assembler::emit_addr(const address addr) {
+  address start = pc();
+  emit_address(addr);
+  return start;
+}
+
+// Emit a function descriptor with the specified entry point, TOC, and
+// ENV. If the entry point is NULL, the entry point will point just
+// past the descriptor.
+inline address Assembler::emit_fd(address entry, address toc, address env) {
+  FunctionDescriptor* fd = (FunctionDescriptor*)pc();
+
+  assert(sizeof(FunctionDescriptor) == 3*sizeof(address), "function descriptor size");
+
+  (void)emit_addr();
+  (void)emit_addr();
+  (void)emit_addr();
+
+  fd->set_entry(entry == NULL ? pc() : entry);
+  fd->set_toc(toc);
+  fd->set_env(env);
+
+  return (address)fd;
+}
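+// Illustrative sketch: emit_fd(NULL, toc, env) therefore yields a descriptor
+// whose entry is the first instruction emitted after the three address slots,
+// which is the common case for function descriptor glue code.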
+
+// Issue an illegal instruction. 0 is guaranteed to be an illegal instruction.
+inline void Assembler::illtrap() { Assembler::emit_int32(0); }
+inline bool Assembler::is_illtrap(int x) { return x == 0; }
+
+// PPC 1, section 3.3.8, Fixed-Point Arithmetic Instructions
+inline void Assembler::addi(   Register d, Register a, int si16)   { assert(a != R0, "r0 not allowed"); addi_r0ok( d, a, si16); }
+inline void Assembler::addis(  Register d, Register a, int si16)   { assert(a != R0, "r0 not allowed"); addis_r0ok(d, a, si16); }
+inline void Assembler::addi_r0ok(Register d,Register a,int si16)   { emit_int32(ADDI_OPCODE   | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::addis_r0ok(Register d,Register a,int si16)  { emit_int32(ADDIS_OPCODE  | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::addic_( Register d, Register a, int si16)   { emit_int32(ADDIC__OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::subfic( Register d, Register a, int si16)   { emit_int32(SUBFIC_OPCODE | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::add(    Register d, Register a, Register b) { emit_int32(ADD_OPCODE    | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::add_(   Register d, Register a, Register b) { emit_int32(ADD_OPCODE    | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subf(   Register d, Register a, Register b) { emit_int32(SUBF_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::sub(    Register d, Register a, Register b) { subf(d, b, a); }
+inline void Assembler::subf_(  Register d, Register a, Register b) { emit_int32(SUBF_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::addc(   Register d, Register a, Register b) { emit_int32(ADDC_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::addc_(  Register d, Register a, Register b) { emit_int32(ADDC_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subfc(  Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::subfc_( Register d, Register a, Register b) { emit_int32(SUBFC_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::adde(   Register d, Register a, Register b) { emit_int32(ADDE_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::adde_(  Register d, Register a, Register b) { emit_int32(ADDE_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::subfe(  Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::subfe_( Register d, Register a, Register b) { emit_int32(SUBFE_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::neg(    Register d, Register a)             { emit_int32(NEG_OPCODE    | rt(d) | ra(a) | oe(0) | rc(0)); }
+inline void Assembler::neg_(   Register d, Register a)             { emit_int32(NEG_OPCODE    | rt(d) | ra(a) | oe(0) | rc(1)); }
+inline void Assembler::mulli(  Register d, Register a, int si16)   { emit_int32(MULLI_OPCODE  | rt(d) | ra(a) | simm(si16, 16)); }
+inline void Assembler::mulld(  Register d, Register a, Register b) { emit_int32(MULLD_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::mulld_( Register d, Register a, Register b) { emit_int32(MULLD_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::mullw(  Register d, Register a, Register b) { emit_int32(MULLW_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::mullw_( Register d, Register a, Register b) { emit_int32(MULLW_OPCODE  | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::mulhw(  Register d, Register a, Register b) { emit_int32(MULHW_OPCODE  | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhw_( Register d, Register a, Register b) { emit_int32(MULHW_OPCODE  | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::mulhd(  Register d, Register a, Register b) { emit_int32(MULHD_OPCODE  | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhd_( Register d, Register a, Register b) { emit_int32(MULHD_OPCODE  | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::mulhdu( Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(0)); }
+inline void Assembler::mulhdu_(Register d, Register a, Register b) { emit_int32(MULHDU_OPCODE | rt(d) | ra(a) | rb(b) | rc(1)); }
+inline void Assembler::divd(   Register d, Register a, Register b) { emit_int32(DIVD_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::divd_(  Register d, Register a, Register b) { emit_int32(DIVD_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+inline void Assembler::divw(   Register d, Register a, Register b) { emit_int32(DIVW_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(0)); }
+inline void Assembler::divw_(  Register d, Register a, Register b) { emit_int32(DIVW_OPCODE   | rt(d) | ra(a) | rb(b) | oe(0) | rc(1)); }
+
+// extended mnemonics
+inline void Assembler::li(   Register d, int si16)             { Assembler::addi_r0ok( d, R0, si16); }
+inline void Assembler::lis(  Register d, int si16)             { Assembler::addis_r0ok(d, R0, si16); }
+inline void Assembler::addir(Register d, int si16, Register a) { Assembler::addi(d, a, si16); }
+
+// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
+inline void Assembler::cmpi(  ConditionRegister f, int l, Register a, int si16)   { emit_int32( CMPI_OPCODE  | bf(f) | l10(l) | ra(a) | simm(si16,16)); }
+inline void Assembler::cmp(   ConditionRegister f, int l, Register a, Register b) { emit_int32( CMP_OPCODE   | bf(f) | l10(l) | ra(a) | rb(b)); }
+inline void Assembler::cmpli( ConditionRegister f, int l, Register a, int ui16)   { emit_int32( CMPLI_OPCODE | bf(f) | l10(l) | ra(a) | uimm(ui16,16)); }
+inline void Assembler::cmpl(  ConditionRegister f, int l, Register a, Register b) { emit_int32( CMPL_OPCODE  | bf(f) | l10(l) | ra(a) | rb(b)); }
+
+// extended mnemonics of Compare Instructions
+inline void Assembler::cmpwi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 0, a, si16); }
+inline void Assembler::cmpdi( ConditionRegister crx, Register a, int si16)   { Assembler::cmpi( crx, 1, a, si16); }
+inline void Assembler::cmpw(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 0, a, b); }
+inline void Assembler::cmpd(  ConditionRegister crx, Register a, Register b) { Assembler::cmp(  crx, 1, a, b); }
+inline void Assembler::cmplwi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 0, a, ui16); }
+inline void Assembler::cmpldi(ConditionRegister crx, Register a, int ui16)   { Assembler::cmpli(crx, 1, a, ui16); }
+inline void Assembler::cmplw( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 0, a, b); }
+inline void Assembler::cmpld( ConditionRegister crx, Register a, Register b) { Assembler::cmpl( crx, 1, a, b); }
+
+inline void Assembler::isel(Register d, Register a, Register b, int c) { emit_int32(ISEL_OPCODE    | rt(d)  | ra(a) | rb(b) | bc(c)); }
+
+// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
+inline void Assembler::andi_(   Register a, Register s, int ui16)      { emit_int32(ANDI_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::andis_(  Register a, Register s, int ui16)      { emit_int32(ANDIS_OPCODE   | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::ori(     Register a, Register s, int ui16)      { emit_int32(ORI_OPCODE     | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::oris(    Register a, Register s, int ui16)      { emit_int32(ORIS_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::xori(    Register a, Register s, int ui16)      { emit_int32(XORI_OPCODE    | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::xoris(   Register a, Register s, int ui16)      { emit_int32(XORIS_OPCODE   | rta(a) | rs(s) | uimm(ui16, 16)); }
+inline void Assembler::andr(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::and_(    Register a, Register s, Register b)    { emit_int32(AND_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+
+inline void Assembler::or_unchecked(Register a, Register s, Register b){ emit_int32(OR_OPCODE      | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::orr(     Register a, Register s, Register b)    { if (a==s && s==b) { Assembler::nop(); } else { Assembler::or_unchecked(a,s,b); } }
+inline void Assembler::or_(     Register a, Register s, Register b)    { emit_int32(OR_OPCODE      | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::xorr(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::xor_(    Register a, Register s, Register b)    { emit_int32(XOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::nand(    Register a, Register s, Register b)    { emit_int32(NAND_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::nand_(   Register a, Register s, Register b)    { emit_int32(NAND_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::nor(     Register a, Register s, Register b)    { emit_int32(NOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::nor_(    Register a, Register s, Register b)    { emit_int32(NOR_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::andc(    Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::andc_(   Register a, Register s, Register b)    { emit_int32(ANDC_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::orc(     Register a, Register s, Register b)    { emit_int32(ORC_OPCODE     | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::orc_(    Register a, Register s, Register b)    { emit_int32(ORC_OPCODE     | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::extsb(   Register a, Register s)                { emit_int32(EXTSB_OPCODE   | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::extsh(   Register a, Register s)                { emit_int32(EXTSH_OPCODE   | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::extsw(   Register a, Register s)                { emit_int32(EXTSW_OPCODE   | rta(a) | rs(s) | rc(0)); }
+
+// extended mnemonics
+inline void Assembler::nop()                              { Assembler::ori(R0, R0, 0); }
+// NOP for FP and BR units (different versions to allow them to be in one group)
+inline void Assembler::fpnop0()                           { Assembler::fmr(F30, F30); }
+inline void Assembler::fpnop1()                           { Assembler::fmr(F31, F31); }
+inline void Assembler::brnop0()                           { Assembler::mcrf(CCR2, CCR2); }
+inline void Assembler::brnop1()                           { Assembler::mcrf(CCR3, CCR3); }
+inline void Assembler::brnop2()                           { Assembler::mcrf(CCR4, CCR4); }
+
+inline void Assembler::mr(      Register d, Register s)   { Assembler::orr(d, s, s); }
+inline void Assembler::ori_opt( Register d, int ui16)     { if (ui16!=0) Assembler::ori( d, d, ui16); }
+inline void Assembler::oris_opt(Register d, int ui16)     { if (ui16!=0) Assembler::oris(d, d, ui16); }
+
+inline void Assembler::endgroup()                         { Assembler::ori(R1, R1, 0); }
+
+// count instructions
+inline void Assembler::cntlzw(  Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::cntlzw_( Register a, Register s)              { emit_int32(CNTLZW_OPCODE | rta(a) | rs(s) | rc(1)); }
+inline void Assembler::cntlzd(  Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(0)); }
+inline void Assembler::cntlzd_( Register a, Register s)              { emit_int32(CNTLZD_OPCODE | rta(a) | rs(s) | rc(1)); }
+
+// PPC 1, section 3.3.12, Fixed-Point Rotate and Shift Instructions
+inline void Assembler::sld(     Register a, Register s, Register b)  { emit_int32(SLD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::sld_(    Register a, Register s, Register b)  { emit_int32(SLD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::slw(     Register a, Register s, Register b)  { emit_int32(SLW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::slw_(    Register a, Register s, Register b)  { emit_int32(SLW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srd(     Register a, Register s, Register b)  { emit_int32(SRD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srd_(    Register a, Register s, Register b)  { emit_int32(SRD_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srw(     Register a, Register s, Register b)  { emit_int32(SRW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srw_(    Register a, Register s, Register b)  { emit_int32(SRW_OPCODE    | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::srad(    Register a, Register s, Register b)  { emit_int32(SRAD_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::srad_(   Register a, Register s, Register b)  { emit_int32(SRAD_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::sraw(    Register a, Register s, Register b)  { emit_int32(SRAW_OPCODE   | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::sraw_(   Register a, Register s, Register b)  { emit_int32(SRAW_OPCODE   | rta(a) | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::sradi(   Register a, Register s, int sh6)     { emit_int32(SRADI_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | rc(0)); }
+inline void Assembler::sradi_(  Register a, Register s, int sh6)     { emit_int32(SRADI_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | rc(1)); }
+inline void Assembler::srawi(   Register a, Register s, int sh5)     { emit_int32(SRAWI_OPCODE  | rta(a) | rs(s) | sh1620(sh5) | rc(0)); }
+inline void Assembler::srawi_(  Register a, Register s, int sh5)     { emit_int32(SRAWI_OPCODE  | rta(a) | rs(s) | sh1620(sh5) | rc(1)); }
+
+// extended mnemonics for Shift Instructions
+inline void Assembler::sldi(    Register a, Register s, int sh6)     { Assembler::rldicr(a, s, sh6, 63-sh6); }
+inline void Assembler::sldi_(   Register a, Register s, int sh6)     { Assembler::rldicr_(a, s, sh6, 63-sh6); }
+inline void Assembler::slwi(    Register a, Register s, int sh5)     { Assembler::rlwinm(a, s, sh5, 0, 31-sh5); }
+inline void Assembler::slwi_(   Register a, Register s, int sh5)     { Assembler::rlwinm_(a, s, sh5, 0, 31-sh5); }
+inline void Assembler::srdi(    Register a, Register s, int sh6)     { Assembler::rldicl(a, s, 64-sh6, sh6); }
+inline void Assembler::srdi_(   Register a, Register s, int sh6)     { Assembler::rldicl_(a, s, 64-sh6, sh6); }
+inline void Assembler::srwi(    Register a, Register s, int sh5)     { Assembler::rlwinm(a, s, 32-sh5, sh5, 31); }
+inline void Assembler::srwi_(   Register a, Register s, int sh5)     { Assembler::rlwinm_(a, s, 32-sh5, sh5, 31); }
+
+inline void Assembler::clrrdi(  Register a, Register s, int ui6)     { Assembler::rldicr(a, s, 0, 63-ui6); }
+inline void Assembler::clrrdi_( Register a, Register s, int ui6)     { Assembler::rldicr_(a, s, 0, 63-ui6); }
+inline void Assembler::clrldi(  Register a, Register s, int ui6)     { Assembler::rldicl(a, s, 0, ui6); }
+inline void Assembler::clrldi_( Register a, Register s, int ui6)     { Assembler::rldicl_(a, s, 0, ui6); }
+inline void Assembler::clrlsldi( Register a, Register s, int clrl6, int shl6) { Assembler::rldic( a, s, shl6, clrl6-shl6); }
+inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) { Assembler::rldic_(a, s, shl6, clrl6-shl6); }
+inline void Assembler::extrdi(  Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
+// testbit with condition register.
+inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
+  if (cr == CCR0) {
+    Assembler::rldicr_(a, s, 63-ui6, 0);
+  } else {
+    Assembler::rldicr(a, s, 63-ui6, 0);
+    Assembler::cmpdi(cr, a, 0);
+  }
+}
+
+// rotate instructions
+inline void Assembler::rotldi( Register a, Register s, int n) { Assembler::rldicl(a, s, n, 0); }
+inline void Assembler::rotrdi( Register a, Register s, int n) { Assembler::rldicl(a, s, 64-n, 0); }
+inline void Assembler::rotlwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, n, 0, 31); }
+inline void Assembler::rotrwi( Register a, Register s, int n) { Assembler::rlwinm(a, s, 32-n, 0, 31); }
+
+inline void Assembler::rldic(   Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rldic_(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIC_OPCODE  | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::rldicr(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rldicr_( Register a, Register s, int sh6, int mb6)         { emit_int32(RLDICR_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::rldicl(  Register a, Register s, int sh6, int me6)         { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(0)); }
+inline void Assembler::rldicl_( Register a, Register s, int sh6, int me6)         { emit_int32(RLDICL_OPCODE | rta(a) | rs(s) | sh162030(sh6) | me2126(me6) | rc(1)); }
+inline void Assembler::rlwinm(  Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
+inline void Assembler::rlwinm_( Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWINM_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(1)); }
+inline void Assembler::rldimi(  Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(0)); }
+inline void Assembler::rlwimi(  Register a, Register s, int sh5, int mb5, int me5){ emit_int32(RLWIMI_OPCODE | rta(a) | rs(s) | sh1620(sh5) | mb2125(mb5) | me2630(me5) | rc(0)); }
+inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6)         { emit_int32(RLDIMI_OPCODE | rta(a) | rs(s) | sh162030(sh6) | mb2126(mb6) | rc(1)); }
+inline void Assembler::insrdi(  Register a, Register s, int n,   int b)           { Assembler::rldimi(a, s, 64-(b+n), b); }
+inline void Assembler::insrwi(  Register a, Register s, int n,   int b)           { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
+
+// PPC 1, section 3.3.2 Fixed-Point Load Instructions
+inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwz(  Register d, int si16,    Register s1) { emit_int32(LWZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lwzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LWZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lwax( Register d, Register s1, Register s2) { emit_int32(LWAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lwa(  Register d, int si16,    Register s1) { emit_int32(LWA_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
+
+inline void Assembler::lhzx( Register d, Register s1, Register s2) { emit_int32(LHZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lhz(  Register d, int si16,    Register s1) { emit_int32(LHZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lhzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lhax( Register d, Register s1, Register s2) { emit_int32(LHAX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lha(  Register d, int si16,    Register s1) { emit_int32(LHA_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lhau( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LHAU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::lbzx( Register d, Register s1, Register s2) { emit_int32(LBZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::lbz(  Register d, int si16,    Register s1) { emit_int32(LBZ_OPCODE  | rt(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::lbzu( Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LBZU_OPCODE | rt(d) | d1(si16) | rta0mem(s1));}
+
+inline void Assembler::ld(   Register d, int si16,    Register s1) { emit_int32(LD_OPCODE  | rt(d) | ds(si16)   | ra0mem(s1));}
+inline void Assembler::ldx(  Register d, Register s1, Register s2) { emit_int32(LDX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::ldu(  Register d, int si16,    Register s1) { assert(d != s1, "according to ibm manual"); emit_int32(LDU_OPCODE | rt(d) | ds(si16) | rta0mem(s1));}
+
+// PPC 1, section 3.3.3 Fixed-Point Store Instructions
+inline void Assembler::stwx( Register d, Register s1, Register s2) { emit_int32(STWX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stw(  Register d, int si16,    Register s1) { emit_int32(STW_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::stwu( Register d, int si16,    Register s1) { emit_int32(STWU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::sthx( Register d, Register s1, Register s2) { emit_int32(STHX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::sth(  Register d, int si16,    Register s1) { emit_int32(STH_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::sthu( Register d, int si16,    Register s1) { emit_int32(STHU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::stbx( Register d, Register s1, Register s2) { emit_int32(STBX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stb(  Register d, int si16,    Register s1) { emit_int32(STB_OPCODE  | rs(d) | d1(si16)   | ra0mem(s1));}
+inline void Assembler::stbu( Register d, int si16,    Register s1) { emit_int32(STBU_OPCODE | rs(d) | d1(si16)   | rta0mem(s1));}
+
+inline void Assembler::std(  Register d, int si16,    Register s1) { emit_int32(STD_OPCODE  | rs(d) | ds(si16)   | ra0mem(s1));}
+inline void Assembler::stdx( Register d, Register s1, Register s2) { emit_int32(STDX_OPCODE | rs(d) | ra0mem(s1) | rb(s2));}
+inline void Assembler::stdu( Register d, int si16,    Register s1) { emit_int32(STDU_OPCODE | rs(d) | ds(si16)   | rta0mem(s1));}
+inline void Assembler::stdux(Register s, Register a,  Register b)  { emit_int32(STDUX_OPCODE| rs(s) | rta0mem(a) | rb(b));}
+
+// PPC 1, section 3.3.13 Move To/From System Register Instructions
+inline void Assembler::mtlr( Register s1)         { emit_int32(MTLR_OPCODE  | rs(s1)); }
+inline void Assembler::mflr( Register d )         { emit_int32(MFLR_OPCODE  | rt(d)); }
+inline void Assembler::mtctr(Register s1)         { emit_int32(MTCTR_OPCODE | rs(s1)); }
+inline void Assembler::mfctr(Register d )         { emit_int32(MFCTR_OPCODE | rt(d)); }
+inline void Assembler::mtcrf(int afxm, Register s){ emit_int32(MTCRF_OPCODE | fxm(afxm) | rs(s)); }
+inline void Assembler::mfcr( Register d )         { emit_int32(MFCR_OPCODE  | rt(d)); }
+inline void Assembler::mcrf( ConditionRegister crd, ConditionRegister cra)
+                                                      { emit_int32(MCRF_OPCODE | bf(crd) | bfa(cra)); }
+inline void Assembler::mtcr( Register s)          { Assembler::mtcrf(0xff, s); }
+
+// SAP JVM 2006-02-13 PPC branch instruction.
+// PPC 1, section 2.4.1 Branch Instructions
+inline void Assembler::b( address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(0), rt); }
+inline void Assembler::b( Label& L)                           { b( target(L)); }
+inline void Assembler::bl(address a, relocInfo::relocType rt) { emit_data(BXX_OPCODE| li(disp( intptr_t(a), intptr_t(pc()))) |aa(0)|lk(1), rt); }
+inline void Assembler::bl(Label& L)                           { bl(target(L)); }
+inline void Assembler::bc( int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0) | lk(0), rt); }
+inline void Assembler::bc( int boint, int biint, Label& L)                           { bc(boint, biint, target(L)); }
+inline void Assembler::bcl(int boint, int biint, address a, relocInfo::relocType rt) { emit_data(BCXX_OPCODE| bo(boint) | bi(biint) | bd(disp( intptr_t(a), intptr_t(pc()))) | aa(0)|lk(1), rt); }
+inline void Assembler::bcl(int boint, int biint, Label& L)                           { bcl(boint, biint, target(L)); }
+
+inline void Assembler::bclr(  int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
+inline void Assembler::bclrl( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCLR_OPCODE | bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
+inline void Assembler::bcctr( int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(0), rt); }
+inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocType rt) { emit_data(BCCTR_OPCODE| bo(boint) | bi(biint) | bh(bhint) | aa(0) | lk(1), rt); }
+
+// helper function for b
+inline bool Assembler::is_within_range_of_b(address a, address pc) {
+  // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+  if ((((uint64_t)a) & 0x3) != 0) return false;
+
+  const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
+  int value = disp(intptr_t(a), intptr_t(pc));
+  bool result = -range <= value && value < range-1;
+#ifdef ASSERT
+  if (result) li(value); // Assert that value is in correct range.
+#endif
+  return result;
+}
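+// Hence unconditional bxx branches reach targets within roughly +/- 32 MB of
+// pc: a 24 bit signed word displacement, i.e. 1 << 23 slots of 4 bytes each.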
+
+// helper functions for bcxx.
+inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
+  // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+  if ((((uint64_t)a) & 0x3) != 0) return false;
+
+  const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
+  int value = disp(intptr_t(a), intptr_t(pc));
+  bool result = -range <= value && value < range-1;
+#ifdef ASSERT
+  if (result) bd(value); // Assert that value is in correct range.
+#endif
+  return result;
+}
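+// The 14 bit signed word displacement of bcxx limits conditional branches to
+// roughly +/- 32 KB around pc (1 << 13 slots of 4 bytes each).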
+
+// Get the destination of a bxx branch (b, bl, ba, bla).
+inline address  Assembler::bxx_destination(address baddr) { return bxx_destination(*(int*)baddr, baddr); }
+inline address  Assembler::bxx_destination(int instr, address pc) { return (address)bxx_destination_offset(instr, (intptr_t)pc); }
+inline intptr_t Assembler::bxx_destination_offset(int instr, intptr_t bxx_pos) {
+  intptr_t displ = inv_li_field(instr);
+  return bxx_pos + displ;
+}
+
+// Extended mnemonics for Branch Instructions
+inline void Assembler::blt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, less), L); }
+inline void Assembler::bgt(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, greater), L); }
+inline void Assembler::beq(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, equal), L); }
+inline void Assembler::bso(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
+inline void Assembler::bge(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, less), L); }
+inline void Assembler::ble(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, greater), L); }
+inline void Assembler::bne(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, equal), L); }
+inline void Assembler::bns(ConditionRegister crx, Label& L) { Assembler::bc(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
+
+// Branch instructions with static prediction hints.
+inline void Assembler::blt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, less), L); }
+inline void Assembler::bgt_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, greater), L); }
+inline void Assembler::beq_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, equal), L); }
+inline void Assembler::bso_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsTaken,    bi0(crx, summary_overflow), L); }
+inline void Assembler::bge_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, less), L); }
+inline void Assembler::ble_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, greater), L); }
+inline void Assembler::bne_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, equal), L); }
+inline void Assembler::bns_predict_taken    (ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsTaken,    bi0(crx, summary_overflow), L); }
+inline void Assembler::blt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, less), L); }
+inline void Assembler::bgt_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, greater), L); }
+inline void Assembler::beq_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, equal), L); }
+inline void Assembler::bso_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs1_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
+inline void Assembler::bge_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, less), L); }
+inline void Assembler::ble_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, greater), L); }
+inline void Assembler::bne_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, equal), L); }
+inline void Assembler::bns_predict_not_taken(ConditionRegister crx, Label& L) { bc(bcondCRbiIs0_bhintIsNotTaken, bi0(crx, summary_overflow), L); }
+
+// For use in conjunction with testbitdi:
+inline void Assembler::btrue( ConditionRegister crx, Label& L) { Assembler::bne(crx, L); }
+inline void Assembler::bfalse(ConditionRegister crx, Label& L) { Assembler::beq(crx, L); }
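+
+// Illustrative sketch (registers hypothetical): branch to L if bit 3
+// (counted from the lsb) of R4 is set, using R5 as scratch:
+//   testbitdi(CCR0, R5, R4, 3);
+//   btrue(CCR0, L);
+// (bfalse would branch if the bit is clear.)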
+
+inline void Assembler::bltl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, less), L); }
+inline void Assembler::bgtl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, greater), L); }
+inline void Assembler::beql(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, equal), L); }
+inline void Assembler::bsol(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs1, bi0(crx, summary_overflow), L); }
+inline void Assembler::bgel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, less), L); }
+inline void Assembler::blel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, greater), L); }
+inline void Assembler::bnel(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, equal), L); }
+inline void Assembler::bnsl(ConditionRegister crx, Label& L) { Assembler::bcl(bcondCRbiIs0, bi0(crx, summary_overflow), L); }
+
+// Extended mnemonics for Branch Instructions via LR.
+// We use `blr' for returns.
+inline void Assembler::blr(relocInfo::relocType rt) { Assembler::bclr(bcondAlways, 0, bhintbhBCLRisReturn, rt); }
+
+// Extended mnemonics for Branch Instructions with CTR.
+// Bdnz means `decrement CTR and jump to L if CTR is not zero'.
+inline void Assembler::bdnz(Label& L) { Assembler::bc(16, 0, L); }
+// Decrement and branch if result is zero.
+inline void Assembler::bdz(Label& L)  { Assembler::bc(18, 0, L); }
+// We use `bctr[l]' for jumps/calls in function descriptor glue
+// code, e.g. for calls to runtime functions.
+inline void Assembler::bctr( relocInfo::relocType rt) { Assembler::bcctr(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bctrl(relocInfo::relocType rt) { Assembler::bcctrl(bcondAlways, 0, bhintbhBCCTRisNotReturnButSame, rt); }
+// Conditional jumps/branches via CTR.
+inline void Assembler::beqctr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::beqctrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs1, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bnectr( ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctr( bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+inline void Assembler::bnectrl(ConditionRegister crx, relocInfo::relocType rt) { Assembler::bcctrl(bcondCRbiIs0, bi0(crx, equal), bhintbhBCCTRisNotReturnButSame, rt); }
+
+// condition register logic instructions
+inline void Assembler::crand( int d, int s1, int s2) { emit_int32(CRAND_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crnand(int d, int s1, int s2) { emit_int32(CRNAND_OPCODE | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::cror(  int d, int s1, int s2) { emit_int32(CROR_OPCODE   | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crxor( int d, int s1, int s2) { emit_int32(CRXOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crnor( int d, int s1, int s2) { emit_int32(CRNOR_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::creqv( int d, int s1, int s2) { emit_int32(CREQV_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crandc(int d, int s1, int s2) { emit_int32(CRANDC_OPCODE | bt(d) | ba(s1) | bb(s2)); }
+inline void Assembler::crorc( int d, int s1, int s2) { emit_int32(CRORC_OPCODE  | bt(d) | ba(s1) | bb(s2)); }
+
+// Conditional move (>= Power7)
+inline void Assembler::isel(Register d, ConditionRegister cr, Condition cc, bool inv, Register a, Register b) {
+  if (b == noreg) {
+    b = d; // Can be omitted if old value should be kept in "else" case.
+  }
+  Register first = a;
+  Register second = b;
+  if (inv) {
+    first = b;
+    second = a; // exchange
+  }
+  assert(first != R0, "r0 not allowed");
+  isel(d, first, second, bi0(cr, cc));
+}
+inline void Assembler::isel_0(Register d, ConditionRegister cr, Condition cc, Register b) {
+  if (b == noreg) {
+    b = d; // Can be omitted if old value should be kept in "else" case.
+  }
+  isel(d, R0, b, bi0(cr, cc));
+}
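+// Illustrative sketch (registers hypothetical): Rd = (Ra == Rb) ? Rx : Ry
+// on >= Power7:
+//   cmpd(CCR0, Ra, Rb);
+//   isel(Rd, CCR0, equal, false, Rx, Ry);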
+
+// PPC 2, section 3.2.1 Instruction Cache Instructions
+inline void Assembler::icbi(    Register s1, Register s2)         { emit_int32( ICBI_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+// PPC 2, section 3.2.2 Data Cache Instructions
+//inline void Assembler::dcba(  Register s1, Register s2)         { emit_int32( DCBA_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbz(    Register s1, Register s2)         { emit_int32( DCBZ_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbst(   Register s1, Register s2)         { emit_int32( DCBST_OPCODE  | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbf(    Register s1, Register s2)         { emit_int32( DCBF_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+// dcache read hint
+inline void Assembler::dcbt(    Register s1, Register s2)         { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbtct(  Register s1, Register s2, int ct) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thct(ct)); }
+inline void Assembler::dcbtds(  Register s1, Register s2, int ds) { emit_int32( DCBT_OPCODE   | ra0mem(s1) | rb(s2) | thds(ds)); }
+// dcache write hint
+inline void Assembler::dcbtst(  Register s1, Register s2)         { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2)           ); }
+inline void Assembler::dcbtstct(Register s1, Register s2, int ct) { emit_int32( DCBTST_OPCODE | ra0mem(s1) | rb(s2) | thct(ct)); }
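+
+// Note: dcbz zeroes an entire cache line and is the basis of fast block-clear
+// loops; the dcbt/dcbtst variants are pure performance hints and never fault.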
+
+// machine barrier instructions:
+inline void Assembler::sync(int a) { emit_int32( SYNC_OPCODE | l910(a)); }
+inline void Assembler::sync()      { Assembler::sync(0); }
+inline void Assembler::lwsync()    { Assembler::sync(1); }
+inline void Assembler::ptesync()   { Assembler::sync(2); }
+inline void Assembler::eieio()     { emit_int32( EIEIO_OPCODE); }
+inline void Assembler::isync()     { emit_int32( ISYNC_OPCODE); }
+inline void Assembler::elemental_membar(int e) { assert(0 < e && e < 16, "invalid encoding"); emit_int32( SYNC_OPCODE | e1215(e)); }
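+
+// Note: lwsync orders load-load, load-store and store-store, but not
+// store-load; it serves as the usual acquire/release barrier, while the
+// heavier sync() is needed where store-load ordering is required.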
+
+// atomics
+// Use ra0mem to disallow R0 as base.
+inline void Assembler::lwarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LWARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline void Assembler::ldarx_unchecked(Register d, Register a, Register b, int eh1)           { emit_int32( LDARX_OPCODE | rt(d) | ra0mem(a) | rb(b) | eh(eh1)); }
+inline bool Assembler::lxarx_hint_exclusive_access()                                          { return VM_Version::has_lxarxeh(); }
+inline void Assembler::lwarx( Register d, Register a, Register b, bool hint_exclusive_access) { lwarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::ldarx( Register d, Register a, Register b, bool hint_exclusive_access) { ldarx_unchecked(d, a, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::stwcx_(Register s, Register a, Register b)                             { emit_int32( STWCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
+inline void Assembler::stdcx_(Register s, Register a, Register b)                             { emit_int32( STDCX_OPCODE | rs(s) | ra0mem(a) | rb(b) | rc(1)); }
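+
+// Illustrative sketch (registers and labels hypothetical): a 64 bit
+// compare-and-swap loop built from ldarx/stdcx_:
+//   Label retry, done;
+//   bind(retry);
+//   ldarx(Rold, Rbase, Roffs);     // load and reserve
+//   cmpd(CCR0, Rold, Rcmp);
+//   bne(CCR0, done);               // value differs -> fail
+//   stdcx_(Rnew, Rbase, Roffs);    // store conditional, sets CCR0
+//   bne(CCR0, retry);              // reservation lost -> retry
+//   bind(done);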
+
+// Instructions for adjusting thread priority
+// for simultaneous multithreading (SMT) on POWER5.
+inline void Assembler::smt_prio_very_low()    { Assembler::or_unchecked(R31, R31, R31); }
+inline void Assembler::smt_prio_low()         { Assembler::or_unchecked(R1,  R1,  R1); }
+inline void Assembler::smt_prio_medium_low()  { Assembler::or_unchecked(R6,  R6,  R6); }
+inline void Assembler::smt_prio_medium()      { Assembler::or_unchecked(R2,  R2,  R2); }
+inline void Assembler::smt_prio_medium_high() { Assembler::or_unchecked(R5,  R5,  R5); }
+inline void Assembler::smt_prio_high()        { Assembler::or_unchecked(R3,  R3,  R3); }
+
+inline void Assembler::twi_0(Register a)      { twi_unchecked(0, a, 0);}
+
+// trap instructions
+inline void Assembler::tdi_unchecked(int tobits, Register a, int si16){                                     emit_int32( TDI_OPCODE | to(tobits) | ra(a) | si(si16)); }
+inline void Assembler::twi_unchecked(int tobits, Register a, int si16){                                     emit_int32( TWI_OPCODE | to(tobits) | ra(a) | si(si16)); }
+inline void Assembler::tdi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); tdi_unchecked(tobits, a, si16);                      }
+inline void Assembler::twi(int tobits, Register a, int si16)          { assert(UseSIGTRAP, "precondition"); twi_unchecked(tobits, a, si16);                      }
+inline void Assembler::td( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TD_OPCODE  | to(tobits) | ra(a) | rb(b)); }
+inline void Assembler::tw( int tobits, Register a, Register b)        { assert(UseSIGTRAP, "precondition"); emit_int32( TW_OPCODE  | to(tobits) | ra(a) | rb(b)); }
+
+// FLOATING POINT instructions (PPC).
+// PPC 1, section 4.6.2 Floating-Point Load Instructions
+// Use ra0mem instead of ra in some instructions below.
+inline void Assembler::lfs( FloatRegister d, int si16, Register a)   { emit_int32( LFS_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::lfsu(FloatRegister d, int si16, Register a)   { emit_int32( LFSU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
+inline void Assembler::lfsx(FloatRegister d, Register a, Register b) { emit_int32( LFSX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
+inline void Assembler::lfd( FloatRegister d, int si16, Register a)   { emit_int32( LFD_OPCODE  | frt(d) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::lfdu(FloatRegister d, int si16, Register a)   { emit_int32( LFDU_OPCODE | frt(d) | ra(a)     | simm(si16,16)); }
+inline void Assembler::lfdx(FloatRegister d, Register a, Register b) { emit_int32( LFDX_OPCODE | frt(d) | ra0mem(a) | rb(b)); }
+
+// PPC 1, section 4.6.3 Floating-Point Store Instructions
+// Use ra0mem instead of ra in some instructions below.
+inline void Assembler::stfs( FloatRegister s, int si16, Register a)  { emit_int32( STFS_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::stfsu(FloatRegister s, int si16, Register a)  { emit_int32( STFSU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
+inline void Assembler::stfsx(FloatRegister s, Register a, Register b){ emit_int32( STFSX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
+inline void Assembler::stfd( FloatRegister s, int si16, Register a)  { emit_int32( STFD_OPCODE  | frs(s) | ra0mem(a) | simm(si16,16)); }
+inline void Assembler::stfdu(FloatRegister s, int si16, Register a)  { emit_int32( STFDU_OPCODE | frs(s) | ra(a)     | simm(si16,16)); }
+inline void Assembler::stfdx(FloatRegister s, Register a, Register b){ emit_int32( STFDX_OPCODE | frs(s) | ra0mem(a) | rb(b)); }
+
+// PPC 1, section 4.6.4 Floating-Point Move Instructions
+inline void Assembler::fmr( FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fmr_(FloatRegister d, FloatRegister b) { emit_int32( FMR_OPCODE | frt(d) | frb(b) | rc(1)); }
+
+// These are special Power6 opcodes, reused for "lfdepx" and "stfdepx"
+// on Power7.  Do not use.
+//inline void Assembler::mffgpr( FloatRegister d, Register b)   { emit_int32( MFFGPR_OPCODE | frt(d) | rb(b) | rc(0)); }
+//inline void Assembler::mftgpr( Register d, FloatRegister b)   { emit_int32( MFTGPR_OPCODE | rt(d) | frb(b) | rc(0)); }
+// cmpb and popcntb are used to detect the PPC Power version.
+inline void Assembler::cmpb(   Register a, Register s, Register b) { emit_int32( CMPB_OPCODE    | rta(a) | rs(s) | rb(b) | rc(0)); }
+inline void Assembler::popcntb(Register a, Register s)             { emit_int32( POPCNTB_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::popcntw(Register a, Register s)             { emit_int32( POPCNTW_OPCODE | rta(a) | rs(s)); }
+inline void Assembler::popcntd(Register a, Register s)             { emit_int32( POPCNTD_OPCODE | rta(a) | rs(s)); }
+
+inline void Assembler::fneg(  FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fneg_( FloatRegister d, FloatRegister b) { emit_int32( FNEG_OPCODE  | frt(d) | frb(b) | rc(1)); }
+inline void Assembler::fabs(  FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fabs_( FloatRegister d, FloatRegister b) { emit_int32( FABS_OPCODE  | frt(d) | frb(b) | rc(1)); }
+inline void Assembler::fnabs( FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fnabs_(FloatRegister d, FloatRegister b) { emit_int32( FNABS_OPCODE | frt(d) | frb(b) | rc(1)); }
+
+// PPC 1, section 4.6.5.1 Floating-Point Elementary Arithmetic Instructions
+inline void Assembler::fadd(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fadd_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADD_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fadds( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fadds_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FADDS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fsub(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fsub_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUB_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fsubs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fsubs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FSUBS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fmul(  FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(0)); }
+inline void Assembler::fmul_( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMUL_OPCODE  | frt(d) | fra(a) | frc(c) | rc(1)); }
+inline void Assembler::fmuls( FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(0)); }
+inline void Assembler::fmuls_(FloatRegister d, FloatRegister a, FloatRegister c) { emit_int32( FMULS_OPCODE | frt(d) | fra(a) | frc(c) | rc(1)); }
+inline void Assembler::fdiv(  FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fdiv_( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIV_OPCODE  | frt(d) | fra(a) | frb(b) | rc(1)); }
+inline void Assembler::fdivs( FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(0)); }
+inline void Assembler::fdivs_(FloatRegister d, FloatRegister a, FloatRegister b) { emit_int32( FDIVS_OPCODE | frt(d) | fra(a) | frb(b) | rc(1)); }
+
+// PPC 1, section 4.6.6 Floating-Point Rounding and Conversion Instructions
+inline void Assembler::frsp(  FloatRegister d, FloatRegister b) { emit_int32( FRSP_OPCODE   | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctid( FloatRegister d, FloatRegister b) { emit_int32( FCTID_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctidz(FloatRegister d, FloatRegister b) { emit_int32( FCTIDZ_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctiw( FloatRegister d, FloatRegister b) { emit_int32( FCTIW_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fctiwz(FloatRegister d, FloatRegister b) { emit_int32( FCTIWZ_OPCODE | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfid( FloatRegister d, FloatRegister b) { emit_int32( FCFID_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fcfids(FloatRegister d, FloatRegister b) { emit_int32( FCFIDS_OPCODE | frt(d) | frb(b) | rc(0)); }
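+
+// Illustrative sketch (big-endian, registers hypothetical): the classic
+// double -> int32 round-towards-zero sequence built from these emitters:
+//   fctiwz(F0, F1);       // convert F1, int32 result in the low word of F0
+//   stfd(F0, -8, R1);     // spill to the stack (R1 is the SP) ...
+//   lwz(R3, -4, R1);      // ... and reload the low 32 bits as an integer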
+
+// PPC 1, section 4.6.7 Floating-Point Compare Instructions
+inline void Assembler::fcmpu( ConditionRegister crx, FloatRegister a, FloatRegister b) { emit_int32( FCMPU_OPCODE | bf(crx) | fra(a) | frb(b)); }
+
+// PPC 1, section 5.2.1 Floating-Point Arithmetic Instructions
+inline void Assembler::fsqrt( FloatRegister d, FloatRegister b) { emit_int32( FSQRT_OPCODE  | frt(d) | frb(b) | rc(0)); }
+inline void Assembler::fsqrts(FloatRegister d, FloatRegister b) { emit_int32( FSQRTS_OPCODE | frt(d) | frb(b) | rc(0)); }
+
+// Vector instructions for >= Power6.
+inline void Assembler::lvebx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvehx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvewx( VectorRegister d, Register s1, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvx(   VectorRegister d, Register s1, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvxl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvebx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvehx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvewx(VectorRegister d, Register s1, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvx(  VectorRegister d, Register s1, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::stvxl( VectorRegister d, Register s1, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvsl(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+inline void Assembler::lvsr(  VectorRegister d, Register s1, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | ra0mem(s1) | rb(s2)); }
+
+inline void Assembler::vpkpx(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKPX_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkshss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkswss( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWSS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkshus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkswus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKSWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuhum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuwum( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuhus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUHUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vpkuwus( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VPKUWUS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vupkhpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHPX_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupkhsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSB_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupkhsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKHSH_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklpx( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLPX_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklsb( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSB_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vupklsh( VectorRegister d, VectorRegister b)                   { emit_int32( VUPKLSH_OPCODE | vrt(d) | vrb(b)); }
+inline void Assembler::vmrghb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrghw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrghh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGHH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLB_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLW_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmrglh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMRGLH_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsplt(   VectorRegister d, int ui4,          VectorRegister b) { emit_int32( VSPLT_OPCODE   | vrt(d) | vsplt_uim(uimm(ui4,4)) | vrb(b)); }
+inline void Assembler::vsplth(  VectorRegister d, int ui3,          VectorRegister b) { emit_int32( VSPLTH_OPCODE  | vrt(d) | vsplt_uim(uimm(ui3,3)) | vrb(b)); }
+inline void Assembler::vspltw(  VectorRegister d, int ui2,          VectorRegister b) { emit_int32( VSPLTW_OPCODE  | vrt(d) | vsplt_uim(uimm(ui2,2)) | vrb(b)); }
+inline void Assembler::vspltisb(VectorRegister d, int si5)                            { emit_int32( VSPLTISB_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vspltish(VectorRegister d, int si5)                            { emit_int32( VSPLTISH_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vspltisw(VectorRegister d, int si5)                            { emit_int32( VSPLTISW_OPCODE| vrt(d) | vsplti_sim(simm(si5,5))); }
+inline void Assembler::vperm(   VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VPERM_OPCODE | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+inline void Assembler::vsel(    VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c){ emit_int32( VSEL_OPCODE  | vrt(d) | vra(a) | vrb(b) | vrc(c)); }
+inline void Assembler::vsl(     VectorRegister d, VectorRegister a, VectorRegister b)                  { emit_int32( VSL_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsldoi(  VectorRegister d, VectorRegister a, VectorRegister b, int si4)         { emit_int32( VSLDOI_OPCODE| vrt(d) | vra(a) | vrb(b) | vsldoi_shb(simm(si4,4))); }
+inline void Assembler::vslo(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLO_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsr(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsro(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRO_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddubm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vaddubs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vadduhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VADDUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubcuw( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBCUW_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubshs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubsbs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBSWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsububm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuwm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuhm( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHM_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsububs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsubuhs( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUBUHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulesb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuleub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulesh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULESH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuleuh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULEUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulosb( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmuloub( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUB_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulosh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOSH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmulouh( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMULOUH_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmhaddshs(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMHADDSHS_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmhraddshs(VectorRegister d,VectorRegister a,VectorRegister b, VectorRegister c) { emit_int32( VMHRADDSHS_OPCODE| vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmladduhm(VectorRegister d,VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMLADDUHM_OPCODE | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsubuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUBUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsummbm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMMBM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumshm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumshs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMSHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumuhm(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHM_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vmsumuhs(VectorRegister d, VectorRegister a, VectorRegister b, VectorRegister c) { emit_int32( VMSUMUHS_OPCODE  | vrt(d) | vra(a) | vrb(b)| vrc(c)); }
+inline void Assembler::vsumsws( VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUMSWS_OPCODE  | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum2sws(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM2SWS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4sbs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4ubs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4UBS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsum4shs(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSUM4SHS_OPCODE | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavgub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavguw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vavguh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAVGUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vmaxuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMAXUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsb(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminsh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINSH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminub(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUB_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminuw(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUW_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vminuh(  VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VMINUH_OPCODE   | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vcmpequb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpequh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpequw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsb(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtsw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtub(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtuh(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpgtuw(VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(0)); }
+inline void Assembler::vcmpequb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpequh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpequw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPEQUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsb_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtsw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTSW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtub_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUB_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtuh_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUH_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vcmpgtuw_(VectorRegister d,VectorRegister a, VectorRegister b) { emit_int32( VCMPGTUW_OPCODE | vrt(d) | vra(a) | vrb(b) | vcmp_rc(1)); }
+inline void Assembler::vand(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VAND_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vandc(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VANDC_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vnor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VNOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vor(     VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VOR_OPCODE      | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vxor(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VXOR_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vrlb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vrlw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vrlh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VRLH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vslb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vslw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vslh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSLH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrb(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRB_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrw(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRW_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrh(    VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRH_OPCODE     | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrab(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAB_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsraw(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAW_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::vsrah(   VectorRegister d, VectorRegister a, VectorRegister b) { emit_int32( VSRAH_OPCODE    | vrt(d) | vra(a) | vrb(b)); }
+inline void Assembler::mtvscr(  VectorRegister b)                                     { emit_int32( MTVSCR_OPCODE   | vrb(b)); }
+inline void Assembler::mfvscr(  VectorRegister d)                                     { emit_int32( MFVSCR_OPCODE   | vrt(d)); }
+
+// ra0 version
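+// ("ra0" means the RA field is left zero: the Power ISA then computes the
+//  effective address as (RA|0) + RB, resp. (RA|0) + disp, i.e. with no base
+//  register.)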
+inline void Assembler::lwzx( Register d, Register s2) { emit_int32( LWZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lwz(  Register d, int si16   ) { emit_int32( LWZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lwax( Register d, Register s2) { emit_int32( LWAX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lwa(  Register d, int si16   ) { emit_int32( LWA_OPCODE  | rt(d) | ds(si16));}
+inline void Assembler::lhzx( Register d, Register s2) { emit_int32( LHZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lhz(  Register d, int si16   ) { emit_int32( LHZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lhax( Register d, Register s2) { emit_int32( LHAX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lha(  Register d, int si16   ) { emit_int32( LHA_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::lbzx( Register d, Register s2) { emit_int32( LBZX_OPCODE | rt(d) | rb(s2));}
+inline void Assembler::lbz(  Register d, int si16   ) { emit_int32( LBZ_OPCODE  | rt(d) | d1(si16));}
+inline void Assembler::ld(   Register d, int si16   ) { emit_int32( LD_OPCODE   | rt(d) | ds(si16));}
+inline void Assembler::ldx(  Register d, Register s2) { emit_int32( LDX_OPCODE  | rt(d) | rb(s2));}
+inline void Assembler::stwx( Register d, Register s2) { emit_int32( STWX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stw(  Register d, int si16   ) { emit_int32( STW_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::sthx( Register d, Register s2) { emit_int32( STHX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::sth(  Register d, int si16   ) { emit_int32( STH_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::stbx( Register d, Register s2) { emit_int32( STBX_OPCODE | rs(d) | rb(s2));}
+inline void Assembler::stb(  Register d, int si16   ) { emit_int32( STB_OPCODE  | rs(d) | d1(si16));}
+inline void Assembler::std(  Register d, int si16   ) { emit_int32( STD_OPCODE  | rs(d) | ds(si16));}
+inline void Assembler::stdx( Register d, Register s2) { emit_int32( STDX_OPCODE | rs(d) | rb(s2));}
+
+// ra0 version
+inline void Assembler::icbi(    Register s2)          { emit_int32( ICBI_OPCODE   | rb(s2)           ); }
+//inline void Assembler::dcba(  Register s2)          { emit_int32( DCBA_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbz(    Register s2)          { emit_int32( DCBZ_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbst(   Register s2)          { emit_int32( DCBST_OPCODE  | rb(s2)           ); }
+inline void Assembler::dcbf(    Register s2)          { emit_int32( DCBF_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbt(    Register s2)          { emit_int32( DCBT_OPCODE   | rb(s2)           ); }
+inline void Assembler::dcbtct(  Register s2, int ct)  { emit_int32( DCBT_OPCODE   | rb(s2) | thct(ct)); }
+inline void Assembler::dcbtds(  Register s2, int ds)  { emit_int32( DCBT_OPCODE   | rb(s2) | thds(ds)); }
+inline void Assembler::dcbtst(  Register s2)          { emit_int32( DCBTST_OPCODE | rb(s2)           ); }
+inline void Assembler::dcbtstct(Register s2, int ct)  { emit_int32( DCBTST_OPCODE | rb(s2) | thct(ct)); }
+
+// ra0 version
+inline void Assembler::lwarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LWARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::ldarx_unchecked(Register d, Register b, int eh1)          { emit_int32( LDARX_OPCODE | rt(d) | rb(b) | eh(eh1)); }
+inline void Assembler::lwarx( Register d, Register b, bool hint_exclusive_access){ lwarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::ldarx( Register d, Register b, bool hint_exclusive_access){ ldarx_unchecked(d, b, (hint_exclusive_access && lxarx_hint_exclusive_access() && UseExtendedLoadAndReserveInstructionsPPC64) ? 1 : 0); }
+inline void Assembler::stwcx_(Register s, Register b)                            { emit_int32( STWCX_OPCODE | rs(s) | rb(b) | rc(1)); }
+inline void Assembler::stdcx_(Register s, Register b)                            { emit_int32( STDCX_OPCODE | rs(s) | rb(b) | rc(1)); }
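+// Illustrative sketch (not code from this file; register names are made up)
+// of how the load-reserve/store-conditional pair is typically used to build
+// an atomic update loop:
+//   Label retry;
+//   bind(retry);
+//   lwarx(Rtmp, Raddr, /*hint_exclusive_access=*/true); // load word and reserve
+//   addi(Rtmp, Rtmp, 1);                                // compute the new value
+//   stwcx_(Rtmp, Raddr);                                // store conditional, sets CR0
+//   bne(CCR0, retry);                                   // reservation lost, retry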
+
+// ra0 version
+inline void Assembler::lfs( FloatRegister d, int si16)   { emit_int32( LFS_OPCODE  | frt(d) | simm(si16,16)); }
+inline void Assembler::lfsx(FloatRegister d, Register b) { emit_int32( LFSX_OPCODE | frt(d) | rb(b)); }
+inline void Assembler::lfd( FloatRegister d, int si16)   { emit_int32( LFD_OPCODE  | frt(d) | simm(si16,16)); }
+inline void Assembler::lfdx(FloatRegister d, Register b) { emit_int32( LFDX_OPCODE | frt(d) | rb(b)); }
+
+// ra0 version
+inline void Assembler::stfs( FloatRegister s, int si16)   { emit_int32( STFS_OPCODE  | frs(s) | simm(si16, 16)); }
+inline void Assembler::stfsx(FloatRegister s, Register b) { emit_int32( STFSX_OPCODE | frs(s) | rb(b)); }
+inline void Assembler::stfd( FloatRegister s, int si16)   { emit_int32( STFD_OPCODE  | frs(s) | simm(si16, 16)); }
+inline void Assembler::stfdx(FloatRegister s, Register b) { emit_int32( STFDX_OPCODE | frs(s) | rb(b)); }
+
+// ra0 version
+inline void Assembler::lvebx( VectorRegister d, Register s2) { emit_int32( LVEBX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvehx( VectorRegister d, Register s2) { emit_int32( LVEHX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvewx( VectorRegister d, Register s2) { emit_int32( LVEWX_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvx(   VectorRegister d, Register s2) { emit_int32( LVX_OPCODE    | vrt(d) | rb(s2)); }
+inline void Assembler::lvxl(  VectorRegister d, Register s2) { emit_int32( LVXL_OPCODE   | vrt(d) | rb(s2)); }
+inline void Assembler::stvebx(VectorRegister d, Register s2) { emit_int32( STVEBX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvehx(VectorRegister d, Register s2) { emit_int32( STVEHX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvewx(VectorRegister d, Register s2) { emit_int32( STVEWX_OPCODE | vrt(d) | rb(s2)); }
+inline void Assembler::stvx(  VectorRegister d, Register s2) { emit_int32( STVX_OPCODE   | vrt(d) | rb(s2)); }
+inline void Assembler::stvxl( VectorRegister d, Register s2) { emit_int32( STVXL_OPCODE  | vrt(d) | rb(s2)); }
+inline void Assembler::lvsl(  VectorRegister d, Register s2) { emit_int32( LVSL_OPCODE   | vrt(d) | rb(s2)); }
+inline void Assembler::lvsr(  VectorRegister d, Register s2) { emit_int32( LVSR_OPCODE   | vrt(d) | rb(s2)); }
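+// Illustrative sketch: lvsl/lvsr yield permute control vectors for handling
+// misaligned data. The classic AltiVec misaligned-load idiom (hypothetical
+// registers, with Raddr15 assumed to hold addr + 15) looks like:
+//   lvx  (Vhi,  Raddr);    // aligned quadword containing the first byte
+//   lvx  (Vlo,  Raddr15);  // aligned quadword containing the last byte
+//   lvsl (Vctl, Raddr);    // permute control from the low address bits
+//   vperm(Vres, Vhi, Vlo, Vctl); // splice both quadwords into the result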
+
+inline void Assembler::load_const(Register d, void* x, Register tmp) {
+   load_const(d, (long)x, tmp);
+}
+
+// Load a 64-bit constant encoded by a `Label'. This works for bound
+// labels as well as unbound ones. For unbound labels, the code will
+// be patched as soon as the label gets bound.
+inline void Assembler::load_const(Register d, Label& L, Register tmp) {
+  load_const(d, target(L), tmp);
+}
+
+// Load a 64-bit constant encoded by an AddressLiteral. Patchable.
+inline void Assembler::load_const(Register d, AddressLiteral& a, Register tmp) {
+  assert(d != R0, "R0 not allowed");
+  // First relocate (we don't change the offset in the RelocationHolder,
+  // just pass a.rspec()), then delegate to load_const(Register, long).
+  relocate(a.rspec());
+  load_const(d, (long)a.value(), tmp);
+}
+
+
+#endif // CPU_PPC_VM_ASSEMBLER_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/bytecodeInterpreter_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
+#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
+
+// Platform-specific definitions for the C++-based interpreter.
+#define LOTS_OF_REGS    /* Lets the interpreter use plenty of registers */
+
+private:
+
+    // Save the bottom of the stack after frame manager setup, for ease of restoration
+    // after a recursive interpreter call returns.
+    intptr_t* _frame_bottom;              // Saved bottom of frame manager frame.
+    address   _last_Java_pc;              // Pc to return to in frame manager.
+    intptr_t* _last_Java_fp;              // frame pointer
+    intptr_t* _last_Java_sp;              // stack pointer
+    interpreterState _self_link;          // Previous interpreter state (sometimes points to itself?).
+    double    _native_fresult;            // Save result of native calls that might return floats.
+    intptr_t  _native_lresult;            // Save result of native calls that might return handle/longs.
+
+public:
+    address last_Java_pc(void)            { return _last_Java_pc; }
+    intptr_t* last_Java_fp(void)          { return _last_Java_fp; }
+
+    static ByteSize native_lresult_offset() {
+      return byte_offset_of(BytecodeInterpreter, _native_lresult);
+    }
+
+    static ByteSize native_fresult_offset() {
+      return byte_offset_of(BytecodeInterpreter, _native_fresult);
+    }
+
+    static void pd_layout_interpreterState(interpreterState istate, address last_Java_pc, intptr_t* last_Java_fp);
+
+#define SET_LAST_JAVA_FRAME()   THREAD->frame_anchor()->set(istate->_last_Java_sp, istate->_last_Java_pc);
+#define RESET_LAST_JAVA_FRAME() THREAD->frame_anchor()->clear();
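+// (These macros record resp. clear the last Java sp/pc in the thread's frame
+//  anchor so that stack walking remains possible while the interpreter calls
+//  out into native or VM code.)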
+
+
+// Macros for accessing the stack.
+#undef STACK_INT
+#undef STACK_FLOAT
+#undef STACK_ADDR
+#undef STACK_OBJECT
+#undef STACK_DOUBLE
+#undef STACK_LONG
+
+// JavaStack Implementation
+#define STACK_SLOT(offset)    ((address) &topOfStack[-(offset)])
+#define STACK_INT(offset)     (*((jint*) &topOfStack[-(offset)]))
+#define STACK_FLOAT(offset)   (*((jfloat *) &topOfStack[-(offset)]))
+#define STACK_OBJECT(offset)  (*((oop *) &topOfStack [-(offset)]))
+#define STACK_DOUBLE(offset)  (((VMJavaVal64*) &topOfStack[-(offset)])->d)
+#define STACK_LONG(offset)    (((VMJavaVal64 *) &topOfStack[-(offset)])->l)
+
+#define SET_STACK_SLOT(value, offset)   (*(intptr_t*)&topOfStack[-(offset)] = *(intptr_t*)(value))
+#define SET_STACK_ADDR(value, offset)   (*((address *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_INT(value, offset)    (*((jint *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_FLOAT(value, offset)  (*((jfloat *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_OBJECT(value, offset) (*((oop *)&topOfStack[-(offset)]) = (value))
+#define SET_STACK_DOUBLE(value, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d = (value))
+#define SET_STACK_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&topOfStack[-(offset)])->d =  \
+                                                 ((VMJavaVal64*)(addr))->d)
+#define SET_STACK_LONG(value, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l = (value))
+#define SET_STACK_LONG_FROM_ADDR(addr, offset)   (((VMJavaVal64*)&topOfStack[-(offset)])->l =  \
+                                                 ((VMJavaVal64*)(addr))->l)
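+// (Convention: topOfStack points at the first free slot and the expression
+//  stack grows towards lower addresses, so offset -1 addresses the topmost
+//  element. Illustration only, not code from this file:
+//    jint top = STACK_INT(-1);     // read the top of the expression stack
+//    SET_STACK_INT(top + 1, -1);   // replace it in place)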
+// JavaLocals implementation
+
+#define LOCALS_SLOT(offset)    ((intptr_t*)&locals[-(offset)])
+#define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
+#define LOCALS_INT(offset)     (*(jint*)&(locals[-(offset)]))
+#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
+#define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
+#define LOCALS_DOUBLE_AT(offset) (((address)&locals[-((offset) + 1)]))
+
+#define SET_LOCALS_SLOT(value, offset)    (*(intptr_t*)&locals[-(offset)] = *(intptr_t *)(value))
+#define SET_LOCALS_INT(value, offset)     (*((jint *)&locals[-(offset)]) = (value))
+#define SET_LOCALS_DOUBLE(value, offset)  (((VMJavaVal64*)&locals[-((offset)+1)])->d = (value))
+#define SET_LOCALS_LONG(value, offset)    (((VMJavaVal64*)&locals[-((offset)+1)])->l = (value))
+#define SET_LOCALS_DOUBLE_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->d = \
+                                                  ((VMJavaVal64*)(addr))->d)
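+// (Longs and doubles occupy two local slots; with locals growing towards
+//  lower addresses the value lives in the higher-numbered slot, hence the
+//  -((offset) + 1) addressing above.)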
+
+
+#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/bytecodeInterpreter_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
+#define CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
+
+#ifdef CC_INTERP
+
+// Inline interpreter functions for ppc.
+
+#include <math.h>
+
+inline jfloat BytecodeInterpreter::VMfloatAdd(jfloat op1, jfloat op2) { return op1 + op2; }
+inline jfloat BytecodeInterpreter::VMfloatSub(jfloat op1, jfloat op2) { return op1 - op2; }
+inline jfloat BytecodeInterpreter::VMfloatMul(jfloat op1, jfloat op2) { return op1 * op2; }
+inline jfloat BytecodeInterpreter::VMfloatDiv(jfloat op1, jfloat op2) { return op1 / op2; }
+inline jfloat BytecodeInterpreter::VMfloatRem(jfloat op1, jfloat op2) { return (jfloat)fmod((double)op1, (double)op2); }
+
+inline jfloat BytecodeInterpreter::VMfloatNeg(jfloat op) { return -op; }
+
+inline int32_t BytecodeInterpreter::VMfloatCompare(jfloat op1, jfloat op2, int32_t direction) {
+  return ( op1 < op2 ? -1 :
+               op1 > op2 ? 1 :
+                   op1 == op2 ? 0 :
+                       (direction == -1 || direction == 1) ? direction : 0);
+}
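+// (The 'direction' argument encodes the bytecode's NaN treatment: fcmpl
+//  pushes -1 for an unordered result while fcmpg pushes +1, so callers pass
+//  -1 or +1 accordingly.)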
+
+inline void BytecodeInterpreter::VMmemCopy64(uint32_t to[2], const uint32_t from[2]) {
+  to[0] = from[0]; to[1] = from[1];
+}
+
+// The long operations depend on compiler support for "long long" on ppc.
+
+inline jlong BytecodeInterpreter::VMlongAdd(jlong op1, jlong op2) {
+  return op1 + op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongAnd(jlong op1, jlong op2) {
+  return op1 & op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongDiv(jlong op1, jlong op2) {
+  if (op1 == min_jlong && op2 == -1) return op1;
+  return op1 / op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongMul(jlong op1, jlong op2) {
+  return op1 * op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongOr(jlong op1, jlong op2) {
+  return op1 | op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongSub(jlong op1, jlong op2) {
+  return op1 - op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongXor(jlong op1, jlong op2) {
+  return op1 ^ op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongRem(jlong op1, jlong op2) {
+  if (op1 == min_jlong && op2 == -1) return 0;
+  return op1 % op2;
+}
+
+inline jlong BytecodeInterpreter::VMlongUshr(jlong op1, jint op2) {
+  return ((uint64_t) op1) >> (op2 & 0x3F);
+}
+
+inline jlong BytecodeInterpreter::VMlongShr(jlong op1, jint op2) {
+  return op1 >> (op2 & 0x3F);
+}
+
+inline jlong BytecodeInterpreter::VMlongShl(jlong op1, jint op2) {
+  return op1 << (op2 & 0x3F);
+}
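+// (Per JLS 15.19 only the low six bits of a long shift count are used, hence
+//  the '& 0x3F' above; e.g. VMlongShl(1L, 64) yields 1L, not 0.)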
+
+inline jlong BytecodeInterpreter::VMlongNeg(jlong op) {
+  return -op;
+}
+
+inline jlong BytecodeInterpreter::VMlongNot(jlong op) {
+  return ~op;
+}
+
+inline int32_t BytecodeInterpreter::VMlongLtz(jlong op) {
+  return (op <= 0);
+}
+
+inline int32_t BytecodeInterpreter::VMlongGez(jlong op) {
+  return (op >= 0);
+}
+
+inline int32_t BytecodeInterpreter::VMlongEqz(jlong op) {
+  return (op == 0);
+}
+
+inline int32_t BytecodeInterpreter::VMlongEq(jlong op1, jlong op2) {
+  return (op1 == op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongNe(jlong op1, jlong op2) {
+  return (op1 != op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongGe(jlong op1, jlong op2) {
+  return (op1 >= op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongLe(jlong op1, jlong op2) {
+  return (op1 <= op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongLt(jlong op1, jlong op2) {
+  return (op1 < op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongGt(jlong op1, jlong op2) {
+  return (op1 > op2);
+}
+
+inline int32_t BytecodeInterpreter::VMlongCompare(jlong op1, jlong op2) {
+  return (VMlongLt(op1, op2) ? -1 : VMlongGt(op1, op2) ? 1 : 0);
+}
+
+// Long conversions
+
+inline jdouble BytecodeInterpreter::VMlong2Double(jlong val) {
+  return (jdouble) val;
+}
+
+inline jfloat BytecodeInterpreter::VMlong2Float(jlong val) {
+  return (jfloat) val;
+}
+
+inline jint BytecodeInterpreter::VMlong2Int(jlong val) {
+  return (jint) val;
+}
+
+// Double Arithmetic
+
+inline jdouble BytecodeInterpreter::VMdoubleAdd(jdouble op1, jdouble op2) {
+  return op1 + op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleDiv(jdouble op1, jdouble op2) {
+  return op1 / op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleMul(jdouble op1, jdouble op2) {
+  return op1 * op2;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleNeg(jdouble op) {
+  return -op;
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleRem(jdouble op1, jdouble op2) {
+  return fmod(op1, op2);
+}
+
+inline jdouble BytecodeInterpreter::VMdoubleSub(jdouble op1, jdouble op2) {
+  return op1 - op2;
+}
+
+inline int32_t BytecodeInterpreter::VMdoubleCompare(jdouble op1, jdouble op2, int32_t direction) {
+  return ( op1 < op2 ? -1 :
+               op1 > op2 ? 1 :
+                   op1 == op2 ? 0 :
+                       (direction == -1 || direction == 1) ? direction : 0);
+}
+
+// Double Conversions
+
+inline jfloat BytecodeInterpreter::VMdouble2Float(jdouble val) {
+  return (jfloat) val;
+}
+
+// Float Conversions
+
+inline jdouble BytecodeInterpreter::VMfloat2Double(jfloat op) {
+  return (jdouble) op;
+}
+
+// Integer Arithmetic
+
+inline jint BytecodeInterpreter::VMintAdd(jint op1, jint op2) {
+  return op1 + op2;
+}
+
+inline jint BytecodeInterpreter::VMintAnd(jint op1, jint op2) {
+  return op1 & op2;
+}
+
+inline jint BytecodeInterpreter::VMintDiv(jint op1, jint op2) {
+  /* it's possible we could catch this special case implicitly */
+  if ((juint)op1 == 0x80000000 && op2 == -1) return op1;
+  else return op1 / op2;
+}
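+// (JLS 15.17.2 requires Integer.MIN_VALUE / -1 == Integer.MIN_VALUE; the
+//  explicit check above avoids the overflowing hardware division for exactly
+//  this operand pair.)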
+
+inline jint BytecodeInterpreter::VMintMul(jint op1, jint op2) {
+  return op1 * op2;
+}
+
+inline jint BytecodeInterpreter::VMintNeg(jint op) {
+  return -op;
+}
+
+inline jint BytecodeInterpreter::VMintOr(jint op1, jint op2) {
+  return op1 | op2;
+}
+
+inline jint BytecodeInterpreter::VMintRem(jint op1, jint op2) {
+  /* it's possible we could catch this special case implicitly */
+  if ((juint)op1 == 0x80000000 && op2 == -1) return 0;
+  else return op1 % op2;
+}
+
+inline jint BytecodeInterpreter::VMintShl(jint op1, jint op2) {
+  return op1 <<  (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintShr(jint op1, jint op2) {
+  return op1 >>  (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
+  return op1 - op2;
+}
+
+inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+  return ((juint) op1) >> (op2 & 0x1f);
+}
+
+inline jint BytecodeInterpreter::VMintXor(jint op1, jint op2) {
+  return op1 ^ op2;
+}
+
+inline jdouble BytecodeInterpreter::VMint2Double(jint val) {
+  return (jdouble) val;
+}
+
+inline jfloat BytecodeInterpreter::VMint2Float(jint val) {
+  return (jfloat) val;
+}
+
+inline jlong BytecodeInterpreter::VMint2Long(jint val) {
+  return (jlong) val;
+}
+
+inline jchar BytecodeInterpreter::VMint2Char(jint val) {
+  return (jchar) val;
+}
+
+inline jshort BytecodeInterpreter::VMint2Short(jint val) {
+  return (jshort) val;
+}
+
+inline jbyte BytecodeInterpreter::VMint2Byte(jint val) {
+  return (jbyte) val;
+}
+
+#endif // CC_INTERP
+
+#endif // CPU_PPC_VM_BYTECODEINTERPRETER_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/bytecodes_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/bytecodes.hpp"
+
+void Bytecodes::pd_initialize() {
+  // No ppc specific initialization.
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/bytecodes_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_BYTECODES_PPC_HPP
+#define CPU_PPC_VM_BYTECODES_PPC_HPP
+
+// No ppc64 specific bytecodes
+
+#endif // CPU_PPC_VM_BYTECODES_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/bytes_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_BYTES_PPC_HPP
+#define CPU_PPC_VM_BYTES_PPC_HPP
+
+#include "memory/allocation.hpp"
+
+class Bytes: AllStatic {
+ public:
+  // Efficient reading and writing of unaligned unsigned data in platform-specific byte ordering
+  // PowerPC needs to check for alignment.
+
+  // Can I count on address always being a pointer to an unsigned char? Yes.
+
+  // Returns true if the byte ordering used by Java is different from the native byte
+  // ordering of the underlying machine. For example, true for Intel x86, false for
+  // Solaris on Sparc.
+  static inline bool is_Java_byte_ordering_different() { return false; }
+
+  // Thus, a swap between native and Java ordering is always a no-op:
+  static inline u2   swap_u2(u2 x)  { return x; }
+  static inline u4   swap_u4(u4 x)  { return x; }
+  static inline u8   swap_u8(u8 x)  { return x; }
+
+  static inline u2   get_native_u2(address p) {
+    return (intptr_t(p) & 1) == 0
+             ?   *(u2*)p
+             :   ( u2(p[0]) << 8 )
+               | ( u2(p[1])      );
+  }
+
+  static inline u4   get_native_u4(address p) {
+    switch (intptr_t(p) & 3) {
+     case 0:  return *(u4*)p;
+
+     case 2:  return (  u4( ((u2*)p)[0] ) << 16  )
+                   | (  u4( ((u2*)p)[1] )        );
+
+    default:  return ( u4(p[0]) << 24 )
+                   | ( u4(p[1]) << 16 )
+                   | ( u4(p[2]) <<  8 )
+                   |   u4(p[3]);
+    }
+  }
+
+  static inline u8   get_native_u8(address p) {
+    switch (intptr_t(p) & 7) {
+      case 0:  return *(u8*)p;
+
+      case 4:  return (  u8( ((u4*)p)[0] ) << 32  )
+                    | (  u8( ((u4*)p)[1] )        );
+
+      case 2:  return (  u8( ((u2*)p)[0] ) << 48  )
+                    | (  u8( ((u2*)p)[1] ) << 32  )
+                    | (  u8( ((u2*)p)[2] ) << 16  )
+                    | (  u8( ((u2*)p)[3] )        );
+
+     default:  return ( u8(p[0]) << 56 )
+                    | ( u8(p[1]) << 48 )
+                    | ( u8(p[2]) << 40 )
+                    | ( u8(p[3]) << 32 )
+                    | ( u8(p[4]) << 24 )
+                    | ( u8(p[5]) << 16 )
+                    | ( u8(p[6]) <<  8 )
+                    |   u8(p[7]);
+    }
+  }
+
+
+
+  static inline void put_native_u2(address p, u2 x) {
+    if ( (intptr_t(p) & 1) == 0 ) { *(u2*)p = x; }
+    else {
+      p[0] = x >> 8;
+      p[1] = x;
+    }
+  }
+
+  static inline void put_native_u4(address p, u4 x) {
+    switch ( intptr_t(p) & 3 ) {
+    case 0:  *(u4*)p = x;
+              break;
+
+    case 2:  ((u2*)p)[0] = x >> 16;
+             ((u2*)p)[1] = x;
+             break;
+
+    default: ((u1*)p)[0] = x >> 24;
+             ((u1*)p)[1] = x >> 16;
+             ((u1*)p)[2] = x >>  8;
+             ((u1*)p)[3] = x;
+             break;
+    }
+  }
+
+  static inline void put_native_u8(address p, u8 x) {
+    switch ( intptr_t(p) & 7 ) {
+    case 0:  *(u8*)p = x;
+             break;
+
+    case 4:  ((u4*)p)[0] = x >> 32;
+             ((u4*)p)[1] = x;
+             break;
+
+    case 2:  ((u2*)p)[0] = x >> 48;
+             ((u2*)p)[1] = x >> 32;
+             ((u2*)p)[2] = x >> 16;
+             ((u2*)p)[3] = x;
+             break;
+
+    default: ((u1*)p)[0] = x >> 56;
+             ((u1*)p)[1] = x >> 48;
+             ((u1*)p)[2] = x >> 40;
+             ((u1*)p)[3] = x >> 32;
+             ((u1*)p)[4] = x >> 24;
+             ((u1*)p)[5] = x >> 16;
+             ((u1*)p)[6] = x >>  8;
+             ((u1*)p)[7] = x;
+    }
+  }
+
+  // Efficient reading and writing of unaligned unsigned data in Java byte ordering (i.e. big-endian ordering)
+  // (no byte-order reversal is needed since Power CPUs are big-endian).
+  static inline u2   get_Java_u2(address p) { return get_native_u2(p); }
+  static inline u4   get_Java_u4(address p) { return get_native_u4(p); }
+  static inline u8   get_Java_u8(address p) { return get_native_u8(p); }
+
+  static inline void put_Java_u2(address p, u2 x)     { put_native_u2(p, x); }
+  static inline void put_Java_u4(address p, u4 x)     { put_native_u4(p, x); }
+  static inline void put_Java_u8(address p, u8 x)     { put_native_u8(p, x); }
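+
+  // Illustrative (hypothetical) use: the interpreter reads multi-byte bytecode
+  // operands, which are usually not naturally aligned, e.g.
+  //   u2 index = Bytes::get_Java_u2(bcp + 1);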
+};
+
+#endif // CPU_PPC_VM_BYTES_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/c2_globals_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_C2_GLOBALS_PPC_HPP
+#define CPU_PPC_VM_C2_GLOBALS_PPC_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the server compiler.
+// (see c2_globals.hpp).
+
+define_pd_global(bool, BackgroundCompilation,        true);
+define_pd_global(bool, CICompileOSR,                 true);
+define_pd_global(bool, InlineIntrinsics,             true);
+define_pd_global(bool, PreferInterpreterNativeStubs, false);
+define_pd_global(bool, ProfileTraps,                 true);
+define_pd_global(bool, UseOnStackReplacement,        true);
+define_pd_global(bool, ProfileInterpreter,           true);
+define_pd_global(bool, TieredCompilation,            false);
+define_pd_global(intx, CompileThreshold,             10000);
+define_pd_global(intx, BackEdgeThreshold,            140000);
+
+define_pd_global(intx, OnStackReplacePercentage,     140);
+define_pd_global(intx, ConditionalMoveLimit,         3);
+define_pd_global(intx, FLOATPRESSURE,                28);
+define_pd_global(intx, FreqInlineSize,               175);
+define_pd_global(intx, MinJumpTableSize,             10);
+define_pd_global(intx, INTPRESSURE,                  25);
+define_pd_global(intx, InteriorEntryAlignment,       16);
+define_pd_global(intx, NewSizeThreadIncrease,        ScaleForWordSize(4*K));
+define_pd_global(intx, RegisterCostAreaRatio,        16000);
+define_pd_global(bool, UseTLAB,                      true);
+define_pd_global(bool, ResizeTLAB,                   true);
+define_pd_global(intx, LoopUnrollLimit,              60);
+
+// Peephole and CISC spilling both break the graph, and so make the
+// scheduler sick.
+define_pd_global(bool, OptoPeephole,                 false);
+define_pd_global(bool, UseCISCSpill,                 false);
+define_pd_global(bool, OptoBundling,                 false);
+// GL:
+// Detected a problem with unscaled compressed oops and
+// narrow_oop_use_complex_address() == false.
+// -Djava.io.tmpdir=./tmp -jar SPECjvm2008.jar -ikv -wt 3 -it 3
+//   -bt 1 --base compiler.sunflow
+// fails in Lower.visitIf->translate->translate->translate and
+// throws an unexpected NPE. A load and a store seem to be
+// reordered. The Java code reads roughly:
+//   loc = x.f
+//   x.f = 0
+//   NullCheck loc
+// While the generated assembly reads:
+//   x.f = 0
+//   loc = x.f
+//   NullCheck loc
+define_pd_global(bool, OptoScheduling,               false);
+
+define_pd_global(intx, InitialCodeCacheSize,         2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(intx, ReservedCodeCacheSize,        256*M);
+define_pd_global(intx, CodeCacheExpansionSize,       64*K);
+
+// Ergonomics related flags
+define_pd_global(uint64_t, MaxRAM,                   4ULL*G);
+define_pd_global(uintx, CodeCacheMinBlockLength,     4);
+define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
+
+define_pd_global(bool,  TrapBasedRangeChecks,        false);
+
+// Heap related flags
+define_pd_global(uintx, MetaspaceSize,               ScaleForWordSize(16*M));
+
+// Ergonomics related flags
+define_pd_global(bool, NeverActAsServerClassMachine, false);
+
+#endif // CPU_PPC_VM_C2_GLOBALS_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/c2_init_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "opto/compile.hpp"
+#include "opto/node.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/debug.hpp"
+
+// Processor dependent initialization for ppc.
+
+void Compile::pd_compiler2_init() {
+
+  // Power7 and later
+  if (PowerArchitecturePPC64 > 6) {
+    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
+      FLAG_SET_ERGO(bool, UsePopCountInstruction, true);
+    }
+  }
+
+  if (PowerArchitecturePPC64 == 6) {
+    if (FLAG_IS_DEFAULT(InsertEndGroupPPC64)) {
+      FLAG_SET_ERGO(bool, InsertEndGroupPPC64, true);
+    }
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/codeBuffer_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_CODEBUFFER_PPC_HPP
+#define CPU_PPC_VM_CODEBUFFER_PPC_HPP
+
+private:
+  void pd_initialize() {}
+
+public:
+  void flush_bundle(bool start_new_bundle) {}
+
+#endif // CPU_PPC_VM_CODEBUFFER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/compiledIC_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
+#include "code/icBuffer.hpp"
+#include "code/nmethod.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepoint.hpp"
+#ifdef COMPILER2
+#include "opto/matcher.hpp"
+#endif
+
+// Release the CompiledICHolder* associated with this call site if there is one.
+void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  if (is_icholder_entry(call->destination())) {
+    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
+    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
+  }
+}
+
+bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
+  // This call site might have become stale so inspect it carefully.
+  NativeCall* call = nativeCall_at(call_site->addr());
+  return is_icholder_entry(call->destination());
+}
+
+//-----------------------------------------------------------------------------
+// High-level access to an inline cache. Guaranteed to be MT-safe.
+
+CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
+  : _ic_call(call)
+{
+  address ic_call = call->instruction_address();
+
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  // Search for the ic_call at the given address.
+  RelocIterator iter(nm, ic_call, ic_call+1);
+  bool ret = iter.next();
+  assert(ret == true, "relocInfo must exist at this address");
+  assert(iter.addr() == ic_call, "must find ic_call");
+  if (iter.type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter.virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
+// ----------------------------------------------------------------------------
+
+// A PPC CompiledStaticCall looks like this:
+//
+// >>>> consts
+//
+// [call target1]
+// [IC cache]
+// [call target2]
+//
+// <<<< consts
+// >>>> insts
+//
+// bl offset16               -+  -+             ??? // How many bits available?
+//                            |   |
+// <<<< insts                 |   |
+// >>>> stubs                 |   |
+//                            |   |- trampoline_stub_Reloc
+// trampoline stub:           | <-+
+//   r2 = toc                 |
+//   r2 = [r2 + offset]       |       // Load call target1 from const section
+//   mtctr r2                 |
+//   bctr                     |- static_stub_Reloc
+// comp_to_interp_stub:   <---+
+//   r1 = toc
+//   ICreg = [r1 + IC_offset]         // Load IC from const section
+//   r1    = [r1 + offset]            // Load call target2 from const section
+//   mtctr r1
+//   bctr
+//
+// <<<< stubs
+//
+// The call instruction in the code either
+// - branches directly to a compiled method if offset encodable in instruction
+// - branches to the trampoline stub if offset to compiled method not encodable
+// - branches to the compiled_to_interp stub if target interpreted
+//
+// Further there are three relocations from the loads to the constants in
+// the constant section.
+//
+// Usage of r1 and r2 in the stubs makes it possible to distinguish them.
+
+const int IC_pos_in_java_to_interp_stub = 8;
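+// (Presumably 8 = 2 * BytesPerInstWord: in the stub emitted below, the IC load
+//  starts right after the two-instruction TOC calculation.)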
+#define __ _masm.
+void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
+#ifdef COMPILER2
+  // Get the mark within main instrs section which is set to the address of the call.
+  address call_addr = cbuf.insts_mark();
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a stub.
+  MacroAssembler _masm(&cbuf);
+
+  // Start the stub.
+  address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+  if (stub == NULL) {
+    Compile::current()->env()->record_out_of_memory_failure();
+    return;
+  }
+
+  // For java_to_interp stubs we use R11_scratch1 as scratch register
+  // and in call trampoline stubs we use R12_scratch2. This way we
+  // can distinguish them (see is_NativeCallTrampolineStub_at()).
+  Register reg_scratch = R11_scratch1;
+
+  // Create a static stub relocation which relates this stub
+  // with the call instruction at call_addr in the
+  // instructions code-section.
+  __ relocate(static_stub_Relocation::spec(call_addr));
+  const int stub_start_offset = __ offset();
+
+  // Now, create the stub's code:
+  // - load the TOC
+  // - load the inline cache oop from the constant pool
+  // - load the call target from the constant pool
+  // - call
+  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
+  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
+  __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()), ic, reg_scratch);
+
+  if (ReoptimizeCallSequences) {
+    __ b64_patchable((address)-1, relocInfo::none);
+  } else {
+    AddressLiteral a((address)-1);
+    __ load_const_from_method_toc(reg_scratch, a, reg_scratch);
+    __ mtctr(reg_scratch);
+    __ bctr();
+  }
+
+  // FIXME: Assert that the stub can be identified and patched.
+
+  // Java_to_interp_stub_size should be good.
+  assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
+         "should be good size");
+  assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
+         "must not confuse java_to_interp with trampoline stubs");
+
+  // End the stub.
+  __ end_a_stub();
+#else
+  ShouldNotReachHere();
+#endif
+}
+#undef __
+
+// Size of the java_to_interp stub. This does not need to be accurate; it
+// merely has to be larger than or equal to the real size of the stub.
+// Used for optimization in Compile::Shorten_branches.
+int CompiledStaticCall::to_interp_stub_size() {
+  return 12 * BytesPerInstWord;
+}
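+
+// For illustration (assuming BytesPerInstWord == 4): the stub body above
+// emits roughly 5 to 9 instructions, depending on the
+// ReoptimizeCallSequences variant, so the bound of 12 * 4 = 48 bytes leaves
+// a comfortable safety margin.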
+
+// Number of relocation entries for the call stub (compiled Java to interpreter).
+// Used for optimization in Compile::Shorten_branches.
+int CompiledStaticCall::reloc_to_interp_stub() {
+  return 5;
+}
+
+void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub();
+  guarantee(stub != NULL, "stub not found");
+
+  if (TraceICs) {
+    ResourceMark rm;
+    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+                  instruction_address(),
+                  callee->name_and_sig_as_C_string());
+  }
+
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
+         "a) MT-unsafe modification of inline cache");
+  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
+         "b) MT-unsafe modification of inline cache");
+
+  // Update stub.
+  method_holder->set_data((intptr_t)callee());
+  jump->set_jump_destination(entry);
+
+  // Update jump to call.
+  set_destination_mt_safe(stub);
+}
+
+void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
+  // Reset stub.
+  address stub = static_stub->addr();
+  assert(stub != NULL, "stub not found");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+  method_holder->set_data(0);
+  jump->set_jump_destination((address)-1);
+}
+
+//-----------------------------------------------------------------------------
+// Non-product mode code
+#ifndef PRODUCT
+
+void CompiledStaticCall::verify() {
+  // Verify call.
+  NativeCall::verify();
+  if (os::is_MP()) {
+    verify_alignment();
+  }
+
+  // Verify stub.
+  address stub = find_stub();
+  assert(stub != NULL, "no stub found for static call");
+  // Creation also verifies the object.
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
+  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
+
+  // Verify state.
+  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
+}
+
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/copy_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_COPY_PPC_HPP
+#define CPU_PPC_VM_COPY_PPC_HPP
+
+#ifndef PPC64
+#error "copy currently only implemented for PPC64"
+#endif
+
+// Inline functions for memory copy and fill.
+
+static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  (void)memmove(to, from, count * HeapWordSize);
+}
+
+static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
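+  // The cases below fall through deliberately: small counts are copied
+  // word by word, larger counts defer to memcpy.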
+  switch (count) {
+  case 8:  to[7] = from[7];
+  case 7:  to[6] = from[6];
+  case 6:  to[5] = from[5];
+  case 5:  to[4] = from[4];
+  case 4:  to[3] = from[3];
+  case 3:  to[2] = from[2];
+  case 2:  to[1] = from[1];
+  case 1:  to[0] = from[0];
+  case 0:  break;
+  default: (void)memcpy(to, from, count * HeapWordSize);
+           break;
+  }
+}
+
+static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
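+  // The cases below fall through deliberately. The default path copies
+  // element-wise rather than via memcpy so that each HeapWord is written
+  // by a single 8-byte (word-atomic) store.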
+  switch (count) {
+  case 8:  to[7] = from[7];
+  case 7:  to[6] = from[6];
+  case 6:  to[5] = from[5];
+  case 5:  to[4] = from[4];
+  case 4:  to[3] = from[3];
+  case 3:  to[2] = from[2];
+  case 2:  to[1] = from[1];
+  case 1:  to[0] = from[0];
+  case 0:  break;
+  default: while (count-- > 0) {
+             *to++ = *from++;
+           }
+           break;
+  }
+}
+
+static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  (void)memmove(to, from, count * HeapWordSize);
+}
+
+static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
+  pd_disjoint_words(from, to, count);
+}
+
+static void pd_conjoint_bytes(void* from, void* to, size_t count) {
+  (void)memmove(to, from, count);
+}
+
+static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) {
+  (void)memmove(to, from, count);
+}
+
+// Template for atomic, element-wise copy.
+template <class T>
+static void copy_conjoint_atomic(T* from, T* to, size_t count) {
+  if (from > to) {
+    while (count-- > 0) {
+      // Copy forwards
+      *to++ = *from++;
+    }
+  } else {
+    from += count - 1;
+    to   += count - 1;
+    while (count-- > 0) {
+      // Copy backwards
+      *to-- = *from--;
+    }
+  }
+}
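+
+// For example (illustrative): with from == p, to == p + 1 and count == 3 the
+// regions overlap and from < to, so the loop copies backwards:
+// p[3] = p[2]; p[2] = p[1]; p[1] = p[0]; and no source element is clobbered
+// before it has been copied.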
+
+static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
+  // TODO: contribute optimized version.
+  copy_conjoint_atomic<jshort>(from, to, count);
+}
+
+static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
+  // TODO: contribute optimized version.
+  copy_conjoint_atomic<jint>(from, to, count);
+}
+
+static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
+  copy_conjoint_atomic<jlong>(from, to, count);
+}
+
+static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) {
+  copy_conjoint_atomic<oop>(from, to, count);
+}
+
+static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
+  pd_conjoint_bytes_atomic(from, to, count);
+}
+
+static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
+  // TODO: contribute optimized version.
+  pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count);
+}
+
+static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
+  // TODO: contribute optimized version.
+  pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
+}
+
+static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
+  pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count);
+}
+
+static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
+  pd_conjoint_oops_atomic((oop*)from, (oop*)to, count);
+}
+
+static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) {
+  julong* to = (julong*)tohw;
+  julong  v  = ((julong)value << 32) | value;
+  while (count-- > 0) {
+    *to++ = v;
+  }
+}
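+
+// For example (illustrative): value == 0xCAFEBABE yields
+// v == 0xCAFEBABECAFEBABE, so each 8-byte store fills one 64-bit HeapWord
+// with the 32-bit pattern replicated in both halves.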
+
+static void pd_fill_to_aligned_words(HeapWord* tohw, size_t count, juint value) {
+  pd_fill_to_words(tohw, count, value);
+}
+
+static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
+  (void)memset(to, value, count);
+}
+
+static void pd_zero_to_words(HeapWord* tohw, size_t count) {
+  pd_fill_to_words(tohw, count, 0);
+}
+
+static void pd_zero_to_bytes(void* to, size_t count) {
+  (void)memset(to, 0, count);
+}
+
+#endif // CPU_PPC_VM_COPY_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
+#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
+
+  address generate_normal_entry(void);
+  address generate_native_entry(void);
+
+  void lock_method(void);
+  void unlock_method(void);
+
+  void generate_counter_incr(Label& overflow);
+  void generate_counter_overflow(Label& do_continue);
+
+  void generate_more_monitors();
+  void generate_deopt_handling(Register result_index);
+
+  void generate_compute_interpreter_state(Label& exception_return);
+
+#endif // CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/cppInterpreter_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,3045 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/cppInterpreter.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#ifdef SHARK
+#include "shark/shark_globals.hpp"
+#endif
+
+#ifdef CC_INTERP
+
+#define __ _masm->
+
+// contains() is used for identifying interpreter frames during a stack-walk.
+// A frame with a PC in InterpretMethod must be identified as a normal C frame.
+bool CppInterpreter::contains(address pc) {
+  return _code->contains(pc);
+}
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+static address interpreter_frame_manager        = NULL;
+static address frame_manager_specialized_return = NULL;
+static address native_entry                     = NULL;
+
+static address interpreter_return_address       = NULL;
+
+static address unctrap_frame_manager_entry      = NULL;
+
+static address deopt_frame_manager_return_atos  = NULL;
+static address deopt_frame_manager_return_btos  = NULL;
+static address deopt_frame_manager_return_itos  = NULL;
+static address deopt_frame_manager_return_ltos  = NULL;
+static address deopt_frame_manager_return_ftos  = NULL;
+static address deopt_frame_manager_return_dtos  = NULL;
+static address deopt_frame_manager_return_vtos  = NULL;
+
+// A result handler converts/unboxes a native call result into
+// a java interpreter/compiler result. The current frame is an
+// interpreter frame.
+address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  return AbstractInterpreterGenerator::generate_result_handler_for(type);
+}
+
+// Convert a tosca-based result into a C++ interpreter stack-based result.
+address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType type) {
+  //
+  // A result is in the native abi result register from a native
+  // method call. We need to return this result to the interpreter by
+  // pushing the result on the interpreter's stack.
+  //
+  // Registers alive:
+  //   R3_ARG1(R3_RET)/F1_ARG1(F1_RET) - result to move
+  //   R4_ARG2                         - address of tos
+  //   LR
+  //
+  // Registers updated:
+  //   R3_RET(R3_ARG1)   - address of new tos (== R17_tos for T_VOID)
+  //
+
+  int number_of_used_slots = 1;
+
+  const Register tos = R4_ARG2;
+  Label done;
+  Label is_false;
+
+  address entry = __ pc();
+
+  switch (type) {
+  case T_BOOLEAN:
+    __ cmpwi(CCR0, R3_RET, 0);
+    __ beq(CCR0, is_false);
+    __ li(R3_RET, 1);
+    __ stw(R3_RET, 0, tos);
+    __ b(done);
+    __ bind(is_false);
+    __ li(R3_RET, 0);
+    __ stw(R3_RET, 0, tos);
+    break;
+  case T_BYTE:
+  case T_CHAR:
+  case T_SHORT:
+  case T_INT:
+    __ stw(R3_RET, 0, tos);
+    break;
+  case T_LONG:
+    number_of_used_slots = 2;
+    // mark unused slot for debugging
+    // long goes to topmost slot
+    __ std(R3_RET, -BytesPerWord, tos);
+    __ li(R3_RET, 0);
+    __ std(R3_RET, 0, tos);
+    break;
+  case T_OBJECT:
+    __ verify_oop(R3_RET);
+    __ std(R3_RET, 0, tos);
+    break;
+  case T_FLOAT:
+    __ stfs(F1_RET, 0, tos);
+    break;
+  case T_DOUBLE:
+    number_of_used_slots = 2;
+    // mark unused slot for debugging
+    __ li(R3_RET, 0);
+    __ std(R3_RET, 0, tos);
+    // double goes to topmost slot
+    __ stfd(F1_RET, -BytesPerWord, tos);
+    break;
+  case T_VOID:
+    number_of_used_slots = 0;
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  __ BIND(done);
+
+  // new expression stack top
+  __ addi(R3_RET, tos, -BytesPerWord * number_of_used_slots);
+
+  __ blr();
+
+  return entry;
+}
+
+address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType type) {
+  //
+  // Copy the result from the callee's stack to the caller's stack,
+  // caller and callee both being interpreted.
+  //
+  // Registers alive
+  //   R3_ARG1        - address of callee's tos + BytesPerWord
+  //   R4_ARG2        - address of caller's tos [i.e. free location]
+  //   LR
+  //
+  //   stack grows upwards, memory grows downwards.
+  //
+  //   [      free         ]  <-- callee's tos
+  //   [  optional result  ]  <-- R3_ARG1
+  //   [  optional dummy   ]
+  //          ...
+  //   [      free         ]  <-- caller's tos, R4_ARG2
+  //          ...
+  // Registers updated
+  //   R3_RET(R3_ARG1) - address of caller's new tos
+  //
+  //   stack grows upwards, memory grows downwards.
+  //
+  //   [      free         ]  <-- current tos, R3_RET
+  //   [  optional result  ]
+  //   [  optional dummy   ]
+  //          ...
+  //
+
+  const Register from = R3_ARG1;
+  const Register ret  = R3_ARG1;
+  const Register tos  = R4_ARG2;
+  const Register tmp1 = R21_tmp1;
+  const Register tmp2 = R22_tmp2;
+
+  address entry = __ pc();
+
+  switch (type) {
+  case T_BOOLEAN:
+  case T_BYTE:
+  case T_CHAR:
+  case T_SHORT:
+  case T_INT:
+  case T_FLOAT:
+    __ lwz(tmp1, 0, from);
+    __ stw(tmp1, 0, tos);
+    // New expression stack top.
+    __ addi(ret, tos, - BytesPerWord);
+    break;
+  case T_LONG:
+  case T_DOUBLE:
+    // Move both entries for debug purposes even though only one is live.
+    __ ld(tmp1, BytesPerWord, from);
+    __ ld(tmp2, 0, from);
+    __ std(tmp1, 0, tos);
+    __ std(tmp2, -BytesPerWord, tos);
+    // New expression stack top.
+    __ addi(ret, tos, - 2 * BytesPerWord); // two slots
+    break;
+  case T_OBJECT:
+    __ ld(tmp1, 0, from);
+    __ verify_oop(tmp1);
+    __ std(tmp1, 0, tos);
+    // New expression stack top.
+    __ addi(ret, tos, - BytesPerWord);
+    break;
+  case T_VOID:
+    // New expression stack top.
+    __ mr(ret, tos);
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  __ blr();
+
+  return entry;
+}
+
+address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicType type) {
+  //
+  // Load a result from the callee's stack into the caller's expecting
+  // return register, callee being interpreted, caller being call stub
+  // or jit code.
+  //
+  // Registers alive
+  //   R3_ARG1   - callee expression tos + BytesPerWord
+  //   LR
+  //
+  //   stack grows upwards, memory grows downwards.
+  //
+  //   [      free         ]  <-- callee's tos
+  //   [  optional result  ]  <-- R3_ARG1
+  //   [  optional dummy   ]
+  //          ...
+  //
+  // Registers updated
+  //   R3_RET(R3_ARG1)/F1_RET - result
+  //
+
+  const Register from = R3_ARG1;
+  const Register ret = R3_ARG1;
+  const FloatRegister fret = F1_ARG1;
+
+  address entry = __ pc();
+
+  // Implemented uniformly for both kinds of endianness. The interpreter
+  // implements boolean, byte, char, and short as jint (4 bytes).
+  switch (type) {
+  case T_BOOLEAN:
+  case T_CHAR:
+    // zero extension
+    __ lwz(ret, 0, from);
+    break;
+  case T_BYTE:
+  case T_SHORT:
+  case T_INT:
+    // sign extension
+    __ lwa(ret, 0, from);
+    break;
+  case T_LONG:
+    __ ld(ret, 0, from);
+    break;
+  case T_OBJECT:
+    __ ld(ret, 0, from);
+    __ verify_oop(ret);
+    break;
+  case T_FLOAT:
+    __ lfs(fret, 0, from);
+    break;
+  case T_DOUBLE:
+    __ lfd(fret, 0, from);
+    break;
+  case T_VOID:
+    break;
+  default:
+    ShouldNotReachHere();
+  }
+
+  __ blr();
+
+  return entry;
+}
+
+address CppInterpreter::return_entry(TosState state, int length, Bytecodes::Code code) {
+  assert(interpreter_return_address != NULL, "Not initialized");
+  return interpreter_return_address;
+}
+
+address CppInterpreter::deopt_entry(TosState state, int length) {
+  address ret = NULL;
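+  // Sub-int states (btos, ctos, stos) share the itos handler: the
+  // interpreter implements boolean, byte, char, and short as jint on the
+  // expression stack.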
+  if (length != 0) {
+    switch (state) {
+      case atos: ret = deopt_frame_manager_return_atos; break;
+      case btos:
+      case ctos:
+      case stos:
+      case itos: ret = deopt_frame_manager_return_itos; break;
+      case ltos: ret = deopt_frame_manager_return_ltos; break;
+      case ftos: ret = deopt_frame_manager_return_ftos; break;
+      case dtos: ret = deopt_frame_manager_return_dtos; break;
+      case vtos: ret = deopt_frame_manager_return_vtos; break;
+      default: ShouldNotReachHere();
+    }
+  } else {
+    ret = unctrap_frame_manager_entry;  // re-execute the bytecode (e.g. uncommon trap, popframe)
+  }
+  assert(ret != NULL, "Not initialized");
+  return ret;
+}
+
+//
+// Helpers for commoning out cases in the various type of method entries.
+//
+
+//
+// Registers alive
+//   R16_thread      - JavaThread*
+//   R1_SP           - old stack pointer
+//   R19_method      - callee's Method
+//   R17_tos         - address of caller's tos (prepushed)
+//   R15_prev_state  - address of caller's BytecodeInterpreter or 0
+//   return_pc in R21_tmp15 (only when called within generate_native_entry)
+//
+// Registers updated
+//   R14_state       - address of callee's interpreter state
+//   R1_SP           - new stack pointer
+//   CCR4_is_synced  - current method is synchronized
+//
+void CppInterpreterGenerator::generate_compute_interpreter_state(Label& stack_overflow_return) {
+  //
+  // Stack layout at this point:
+  //
+  //   F1      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
+  //           alignment (optional)
+  //           [F1's outgoing Java arguments]     <-- R17_tos
+  //           ...
+  //   F2      [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+
+  //=============================================================================
+  // Allocate space for locals other than the parameters, the
+  // interpreter state, monitors, and the expression stack.
+
+  const Register local_count        = R21_tmp1;
+  const Register parameter_count    = R22_tmp2;
+  const Register max_stack          = R23_tmp3;
+  // Must not be overwritten within this method!
+  // const Register return_pc         = R29_tmp9;
+
+  const ConditionRegister is_synced = CCR4_is_synced;
+  const ConditionRegister is_native = CCR6;
+  const ConditionRegister is_static = CCR7;
+
+  assert(is_synced != is_native, "condition code registers must be distinct");
+  assert(is_synced != is_static, "condition code registers must be distinct");
+  assert(is_native != is_static, "condition code registers must be distinct");
+
+  {
+
+  // Local registers
+  const Register top_frame_size     = R24_tmp4;
+  const Register access_flags       = R25_tmp5;
+  const Register state_offset       = R26_tmp6;
+  Register mem_stack_limit          = R27_tmp7;
+  const Register page_size          = R28_tmp8;
+
+  BLOCK_COMMENT("compute_interpreter_state {");
+
+  // access_flags = method->access_flags();
+  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  __ lwa(access_flags, method_(access_flags));
+
+  // parameter_count = method->constMethod->size_of_parameters();
+  // TODO: PPC port: assert(2 == ConstMethod::sz_size_of_parameters(), "unexpected field size");
+  __ ld(max_stack, in_bytes(Method::const_offset()), R19_method);   // Max_stack holds constMethod for a while.
+  __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), max_stack);
+
+  // local_count = method->constMethod()->max_locals();
+  // TODO: PPC port: assert(2 == ConstMethod::sz_max_locals(), "unexpected field size");
+  __ lhz(local_count, in_bytes(ConstMethod::size_of_locals_offset()), max_stack);
+
+  // max_stack = method->constMethod()->max_stack();
+  // TODO: PPC port: assert(2 == ConstMethod::sz_max_stack(), "unexpected field size");
+  __ lhz(max_stack, in_bytes(ConstMethod::max_stack_offset()), max_stack);
+
+  if (EnableInvokeDynamic) {
+    // Take into account 'extra_stack_entries' needed by method handles (see method.hpp).
+    __ addi(max_stack, max_stack, Method::extra_stack_entries());
+  }
+
+  // mem_stack_limit = thread->stack_limit();
+  __ ld(mem_stack_limit, thread_(stack_overflow_limit));
+
+  // Point locals at the first argument. Method's locals are the
+  // parameters on top of caller's expression stack.
+
+  // tos points past last Java argument
+  __ sldi(R18_locals, parameter_count, Interpreter::logStackElementSize);
+  __ add(R18_locals, R17_tos, R18_locals);
+
+  // R18_locals - i*BytesPerWord points to i-th Java local (i starts at 0)
+
+  // Set is_native, is_synced, is_static - will be used later.
+  __ testbitdi(is_native, R0, access_flags, JVM_ACC_NATIVE_BIT);
+  __ testbitdi(is_synced, R0, access_flags, JVM_ACC_SYNCHRONIZED_BIT);
+  assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
+  __ testbitdi(is_static, R0, access_flags, JVM_ACC_STATIC_BIT);
+
+  // PARENT_IJAVA_FRAME_ABI
+  //
+  // frame_size =
+  //   round_to((local_count - parameter_count)*BytesPerWord +
+  //              2*BytesPerWord +
+  //              alignment +
+  //              frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+  //              sizeof(PARENT_IJAVA_FRAME_ABI)
+  //              method->is_synchronized() ? sizeof(BasicObjectLock) : 0 +
+  //              max_stack*BytesPerWord,
+  //            16)
+  //
+  // Note that this calculation is exactly mirrored by
+  // AbstractInterpreter::layout_activation_impl() [ and
+  // AbstractInterpreter::size_activation() ], which deoptimization
+  // uses to allocate a properly sized frame. This only happens for
+  // interpreted frames, so the extra notes about max_stack below are
+  // not important. Also note that for interpreter frames other than
+  // the current activation, the size of the stack is the size of the
+  // live portion of the stack at the particular bcp and NOT the
+  // maximum stack that the method might use.
+  //
+  // If we're calling a native method, we replace max_stack (which is
+  // zero) with space for the worst-case signature handler varargs
+  // vector, which is:
+  //
+  //   max_stack = max(Argument::n_register_parameters, parameter_count+2);
+  //
+  // We add two slots to the parameter_count, one for the jni
+  // environment and one for a possible native mirror.  We allocate
+  // space for at least the number of ABI registers, even though
+  // InterpreterRuntime::slow_signature_handler won't write more than
+  // parameter_count+2 words when it creates the varargs vector at the
+  // top of the stack.  The generated slow signature handler will just
+  // load trash into registers beyond the necessary number.  We're
+  // still going to cut the stack back by the ABI register parameter
+  // count so as to get SP+16 pointing at the ABI outgoing parameter
+  // area, so we need to allocate at least that much even though we're
+  // going to throw it away.
+  //
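+  // For example (illustrative): a native method with parameter_count == 3
+  // yields max_stack = max(n_register_parameters, 3 + 2), i.e. the full
+  // register-parameter count (8 on PPC64), so the varargs area always
+  // covers at least the complete ABI register set.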
+
+  // Adjust max_stack for native methods:
+  Label skip_native_calculate_max_stack;
+  __ bfalse(is_native, skip_native_calculate_max_stack);
+  // if (is_native) {
+  //  max_stack = max(Argument::n_register_parameters, parameter_count+2);
+  __ addi(max_stack, parameter_count, 2*Interpreter::stackElementWords);
+  __ cmpwi(CCR0, max_stack, Argument::n_register_parameters);
+  __ bge(CCR0, skip_native_calculate_max_stack);
+  __ li(max_stack,  Argument::n_register_parameters);
+  // }
+  __ bind(skip_native_calculate_max_stack);
+  // max_stack is now in bytes
+  __ slwi(max_stack, max_stack, Interpreter::logStackElementSize);
+
+  // Calculate number of non-parameter locals (in slots):
+  Label not_java;
+  __ btrue(is_native, not_java);
+  // if (!is_native) {
+  //   local_count = non-parameter local count
+  __ sub(local_count, local_count, parameter_count);
+  // } else {
+  //   // nothing to do: method->max_locals() == 0 for native methods
+  // }
+  __ bind(not_java);
+
+
+  // Calculate top_frame_size and parent_frame_resize.
+  {
+  const Register parent_frame_resize = R12_scratch2;
+
+  BLOCK_COMMENT("Compute top_frame_size.");
+  // top_frame_size = TOP_IJAVA_FRAME_ABI
+  //                  + size of interpreter state
+  __ li(top_frame_size, frame::top_ijava_frame_abi_size
+                        + frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+  //                  + max_stack
+  __ add(top_frame_size, top_frame_size, max_stack);
+  //                  + stack slots for a BasicObjectLock for synchronized methods
+  {
+    Label not_synced;
+    __ bfalse(is_synced, not_synced);
+    __ addi(top_frame_size, top_frame_size, frame::interpreter_frame_monitor_size_in_bytes());
+    __ bind(not_synced);
+  }
+  // align
+  __ round_to(top_frame_size, frame::alignment_in_bytes);
+
+
+  BLOCK_COMMENT("Compute parent_frame_resize.");
+  // parent_frame_resize = R1_SP - R17_tos
+  __ sub(parent_frame_resize, R1_SP, R17_tos);
+  //__ li(parent_frame_resize, 0);
+  //                       + PARENT_IJAVA_FRAME_ABI
+  //                       + extra two slots for the no-parameter/no-locals
+  //                         method result
+  __ addi(parent_frame_resize, parent_frame_resize,
+                                      frame::parent_ijava_frame_abi_size
+                                    + 2*Interpreter::stackElementSize);
+  //                       + (locals_count - params_count)
+  __ sldi(R0, local_count, Interpreter::logStackElementSize);
+  __ add(parent_frame_resize, parent_frame_resize, R0);
+  // align
+  __ round_to(parent_frame_resize, frame::alignment_in_bytes);
+
+  //
+  // Stack layout at this point:
+  //
+  // The new frame F0 hasn't yet been pushed, F1 is still the top frame.
+  //
+  //   F0      [TOP_IJAVA_FRAME_ABI]
+  //           alignment (optional)
+  //           [F0's full operand stack]
+  //           [F0's monitors] (optional)
+  //           [F0's BytecodeInterpreter object]
+  //   F1      [PARENT_IJAVA_FRAME_ABI]
+  //           alignment (optional)
+  //           [F0's Java result]
+  //           [F0's non-arg Java locals]
+  //           [F1's outgoing Java arguments]     <-- R17_tos
+  //           ...
+  //   F2      [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+
+
+  // Calculate new R14_state
+  // and
+  // test that the new memory stack pointer is above the limit,
+  // throw a StackOverflowError otherwise.
+  __ sub(R11_scratch1/*F1's SP*/,  R1_SP, parent_frame_resize);
+  __ addi(R14_state, R11_scratch1/*F1's SP*/,
+              -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+  __ sub(R11_scratch1/*F0's SP*/,
+             R11_scratch1/*F1's SP*/, top_frame_size);
+
+  BLOCK_COMMENT("Test for stack overflow:");
+  __ cmpld(CCR0/*is_stack_overflow*/, R11_scratch1, mem_stack_limit);
+  __ blt(CCR0/*is_stack_overflow*/, stack_overflow_return);
+
+
+  //=============================================================================
+  // Frame_size doesn't overflow the stack. Allocate new frame and
+  // initialize interpreter state.
+
+  // Register state
+  //
+  //   R15            - local_count
+  //   R16            - parameter_count
+  //   R17            - max_stack
+  //
+  //   R18            - frame_size
+  //   R19            - access_flags
+  //   CCR4_is_synced - is_synced
+  //
+  //   GR_Lstate      - pointer to the uninitialized new BytecodeInterpreter.
+
+  // _last_Java_pc just needs to be close enough that we can identify
+  // the frame as an interpreted frame. It does not need to be the
+  // exact return address from either calling
+  // BytecodeInterpreter::InterpretMethod or the call to a jni native method.
+  // So we can initialize it here with a value of a bundle in this
+  // code fragment. We only do this initialization for java frames
+  // where InterpretMethod needs a a way to get a good pc value to
+  // store in the thread state. For interpreter frames used to call
+  // jni native code we just zero the value in the state and move an
+  // ip as needed in the native entry code.
+  //
+  // const Register last_Java_pc_addr     = GR24_SCRATCH;  // QQQ 27
+  // const Register last_Java_pc          = GR26_SCRATCH;
+
+  // Must reference stack before setting new SP since Windows
+  // will not be able to deliver the exception on a bad SP.
+  // Windows also insists that we bang each page one at a time in order
+  // for the OS to map in the reserved pages. If we bang only
+  // the final page, Windows stops delivering exceptions to our
+  // VectoredExceptionHandler and terminates our program.
+  // Linux only requires a single bang but it's rare to have
+  // to bang more than 1 page so the code is enabled for both OS's.
+
+  // BANG THE STACK
+  //
+  // Nothing to do for PPC, because updating the SP will automatically
+  // bang the page.
+
+  // Up to here we have calculated the delta for the new C-frame and
+  // checked for a stack-overflow. Now we can safely update SP and
+  // resize the C-frame.
+
+  // R14_state has already been calculated.
+  __ push_interpreter_frame(top_frame_size, parent_frame_resize,
+                            R25_tmp5, R26_tmp6, R27_tmp7, R28_tmp8);
+
+  }
+
+  //
+  // Stack layout at this point:
+  //
+  //   F0 has now been pushed!
+  //
+  //   F0      [TOP_IJAVA_FRAME_ABI]              <-- R1_SP
+  //           alignment (optional)               (now it's here, if required)
+  //           [F0's full operand stack]
+  //           [F0's monitors] (optional)
+  //           [F0's BytecodeInterpreter object]
+  //   F1      [PARENT_IJAVA_FRAME_ABI]
+  //           alignment (optional)               (now it's here, if required)
+  //           [F0's Java result]
+  //           [F0's non-arg Java locals]
+  //           [F1's outgoing Java arguments]
+  //           ...
+  //   F2      [PARENT_IJAVA_FRAME_ABI]
+  //           ...
+  //
+  // R14_state points to F0's BytecodeInterpreter object.
+  //
+
+  }
+
+  //=============================================================================
+  // The new BytecodeInterpreter object is safe; let's initialize it:
+  BLOCK_COMMENT("New BytecodeInterpreter object is safe.");
+
+  {
+  // Locals
+  const Register bytecode_addr = R24_tmp4;
+  const Register constants     = R25_tmp5;
+  const Register tos           = R26_tmp6;
+  const Register stack_base    = R27_tmp7;
+  const Register local_addr    = R28_tmp8;
+  {
+    Label L;
+    __ btrue(is_native, L);
+    // if (!is_native) {
+      // bytecode_addr = constMethod->codes();
+      __ ld(bytecode_addr, method_(const));
+      __ addi(bytecode_addr, bytecode_addr, in_bytes(ConstMethod::codes_offset()));
+    // }
+    __ bind(L);
+  }
+
+  __ ld(constants, in_bytes(Method::const_offset()), R19_method);
+  __ ld(constants, in_bytes(ConstMethod::constants_offset()), constants);
+
+  // state->_prev_link = prev_state;
+  __ std(R15_prev_state, state_(_prev_link));
+
+  // For assertions only.
+  // TODO: not needed anyway because it coincides with `_monitor_base'. remove!
+  // state->_self_link = state;
+  DEBUG_ONLY(__ std(R14_state, state_(_self_link));)
+
+  // state->_thread = thread;
+  __ std(R16_thread, state_(_thread));
+
+  // state->_method = method;
+  __ std(R19_method, state_(_method));
+
+  // state->_locals = locals;
+  __ std(R18_locals, state_(_locals));
+
+  // state->_oop_temp = NULL;
+  __ li(R0, 0);
+  __ std(R0, state_(_oop_temp));
+
+  // state->_last_Java_fp = *R1_SP // Use *R1_SP as fp
+  __ ld(R0, _abi(callers_sp), R1_SP);
+  __ std(R0, state_(_last_Java_fp));
+
+  BLOCK_COMMENT("load Stack base:");
+  {
+    // Stack_base.
+    // if (!method->synchronized()) {
+    //   stack_base = state;
+    // } else {
+    //   stack_base = (uintptr_t)state - sizeof(BasicObjectLock);
+    // }
+    Label L;
+    __ mr(stack_base, R14_state);
+    __ bfalse(is_synced, L);
+    __ addi(stack_base, stack_base, -frame::interpreter_frame_monitor_size_in_bytes());
+    __ bind(L);
+  }
+
+  // state->_mdx = NULL;
+  __ li(R0, 0);
+  __ std(R0, state_(_mdx));
+
+  {
+    // if (method->is_native()) state->_bcp = NULL;
+    // else state->_bcp = bytecode_addr;
+    Label label1, label2;
+    __ bfalse(is_native, label1);
+    __ std(R0, state_(_bcp));
+    __ b(label2);
+    __ bind(label1);
+    __ std(bytecode_addr, state_(_bcp));
+    __ bind(label2);
+  }
+
+
+  // state->_result._to_call._callee = NULL;
+  __ std(R0, state_(_result._to_call._callee));
+
+  // state->_monitor_base = state;
+  __ std(R14_state, state_(_monitor_base));
+
+  // state->_msg = BytecodeInterpreter::method_entry;
+  __ li(R0, BytecodeInterpreter::method_entry);
+  __ stw(R0, state_(_msg));
+
+  // state->_last_Java_sp = R1_SP;
+  __ std(R1_SP, state_(_last_Java_sp));
+
+  // state->_stack_base = stack_base;
+  __ std(stack_base, state_(_stack_base));
+
+  // tos = stack_base - 1 slot (prepushed);
+  // state->_stack.Tos(tos);
+  __ addi(tos, stack_base, - Interpreter::stackElementSize);
+  __ std(tos,  state_(_stack));
+
+
+  {
+    BLOCK_COMMENT("get last_Java_pc:");
+    // if (!is_native) state->_last_Java_pc = <some_ip_in_this_code_buffer>;
+    // else state->_last_Java_pc = NULL; (just for neatness)
+    Label label1, label2;
+    __ btrue(is_native, label1);
+    __ get_PC_trash_LR(R0);
+    __ std(R0, state_(_last_Java_pc));
+    __ b(label2);
+    __ bind(label1);
+    __ li(R0, 0);
+    __ std(R0, state_(_last_Java_pc));
+    __ bind(label2);
+  }
+
+
+  // stack_limit = tos - max_stack;
+  __ sub(R0, tos, max_stack);
+  // state->_stack_limit = stack_limit;
+  __ std(R0, state_(_stack_limit));
+
+
+  // cache = method->constants()->cache();
+  __ ld(R0, ConstantPool::cache_offset_in_bytes(), constants);
+  // state->_constants = method->constants()->cache();
+  __ std(R0, state_(_constants));
+
+  //=============================================================================
+  // synchronized method, allocate and initialize method object lock.
+  // if (!method->is_synchronized()) goto fill_locals_with_0x0s;
+  Label fill_locals_with_0x0s;
+  __ bfalse(is_synced, fill_locals_with_0x0s);
+
+  //   pool_holder = method->constants()->pool_holder();
+  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+  {
+    Label label1, label2;
+    // lockee = NULL; for java methods, correct value will be inserted in BytecodeInterpretMethod.hpp
+    __ li(R0, 0);
+    __ bfalse(is_native, label2);
+
+    __ bfalse(is_static, label1);
+    // if (method->is_static()) lockee =
+    // pool_holder->klass_part()->java_mirror();
+    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(), constants);
+    __ ld(R0/*lockee*/, mirror_offset, R11_scratch1/*pool_holder*/);
+    __ b(label2);
+
+    __ bind(label1);
+    // else lockee = *(oop*)locals;
+    __ ld(R0/*lockee*/, 0, R18_locals);
+    __ bind(label2);
+
+    // monitor->set_obj(lockee);
+    __ std(R0/*lockee*/, BasicObjectLock::obj_offset_in_bytes(), stack_base);
+  }
+
+  // See if we need to zero the locals
+  __ BIND(fill_locals_with_0x0s);
+
+
+  //=============================================================================
+  // fill locals with 0x0s
+  Label locals_zeroed;
+  __ btrue(is_native, locals_zeroed);
+
+  if (true /* zerolocals */ || ClearInterpreterLocals) {
+    // local_count is already num_locals_slots - num_param_slots
+    __ sldi(R0, parameter_count, Interpreter::logStackElementSize);
+    __ sub(local_addr, R18_locals, R0);
+    __ cmpdi(CCR0, local_count, 0);
+    __ ble(CCR0, locals_zeroed);
+
+    __ mtctr(local_count);
+    //__ ld_const_addr(R0, (address) 0xcafe0000babe);
+    __ li(R0, 0);
+
+    Label zero_slot;
+    __ bind(zero_slot);
+
+    // first local is at local_addr
+    __ std(R0, 0, local_addr);
+    __ addi(local_addr, local_addr, -BytesPerWord);
+    __ bdnz(zero_slot);
+  }
+
+  __ BIND(locals_zeroed);
+
+  }
+  BLOCK_COMMENT("} compute_interpreter_state");
+}
+
+// Generate code to initiate compilation on invocation counter overflow.
+void CppInterpreterGenerator::generate_counter_overflow(Label& continue_entry) {
+  // Registers alive
+  //   R14_state
+  //   R16_thread
+  //
+  // Registers updated
+  //   R14_state
+  //   R3_ARG1 (=R3_RET)
+  //   R4_ARG2
+
+  // After entering the vm we remove the activation and retry the
+  // entry point in case the compilation is complete.
+
+  // InterpreterRuntime::frequency_counter_overflow takes one argument
+  // that indicates if the counter overflow occurs at a backwards
+  // branch (NULL bcp). We pass zero. The call returns the address
+  // of the verified entry point for the method or NULL if the
+  // compilation did not complete (either went background or bailed
+  // out).
+  __ li(R4_ARG2, 0);
+
+  // Pass false to call_VM so it doesn't check for pending exceptions,
+  // since at this point in the method invocation the exception
+  // handler would try to exit the monitor of synchronized methods
+  // which haven't been entered yet.
+  //
+  // Returns verified_entry_point or NULL, we don't care which.
+  //
+  // Do not use the variant `frequency_counter_overflow' that returns
+  // a structure, because this will change the argument list by a
+  // hidden parameter (gcc 4.1).
+
+  __ call_VM(noreg,
+             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
+             R4_ARG2,
+             false);
+  // Returns verified_entry_point or NULL, we don't care which as we ignore it
+  // and run interpreted.
+
+  // Reload method, it may have moved.
+  __ ld(R19_method, state_(_method));
+
+  // We jump now to the label "continue_after_compile".
+  __ b(continue_entry);
+}
+
+// Increment invocation count and check for overflow.
+//
+// R19_method must contain Method* of method to profile.
+void CppInterpreterGenerator::generate_counter_incr(Label& overflow) {
+  Label done;
+  const Register Rcounters             = R12_scratch2;
+  const Register iv_be_count           = R11_scratch1;
+  const Register invocation_limit      = R12_scratch2;
+  const Register invocation_limit_addr = invocation_limit;
+
+  // Load, and possibly allocate, the MethodCounters object.
+  __ get_method_counters(R19_method, Rcounters, done);
+
+  // Update standard invocation counters.
+  __ increment_invocation_counter(Rcounters, iv_be_count, R0);
+
+  // Compare against limit.
+  BLOCK_COMMENT("Compare counter against limit:");
+  assert(4 == sizeof(InvocationCounter::InterpreterInvocationLimit),
+         "must be 4 bytes");
+  __ load_const(invocation_limit_addr, (address)&InvocationCounter::InterpreterInvocationLimit);
+  __ lwa(invocation_limit, 0, invocation_limit_addr);
+  __ cmpw(CCR0, iv_be_count, invocation_limit);
+  __ bge(CCR0, overflow);
+  __ bind(done);
+}
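+
+// Note (illustrative): InterpreterInvocationLimit is derived from
+// CompileThreshold; once iv_be_count reaches it, the branch above transfers
+// control to the overflow path, which ends up calling
+// InterpreterRuntime::frequency_counter_overflow (see generate_counter_overflow).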
+
+//
+// Call a JNI method.
+//
+// Interpreter stub for calling a native method. (C++ interpreter)
+// This sets up a somewhat different looking stack for calling the native method
+// than the typical interpreter frame setup.
+//
+address CppInterpreterGenerator::generate_native_entry(void) {
+  if (native_entry != NULL) return native_entry;
+  address entry = __ pc();
+
+  // Read
+  //   R16_thread
+  //   R15_prev_state  - address of caller's BytecodeInterpreter, if this snippet
+  //                     gets called by the frame manager.
+  //   R19_method      - callee's Method
+  //   R17_tos         - address of caller's tos
+  //   R1_SP           - caller's stack pointer
+  //   R21_sender_SP   - initial caller sp
+  //
+  // Update
+  //   R14_state       - address of caller's BytecodeInterpreter
+  //   R3_RET          - integer result, if any.
+  //   F1_RET          - float result, if any.
+  //
+  //
+  // Stack layout at this point:
+  //
+  //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //            alignment (optional)
+  //            [outgoing Java arguments]     <-- R17_tos
+  //            ...
+  //    PARENT  [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+  //
+
+  const bool inc_counter = UseCompiler || CountCompiledCalls;
+
+  const Register signature_handler_fd   = R21_tmp1;
+  const Register pending_exception      = R22_tmp2;
+  const Register result_handler_addr    = R23_tmp3;
+  const Register native_method_fd       = R24_tmp4;
+  const Register access_flags           = R25_tmp5;
+  const Register active_handles         = R26_tmp6;
+  const Register sync_state             = R27_tmp7;
+  const Register sync_state_addr        = sync_state;     // Address is dead after use.
+  const Register suspend_flags          = R24_tmp4;
+
+  const Register return_pc              = R28_tmp8;       // Register will be locked for some time.
+
+  const ConditionRegister is_synced     = CCR4_is_synced; // Live-on-exit from compute_interpreter_state.
+
+
+  // R1_SP still points to caller's SP at this point.
+
+  // Save initial_caller_sp to caller's abi. The caller frame must be
+  // resized before returning to get rid of the c2i arguments (if
+  // any).
+  // Override the saved SP with the senderSP so we can pop c2i
+  // arguments (if any) off when we return.
+  __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+
+  // Save LR to caller's frame. We don't use _abi(lr) here, because it is not safe.
+  __ mflr(return_pc);
+  __ std(return_pc, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  assert(return_pc->is_nonvolatile(), "return_pc must be a non-volatile register");
+
+  __ verify_method_ptr(R19_method);
+
+  //=============================================================================
+
+  // If this snippet gets called by the frame manager (at label
+  // `call_special'), then R15_prev_state is valid. If this snippet
+  // is not called by the frame manager, but e.g. by the call stub or
+  // by compiled code, then R15_prev_state is invalid.
+  {
+    // Set R15_prev_state to 0 if we don't return to the frame
+    // manager; we will return to the call_stub or to compiled code
+    // instead. If R15_prev_state is 0 there will be only one
+    // interpreter frame (we will set this up later) in this C frame!
+    // So we must take care about retrieving prev_state_(_prev_link)
+    // and restoring R1_SP when popping that interpreter.
+    Label prev_state_is_valid;
+
+    __ load_const(R11_scratch1/*frame_manager_returnpc_addr*/, (address)&frame_manager_specialized_return);
+    __ ld(R12_scratch2/*frame_manager_returnpc*/, 0, R11_scratch1/*frame_manager_returnpc_addr*/);
+    __ cmpd(CCR0, return_pc, R12_scratch2/*frame_manager_returnpc*/);
+    __ beq(CCR0, prev_state_is_valid);
+
+    __ li(R15_prev_state, 0);
+
+    __ BIND(prev_state_is_valid);
+  }
+
+  //=============================================================================
+  // Allocate new frame and initialize interpreter state.
+
+  Label exception_return;
+  Label exception_return_sync_check;
+  Label stack_overflow_return;
+
+  // Generate new interpreter state and jump to stack_overflow_return in case of
+  // a stack overflow.
+  generate_compute_interpreter_state(stack_overflow_return);
+
+  //=============================================================================
+  // Increment invocation counter. On overflow, entry to JNI method
+  // will be compiled.
+  Label invocation_counter_overflow;
+  if (inc_counter) {
+    generate_counter_incr(invocation_counter_overflow);
+  }
+
+  Label continue_after_compile;
+  __ BIND(continue_after_compile);
+
+  // access_flags = method->access_flags();
+  // Load access flags.
+  assert(access_flags->is_nonvolatile(),
+         "access_flags must be in a non-volatile register");
+  // Type check.
+  // TODO: PPC port: assert(4 == methodOopDesc::sz_access_flags(), "unexpected field size");
+  __ lwz(access_flags, method_(access_flags));
+
+  // We don't want to reload R19_method and access_flags after calls
+  // to some helper functions.
+  assert(R19_method->is_nonvolatile(), "R19_method must be a non-volatile register");
+
+  // Check for synchronized methods. Must happen AFTER invocation counter
+  // check, so method is not locked if counter overflows.
+
+  {
+    Label method_is_not_synced;
+    // Is_synced is still alive.
+    assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
+    __ bfalse(is_synced, method_is_not_synced);
+
+    lock_method();
+    // Reload method, it may have moved.
+    __ ld(R19_method, state_(_method));
+
+    __ BIND(method_is_not_synced);
+  }
+
+  // jvmti/jvmpi support
+  __ notify_method_entry();
+
+  // Reload method, it may have moved.
+  __ ld(R19_method, state_(_method));
+
+  //=============================================================================
+  // Get and call the signature handler
+
+  __ ld(signature_handler_fd, method_(signature_handler));
+  Label call_signature_handler;
+
+  __ cmpdi(CCR0, signature_handler_fd, 0);
+  __ bne(CCR0, call_signature_handler);
+
+  // Method has never been called. Either generate a specialized
+  // handler or point to the slow one.
+  //
+  // Pass parameter 'false' to avoid exception check in call_VM.
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R19_method, false);
+
+  // Check for an exception while looking up the target method. If we
+  // incurred one, bail.
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ bne(CCR0, exception_return_sync_check); // has pending exception
+
+  // reload method
+  __ ld(R19_method, state_(_method));
+
+  // Reload signature handler, it may have been created/assigned in the meanwhile
+  __ ld(signature_handler_fd, method_(signature_handler));
+
+  __ BIND(call_signature_handler);
+
+  // Before we call the signature handler we push a new frame to
+  // protect the interpreter frame volatile registers when we return
+  // from jni but before we can get back to Java.
+
+  // First set the frame anchor while the SP/FP registers are
+  // convenient and the slow signature handler can use this same frame
+  // anchor.
+
+  // We have a TOP_IJAVA_FRAME here, which belongs to us.
+  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+  // Now the interpreter frame (and its call chain) have been
+  // invalidated and flushed. We are now protected against eager
+  // being enabled in native code. Even if it goes eager the
+  // registers will be reloaded as clean and we will invalidate after
+  // the call so no spurious flush should be possible.
+
+  // Call signature handler and pass locals address.
+  //
+  // Our signature handlers copy required arguments to the C stack
+  // (outgoing C args), R3_ARG1 to R10_ARG8, and F1_ARG1 to
+  // F13_ARG13.
+  __ mr(R3_ARG1, R18_locals);
+  __ ld(signature_handler_fd, 0, signature_handler_fd);
+  __ call_stub(signature_handler_fd);
+  // reload method
+  __ ld(R19_method, state_(_method));
+
+  // Remove the register parameter varargs slots we allocated in
+  // compute_interpreter_state. SP+16 ends up pointing to the ABI
+  // outgoing argument area.
+  //
+  // Not needed on PPC64.
+  //__ add(SP, SP, Argument::n_register_parameters*BytesPerWord);
+
+  assert(result_handler_addr->is_nonvolatile(), "result_handler_addr must be in a non-volatile register");
+  // Save across call to native method.
+  __ mr(result_handler_addr, R3_RET);
+
+  // Set up fixed parameters and call the native method.
+  // If the method is static, get mirror into R4_ARG2.
+
+  {
+    Label method_is_not_static;
+    // access_flags is non-volatile and still valid; no need to reload it.
+
+    // restore access flags
+    __ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
+    __ bfalse(CCR0, method_is_not_static);
+
+    // constants = method->constants();
+    __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
+    __ ld(R11_scratch1/*constants*/, in_bytes(ConstMethod::constants_offset()), R11_scratch1);
+    // pool_holder = method->constants()->pool_holder();
+    __ ld(R11_scratch1/*pool_holder*/, ConstantPool::pool_holder_offset_in_bytes(),
+          R11_scratch1/*constants*/);
+
+    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
+
+    // mirror = pool_holder->klass_part()->java_mirror();
+    __ ld(R0/*mirror*/, mirror_offset, R11_scratch1/*pool_holder*/);
+    // state->_oop_temp = mirror;
+    __ std(R0/*mirror*/, state_(_oop_temp));
+    // R4_ARG2 = &state->_oop_temp;
+    __ addir(R4_ARG2, state_(_oop_temp));
+
+    __ BIND(method_is_not_static);
+  }
+
+  // At this point, arguments have been copied off the stack into
+  // their JNI positions. Oops are boxed in-place on the stack, with
+  // handles copied to arguments. The result handler address is in a
+  // register.
+
+  // pass JNIEnv address as first parameter
+  __ addir(R3_ARG1, thread_(jni_environment));
+
+  // Load the native_method entry before we change the thread state.
+  __ ld(native_method_fd, method_(native_function));
+
+  //=============================================================================
+  // Transition from _thread_in_Java to _thread_in_native. As soon as
+  // we make this change the safepoint code needs to be certain that
+  // the last Java frame we established is good. The pc in that frame
+  // just needs to be near here, not an actual return address.
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0, _thread_in_native);
+  __ release();
+
+  // TODO: PPC port: assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+
+  if (UseMembar) {
+    __ fence();
+  }
+
+  //=============================================================================
+  // Call the native method. Argument registers must not have been
+  // overwritten since "__ call_stub(signature_handler);" (except for
+  // ARG1 and ARG2 for static methods)
+  __ call_c(native_method_fd);
+
+  __ std(R3_RET, state_(_native_lresult));
+  __ stfd(F1_RET, state_(_native_fresult));
+
+  // The frame_manager_lr field, which we use for setting the last
+  // java frame, gets overwritten by the signature handler. Restore
+  // it now.
+  __ get_PC_trash_LR(R11_scratch1);
+  __ std(R11_scratch1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  // Because of GC R19_method may no longer be valid.
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after
+  // blocking.
+
+
+
+  //=============================================================================
+  // Switch thread to "native transition" state before reading the
+  // synchronization state.  This additional state is necessary
+  // because reading and testing the synchronization state is not
+  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
+  // in _thread_in_native state, loads _not_synchronized and is
+  // preempted.  VM thread changes sync state to synchronizing and
+  // suspends threads for GC. Thread A is resumed to finish this
+  // native method, but doesn't block here since it didn't see any
+  // synchronization in progress, and escapes.
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_native_trans);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  } else {
+    // Write the serialization page so that the VM thread can do a pseudo
+    // remote membar. We use the current thread pointer to calculate a
+    // thread-specific offset to write to within the page. This minimizes
+    // bus traffic due to cache line collision.
+    __ serialize_memory(R16_thread, R11_scratch1, R12_scratch2);
+  }
+
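+  // In outline, the serialization page works like this (illustrative, not
+  // generated code): each thread performs a plain store to a
+  // thread-specific offset in a shared page,
+  //   *(serialize_page + offset(thread)) = 1;
+  // and the VM thread mprotects the page when it needs serialization; any
+  // thread caught mid-store traps, which acts as the pseudo remote membar
+  // without every thread having to execute a fence.
+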
+  // Now, before we return to Java, we must check for a safepoint that is
+  // currently in progress (a new safepoint cannot start since we entered
+  // native_trans). We must check here because a safepoint in progress
+  // could be modifying the caller's registers right this moment.
+
+  // Acquire isn't strictly necessary here because of the fence, but
+  // sync_state is declared to be volatile, so we do it anyway.
+  __ load_const(sync_state_addr, SafepointSynchronize::address_of_state());
+
+  // TODO: PPC port: assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
+  __ lwz(sync_state, 0, sync_state_addr);
+
+  // TODO: PPC port: assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
+  __ lwz(suspend_flags, thread_(suspend_flags));
+
+  __ acquire();
+
+  Label sync_check_done;
+  Label do_safepoint;
+  // No synchronization in progress nor yet synchronized
+  __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+  // not suspended
+  __ cmpwi(CCR1, suspend_flags, 0);
+
+  __ bne(CCR0, do_safepoint);
+  __ beq(CCR1, sync_check_done);
+  __ bind(do_safepoint);
+  // Block. We do the call directly and leave the current
+  // last_Java_frame setup undisturbed. We must save any possible
+  // native result across the call; no oop is present.
+
+  __ mr(R3_ARG1, R16_thread);
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, JavaThread::check_special_condition_for_native_trans),
+            relocInfo::none);
+  __ bind(sync_check_done);
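+
+  // The check just emitted corresponds to (illustrative):
+  //   if (SafepointSynchronize::state() != _not_synchronized ||
+  //       thread->suspend_flags() != 0) {
+  //     JavaThread::check_special_condition_for_native_trans(thread);
+  //   }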
+
+  //=============================================================================
+  // <<<<<< Back in Interpreter Frame >>>>>>
+
+  // We are in thread_in_native_trans here and back in the normal
+  // interpreter frame. We don't have to do anything special about
+  // safepoints and we can switch to Java mode anytime we are ready.
+
+  // Note: frame::interpreter_frame_result has a dependency on how the
+  // method result is saved across the call to post_method_exit. For
+  // native methods it assumes that the non-FPU/non-void result is
+  // saved in _native_lresult and a FPU result in _native_fresult. If
+  // this changes then the interpreter_frame_result implementation
+  // will need to be updated too.
+
+  // On PPC64, we have stored the result directly after the native call.
+
+  //=============================================================================
+  // back in Java
+
+  // We use release_store_fence to update values like the thread state, where
+  // we don't want the current thread to continue until all our prior memory
+  // accesses (including the new thread state) are visible to other threads.
+  __ li(R0/*thread_state*/, _thread_in_Java);
+  __ release();
+  __ stw(R0/*thread_state*/, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+
+  __ reset_last_Java_frame();
+
+  // Reload R19_method, the call killed it. We can't look at
+  // state->_method until we're back in Java state, because while in
+  // Java state GC can't happen until we get to a safepoint.
+  //
+  // We've set thread_state to _thread_in_Java already, so restoring
+  // R19_method from R14_state works; the old value of R19_method is
+  // invalid, because GC may have moved the method.
+  __ ld(R19_method, state_(_method)); // Reload method, it may have moved.
+
+  // jvmdi/jvmpi support. Whether we've got an exception pending or
+  // not, and whether unlocking throws an exception or not, we notify
+  // on native method exit. If we do have an exception, we'll end up
+  // in the caller's context to handle it, so if we don't do the
+  // notify here, we'll drop it on the floor.
+
+  __ notify_method_exit(true/*native method*/,
+                        ilgl /*illegal state (not used for native methods)*/);
+
+
+
+  //=============================================================================
+  // Handle exceptions
+
+  // See if we must unlock.
+  //
+  {
+    Label method_is_not_synced;
+    // is_synced is still alive
+    assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
+    __ bfalse(is_synced, method_is_not_synced);
+
+    unlock_method();
+
+    __ bind(method_is_not_synced);
+  }
+
+  // Reset active handles after returning from native.
+  // thread->active_handles()->clear();
+  __ ld(active_handles, thread_(active_handles));
+  // JNIHandleBlock::_top is an int.
+  // TODO:  PPC port: assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
+  __ li(R0, 0);
+  __ stw(R0, JNIHandleBlock::top_offset_in_bytes(), active_handles);
+
+  Label no_pending_exception_from_native_method;
+  __ ld(R0/*pending_exception*/, thread_(pending_exception));
+  __ cmpdi(CCR0, R0/*pending_exception*/, 0);
+  __ beq(CCR0, no_pending_exception_from_native_method);
+
+
+  //-----------------------------------------------------------------------------
+  // An exception is pending. We call into the runtime only if the
+  // caller was not interpreted. If it was interpreted the
+  // interpreter will do the correct thing. If it isn't interpreted
+  // (call stub/compiled code) we will change our return and continue.
+  __ BIND(exception_return);
+
+  Label return_to_initial_caller_with_pending_exception;
+  __ cmpdi(CCR0, R15_prev_state, 0);
+  __ beq(CCR0, return_to_initial_caller_with_pending_exception);
+
+  // We are returning to an interpreter activation; just pop the state,
+  // pop our frame, leave the exception pending, and return.
+  __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
+  __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
+  __ mtlr(R21_tmp1);
+  __ blr();
+
+  __ BIND(exception_return_sync_check);
+
+  assert(is_synced->is_nonvolatile(), "is_synced must be non-volatile");
+  __ bfalse(is_synced, exception_return);
+  unlock_method();
+  __ b(exception_return);
+
+
+  __ BIND(return_to_initial_caller_with_pending_exception);
+  // We are returning to a c2i-adapter / call-stub; get the address of the
+  // exception handler, pop the frame, and return to the handler.
+
+  // First, pop to caller's frame.
+  __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1  /* set to return pc */, R22_tmp2);
+
+  __ push_frame_abi112(0, R11_scratch1);
+  // Get the address of the exception handler.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  R16_thread,
+                  R21_tmp1 /* return pc */);
+  __ pop_frame();
+
+  // Load the PC of the exception handler into LR.
+  __ mtlr(R3_RET);
+
+  // Load exception into R3_ARG1 and clear pending exception in thread.
+  __ ld(R3_ARG1/*exception*/, thread_(pending_exception));
+  __ li(R4_ARG2, 0);
+  __ std(R4_ARG2, thread_(pending_exception));
+
+  // Load the original return pc into R4_ARG2.
+  __ mr(R4_ARG2/*issuing_pc*/, R21_tmp1);
+
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  // Return to exception handler.
+  __ blr();
+
+
+  //-----------------------------------------------------------------------------
+  // No exception pending.
+  __ BIND(no_pending_exception_from_native_method);
+
+  // Move native method result back into proper registers and return.
+  // Invoke result handler (may unbox/promote).
+  __ ld(R3_RET, state_(_native_lresult));
+  __ lfd(F1_RET, state_(_native_fresult));
+  __ call_stub(result_handler_addr);
+
+  // We have created a new BytecodeInterpreter object; now we must destroy it.
+  //
+  // Restore previous R14_state and caller's SP.  R15_prev_state may
+  // be 0 here, because our caller may be the call_stub or compiled
+  // code.
+  __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
+  __ pop_interpreter_frame(R11_scratch1, R12_scratch2, R21_tmp1 /* set to return pc */, R22_tmp2);
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  // Must use the return pc which was loaded from the caller's frame
+  // as the VM uses return-pc-patching for deoptimization.
+  __ mtlr(R21_tmp1);
+  __ blr();
+
+
+
+  //=============================================================================
+  // We encountered an exception while computing the interpreter
+  // state, so R14_state isn't valid. Act as if we just returned from
+  // the callee method with a pending exception.
+  __ BIND(stack_overflow_return);
+
+  //
+  // Register state:
+  //   R14_state         invalid; trashed by compute_interpreter_state
+  //   R15_prev_state    valid, but may be 0
+  //
+  //   R1_SP             valid, points to caller's SP; wasn't yet updated by
+  //                     compute_interpreter_state
+  //
+
+  // Create exception oop and make it pending.
+
+  // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
+  //
+  // Previously, we called C code directly. As a consequence, a
+  // possible GC tried to process the argument oops of the top frame
+  // (see RegisterMap::clear, which sets the corresponding flag to
+  // true). This led to crashes because:
+  //   1. The top register map did not contain locations for the argument registers.
+  //   2. The arguments are dead anyway and may already be overwritten in the worst case.
+  // Solution: Call via a special runtime stub that pushes its own
+  // frame. This runtime stub has the flag "CodeBlob::caller_must_gc_arguments()"
+  // set to "false", which prevents the dead arguments from being processed by GC.
+  //
+  // 2 cases exist:
+  // 1. We were called by the c2i adapter / call stub
+  // 2. We were called by the frame manager
+  //
+  // Both cases are handled by this code:
+  // 1. - initial_caller_sp was saved in both cases on entry, so it's safe to load it back even if it was not changed.
+  //    - control flow will be:
+  //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of caller method
+  // 2. - control flow will be:
+  //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->rethrow_excp_entry of frame manager->resume_method
+  //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
+  //      registers using the stack and resume the calling method with a pending excp.
+
+  // Pop any c2i extension from the stack, restore LR just to be sure
+  __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  __ mtlr(R0);
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
+  // Load target address of the runtime stub.
+  __ load_const(R12_scratch2, (StubRoutines::throw_StackOverflowError_entry()));
+  __ mtctr(R12_scratch2);
+  __ bctr();
+
+
+  //=============================================================================
+  // Counter overflow.
+
+  if (inc_counter) {
+    // Handle invocation counter overflow
+    __ bind(invocation_counter_overflow);
+
+    generate_counter_overflow(continue_after_compile);
+  }
+
+  native_entry = entry;
+  return entry;
+}
+
+bool AbstractInterpreter::can_be_compiled(methodHandle m) {
+  // No special entry points that preclude compilation.
+  return true;
+}
+
+// Unlock the current method.
+//
+void CppInterpreterGenerator::unlock_method(void) {
+  // Find preallocated monitor and unlock method. Method monitor is
+  // the first one.
+
+  // Registers alive
+  //   R14_state
+  //
+  // Registers updated
+  //   volatiles
+  //
+  const Register monitor = R4_ARG2;
+
+  // Pass address of initial monitor we allocated.
+  //
+  // First monitor.
+  __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
+
+  // Unlock method
+  __ unlock_object(monitor);
+}
+
+// Lock the current method.
+//
+void CppInterpreterGenerator::lock_method(void) {
+  // Find preallocated monitor and lock method. Method monitor is the
+  // first one.
+
+  //
+  // Registers alive
+  //   R14_state
+  //
+  // Registers updated
+  //   volatiles
+  //
+
+  const Register monitor = R4_ARG2;
+  const Register object  = R5_ARG3;
+
+  // Pass address of initial monitor we allocated.
+  __ addi(monitor, R14_state, -frame::interpreter_frame_monitor_size_in_bytes());
+
+  // Pass object address.
+  __ ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
+
+  // Lock method.
+  __ lock_object(monitor, object);
+}
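+
+// Address-layout note for unlock_method and lock_method above
+// (illustrative, not generated code): the preallocated method monitor is
+// the slot immediately below the BytecodeInterpreter state on the stack,
+//   monitor = R14_state - frame::interpreter_frame_monitor_size_in_bytes();
+// which is exactly what the addi in both routines computes.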
+
+// Generate code for handling resuming a deopted method.
+void CppInterpreterGenerator::generate_deopt_handling(Register result_index) {
+
+  //=============================================================================
+  // Returning from a compiled method into a deopted method. The
+  // bytecode at the bcp has completed. The result of the bytecode is
+  // in the native abi (the tosca for the template based
+  // interpreter). Any stack space that was used by the bytecode that
+  // has completed has been removed (e.g. parameters for an invoke) so
+  // all that we have to do is place any pending result on the
+  // expression stack and resume execution on the next bytecode.
+
+  Label return_from_deopt_common;
+
+  // R3_RET and F1_RET are live here! Load the array index of the
+  // required result stub address and continue at return_from_deopt_common.
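+  //
+  // Each of the entry points below follows the same pattern (illustrative):
+  //   deopt_frame_manager_return_<tos> = __ pc();
+  //   result_index = AbstractInterpreter::BasicType_as_index(T_<TYPE>);
+  //   goto return_from_deopt_common;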
+
+  // Deopt needs to jump to here to enter the interpreter (return a result).
+  deopt_frame_manager_return_atos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_OBJECT));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_btos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_BOOLEAN));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_itos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_INT));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_ltos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_ftos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_FLOAT));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_dtos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
+  __ b(return_from_deopt_common);
+
+  deopt_frame_manager_return_vtos = __ pc();
+  __ li(result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
+  // Last one, fall-through to return_from_deopt_common.
+
+  // Deopt return common. An index is present that lets us move any
+  // possible result being returned to the interpreter's stack.
+  //
+  __ BIND(return_from_deopt_common);
+
+}
+
+// Generate the code to handle a more_monitors message from the c++ interpreter.
+void CppInterpreterGenerator::generate_more_monitors() {
+
+  //
+  // Registers alive
+  //   R16_thread      - JavaThread*
+  //   R15_prev_state  - previous BytecodeInterpreter or 0
+  //   R14_state       - BytecodeInterpreter* address of receiver's interpreter state
+  //   R1_SP           - old stack pointer
+  //
+  // Registers updated
+  //   R1_SP          - new stack pointer
+  //
+
+  // Very-local scratch registers.
+  const Register old_tos         = R21_tmp1;
+  const Register new_tos         = R22_tmp2;
+  const Register stack_base      = R23_tmp3;
+  const Register stack_limit     = R24_tmp4;
+  const Register slot            = R25_tmp5;
+  const Register n_slots         = R25_tmp5;
+
+  // Interpreter state fields.
+  const Register msg             = R24_tmp4;
+
+  // Load up relevant interpreter state.
+
+  __ ld(stack_base, state_(_stack_base));                // Old stack_base
+  __ ld(old_tos, state_(_stack));                        // Old tos
+  __ ld(stack_limit, state_(_stack_limit));              // Old stack_limit
+
+  // Size in bytes of one monitor on the expression stack.
+  int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
+  assert(Assembler::is_aligned((unsigned int)monitor_size,
+                               (unsigned int)frame::alignment_in_bytes),
+         "size of a monitor must respect alignment of SP");
+
+  // Save and restore top LR
+  __ ld(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  __ resize_frame(-monitor_size, R11_scratch1);// Allocate space for new monitor
+  __ std(R12_scratch2, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  // Initial_caller_sp is used as unextended_sp for non-initial callers.
+  __ std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+  __ addi(stack_base, stack_base, -monitor_size);        // New stack_base
+  __ addi(new_tos, old_tos, -monitor_size);              // New tos
+  __ addi(stack_limit, stack_limit, -monitor_size);      // New stack_limit
+
+  __ std(R1_SP, state_(_last_Java_sp));                  // Update frame_bottom
+
+  __ std(stack_base, state_(_stack_base));               // Update stack_base
+  __ std(new_tos, state_(_stack));                       // Update tos
+  __ std(stack_limit, state_(_stack_limit));             // Update stack_limit
+
+  __ li(msg, BytecodeInterpreter::got_monitors);         // Tell interpreter we allocated the lock
+  __ stw(msg, state_(_msg));
+
+  // Shuffle the expression stack down. Recall that stack_base points
+  // just above the new expression stack bottom. Old_tos and new_tos
+  // are used to scan through the old and new expression stacks.
+
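+  // The ldu/stdu loop below performs a pre-increment copy, i.e.
+  // (illustrative): while (n_slots-- > 0) { *++new_tos = *++old_tos; }
+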
+  Label copy_slot, copy_slot_finished;
+  __ sub(n_slots, stack_base, new_tos);
+  __ srdi_(n_slots, n_slots, LogBytesPerWord);           // compute number of slots to copy
+  assert(LogBytesPerWord == 3, "conflicts with assembler instructions");
+  __ beq(CCR0, copy_slot_finished);                       // nothing to copy
+
+  __ mtctr(n_slots);
+
+  // loop
+  __ bind(copy_slot);
+  __ ldu(slot, BytesPerWord, old_tos);                   // slot = *++old_tos;
+  __ stdu(slot, BytesPerWord, new_tos);                  // *++new_tos = slot;
+  __ bdnz(copy_slot);
+
+  __ bind(copy_slot_finished);
+
+  // Restart interpreter
+  __ li(R0, 0);
+  __ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base);  // Mark lock as unused
+}
+
+address CppInterpreterGenerator::generate_normal_entry(void) {
+  if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
+
+  address entry = __ pc();
+
+  address return_from_native_pc = (address) NULL;
+
+  // Initial entry to frame manager (from call_stub or c2i_adapter)
+
+  //
+  // Registers alive
+  //   R16_thread               - JavaThread*
+  //   R19_method               - callee's Method (method to be invoked)
+  //   R17_tos                  - address of sender tos (prepushed)
+  //   R1_SP                    - SP prepared by call stub such that caller's outgoing args are near top
+  //   LR                       - return address to caller (call_stub or c2i_adapter)
+  //   R21_sender_SP            - initial caller sp
+  //
+  // Registers updated
+  //   R15_prev_state           - 0
+  //
+  // Stack layout at this point:
+  //
+  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //           alignment (optional)
+  //           [outgoing Java arguments]     <-- R17_tos
+  //           ...
+  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
+  //           ...
+  //
+
+  // Save initial_caller_sp to caller's abi.
+  // The caller frame must be resized before returning to get rid of
+  // the c2i part on top of the calling compiled frame (if any).
+  // R21_tmp1 must match sender_sp in gen_c2i_adapter.
+  // Now override the saved SP with the senderSP so we can pop c2i
+  // arguments (if any) off when we return.
+  __ std(R21_sender_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+
+  // Save LR to caller's frame. We don't use _abi(lr) here,
+  // because it is not safe.
+  __ mflr(R0);
+  __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  // If we come here, it is the first invocation of the frame manager.
+  // So there is no previous interpreter state.
+  __ li(R15_prev_state, 0);
+
+
+  // Fall through to where "recursive" invocations go.
+
+  //=============================================================================
+  // Dispatch an instance of the interpreter. Recursive activations
+  // come here.
+
+  Label re_dispatch;
+  __ BIND(re_dispatch);
+
+  //
+  // Registers alive
+  //    R16_thread        - JavaThread*
+  //    R19_method        - callee's Method
+  //    R17_tos           - address of caller's tos (prepushed)
+  //    R15_prev_state    - address of caller's BytecodeInterpreter or 0
+  //    R1_SP             - caller's SP trimmed such that caller's outgoing args are near top.
+  //
+  // Stack layout at this point:
+  //
+  //   0       [TOP_IJAVA_FRAME_ABI]
+  //           alignment (optional)
+  //           [outgoing Java arguments]
+  //           ...
+  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
+  //           ...
+
+  // fall through to interpreted execution
+
+  //=============================================================================
+  // Allocate a new Java frame and initialize the new interpreter state.
+
+  Label stack_overflow_return;
+
+  // Create a suitable new Java frame plus a new BytecodeInterpreter instance
+  // in the current (frame manager's) C frame.
+  generate_compute_interpreter_state(stack_overflow_return);
+
+  // fall through
+
+  //=============================================================================
+  // Interpreter dispatch.
+
+  Label call_interpreter;
+  __ BIND(call_interpreter);
+
+  //
+  // Registers alive
+  //   R16_thread       - JavaThread*
+  //   R15_prev_state   - previous BytecodeInterpreter or 0
+  //   R14_state        - address of receiver's BytecodeInterpreter
+  //   R1_SP            - receiver's stack pointer
+  //
+
+  // Thread fields.
+  const Register pending_exception = R21_tmp1;
+
+  // Interpreter state fields.
+  const Register msg               = R24_tmp4;
+
+  // MethodOop fields.
+  const Register parameter_count   = R25_tmp5;
+  const Register result_index      = R26_tmp6;
+
+  const Register dummy             = R28_tmp8;
+
+  // Address of various interpreter stubs.
+  // R29_tmp9 is reserved.
+  const Register stub_addr         = R27_tmp7;
+
+  // Uncommon trap needs to jump to here to enter the interpreter
+  // (re-execute current bytecode).
+  unctrap_frame_manager_entry  = __ pc();
+
+  // If we are profiling, store our fp (BSP) in the thread so we can
+  // find it during a tick.
+  if (Arguments::has_profile()) {
+    // On PPC64 we store the pointer to the current BytecodeInterpreter,
+    // instead of the bsp of ia64. This should suffice to be able to
+    // find all interesting information.
+    __ std(R14_state, thread_(last_interpreter_fp));
+  }
+
+  // R16_thread, R14_state and R15_prev_state are nonvolatile
+  // registers. There is no need to save these. If we needed to save
+  // some state in the current Java frame, this could be a place to do
+  // so.
+
+  // Call Java bytecode dispatcher passing "BytecodeInterpreter* istate".
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address,
+                                   JvmtiExport::can_post_interpreter_events()
+                                   ? BytecodeInterpreter::runWithChecks
+                                   : BytecodeInterpreter::run),
+                  R14_state);
+
+  interpreter_return_address  = __ last_calls_return_pc();
+
+  // R16_thread, R14_state and R15_prev_state have their values preserved.
+
+  // If we are profiling, clear the fp in the thread to tell
+  // the profiler that we are no longer in the interpreter.
+  if (Arguments::has_profile()) {
+    __ li(R11_scratch1, 0);
+    __ std(R11_scratch1, thread_(last_interpreter_fp));
+  }
+
+  // Load message from bytecode dispatcher.
+  // TODO: PPC port: guarantee(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
+  __ lwz(msg, state_(_msg));
+
+
+  Label more_monitors;
+  Label return_from_native;
+  Label return_from_native_common;
+  Label return_from_native_no_exception;
+  Label return_from_interpreted_method;
+  Label return_from_recursive_activation;
+  Label unwind_recursive_activation;
+  Label resume_interpreter;
+  Label return_to_initial_caller;
+  Label unwind_initial_activation;
+  Label unwind_initial_activation_pending_exception;
+  Label call_method;
+  Label call_special;
+  Label retry_method;
+  Label retry_method_osr;
+  Label popping_frame;
+  Label throwing_exception;
+
+  // Branch according to the received message
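+  //
+  // Equivalent control flow (illustrative):
+  //   switch (msg) {
+  //     case BytecodeInterpreter::call_method:        goto call_method;
+  //     case BytecodeInterpreter::return_from_method: goto return_from_interpreted_method;
+  //     case BytecodeInterpreter::more_monitors:      goto more_monitors;
+  //     case BytecodeInterpreter::throwing_exception: goto throwing_exception;
+  //     case BytecodeInterpreter::popping_frame:      goto popping_frame;
+  //     case BytecodeInterpreter::do_osr:             goto retry_method_osr;
+  //     default:                                      stop("bad message from interpreter");
+  //   }
+  // Each compare targets its own condition register, so the compares and
+  // branches can be interleaved.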
+
+  __ cmpwi(CCR1, msg, BytecodeInterpreter::call_method);
+  __ cmpwi(CCR2, msg, BytecodeInterpreter::return_from_method);
+
+  __ beq(CCR1, call_method);
+  __ beq(CCR2, return_from_interpreted_method);
+
+  __ cmpwi(CCR3, msg, BytecodeInterpreter::more_monitors);
+  __ cmpwi(CCR4, msg, BytecodeInterpreter::throwing_exception);
+
+  __ beq(CCR3, more_monitors);
+  __ beq(CCR4, throwing_exception);
+
+  __ cmpwi(CCR5, msg, BytecodeInterpreter::popping_frame);
+  __ cmpwi(CCR6, msg, BytecodeInterpreter::do_osr);
+
+  __ beq(CCR5, popping_frame);
+  __ beq(CCR6, retry_method_osr);
+
+  __ stop("bad message from interpreter");
+
+
+  //=============================================================================
+  // Add a monitor just below the existing one(s). State->_stack_base
+  // points to the lowest existing one, so we insert the new one just
+  // below it and shuffle the expression stack down. Per the stack
+  // layout picture above, we must update _stack_base, _stack, _stack_limit
+  // and _last_Java_sp in the interpreter state.
+
+  __ BIND(more_monitors);
+
+  generate_more_monitors();
+  __ b(call_interpreter);
+
+  generate_deopt_handling(result_index);
+
+  // Restoring the R14_state is already done by the deopt_blob.
+
+  // Current tos includes no parameter slots.
+  __ ld(R17_tos, state_(_stack));
+  __ li(msg, BytecodeInterpreter::deopt_resume);
+  __ b(return_from_native_common);
+
+  // We are sent here when we are unwinding from a native method or
+  // adapter with an exception pending. We need to notify the interpreter
+  // that there is an exception to process.
+  // We also arrive here if the frame manager called an (interpreted)
+  // target which returns with a StackOverflow exception.
+  // The control flow in this case is:
+  // frame_manager->throw_excp_stub->forward_excp->rethrow_excp_entry
+
+  AbstractInterpreter::_rethrow_exception_entry = __ pc();
+
+  // Restore R14_state.
+  __ ld(R14_state, 0, R1_SP);
+  __ addi(R14_state, R14_state,
+              -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+
+  // Store exception oop into thread object.
+  __ std(R3_RET, thread_(pending_exception));
+  __ li(msg, BytecodeInterpreter::method_resume /*rethrow_exception*/);
+  //
+  // NOTE: the interpreter frame as set up by deopt does NOT include
+  // any parameter slots (a good thing, since we have no callee here
+  // and couldn't remove them), so we don't have to do any calculations
+  // here to figure it out.
+  //
+  __ ld(R17_tos, state_(_stack));
+  __ b(return_from_native_common);
+
+
+  //=============================================================================
+  // Returning from a native method.  Result is in the native abi
+  // location so we must move it to the java expression stack.
+
+  __ BIND(return_from_native);
+  guarantee(return_from_native_pc == (address) NULL, "precondition");
+  return_from_native_pc = __ pc();
+
+  // Restore R14_state.
+  __ ld(R14_state, 0, R1_SP);
+  __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+
+  //
+  // Registers alive
+  //   R16_thread
+  //   R14_state    - address of caller's BytecodeInterpreter.
+  //   R3_RET       - integer result, if any.
+  //   F1_RET       - float result, if any.
+  //
+  // Registers updated
+  //   R19_method   - callee's Method
+  //   R17_tos      - caller's tos, with outgoing args popped
+  //   result_index - index of result handler.
+  //   msg          - message for resuming interpreter.
+  //
+
+  // Very-local scratch registers.
+
+  const ConditionRegister have_pending_exception = CCR0;
+
+  // Load callee Method, gc may have moved it.
+  __ ld(R19_method, state_(_result._to_call._callee));
+
+  // Load address of caller's tos; it includes the parameter slots.
+  __ ld(R17_tos, state_(_stack));
+
+  // Pop callee's parameters.
+
+  __ ld(parameter_count, in_bytes(Method::const_offset()), R19_method);
+  __ lhz(parameter_count, in_bytes(ConstMethod::size_of_parameters_offset()), parameter_count);
+  __ sldi(parameter_count, parameter_count, Interpreter::logStackElementSize);
+  __ add(R17_tos, R17_tos, parameter_count);
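+  // I.e. (illustrative):
+  //   R17_tos += method->constMethod()->size_of_parameters()
+  //                << Interpreter::logStackElementSize;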
+
+  // Result stub address array index
+  // TODO: PPC port: assert(4 == methodOopDesc::sz_result_index(), "unexpected field size");
+  __ lwa(result_index, method_(result_index));
+
+  __ li(msg, BytecodeInterpreter::method_resume);
+
+  //
+  // Registers alive
+  //   R16_thread
+  //   R14_state    - address of caller's BytecodeInterpreter.
+  //   R17_tos      - address of caller's tos with outgoing args already popped
+  //   R3_RET       - integer return value, if any.
+  //   F1_RET       - float return value, if any.
+  //   result_index - index of result handler.
+  //   msg          - message for resuming interpreter.
+  //
+  // Registers updated
+  //   R3_RET       - new address of caller's tos, including result, if any
+  //
+
+  __ BIND(return_from_native_common);
+
+  // Check for pending exception
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ beq(CCR0, return_from_native_no_exception);
+
+  // If there's a pending exception, we really have no result, so
+  // R3_RET is dead. Resume_interpreter assumes the new tos is in
+  // R3_RET.
+  __ mr(R3_RET, R17_tos);
+  // `resume_interpreter' expects R15_prev_state to be alive.
+  __ ld(R15_prev_state, state_(_prev_link));
+  __ b(resume_interpreter);
+
+  __ BIND(return_from_native_no_exception);
+
+  // No pending exception, copy method result from native ABI register
+  // to tos.
+
+  // Address of stub descriptor address array.
+  __ load_const(stub_addr, CppInterpreter::tosca_result_to_stack());
+
+  // Pass address of tos to stub.
+  __ mr(R4_ARG2, R17_tos);
+
+  // Address of stub descriptor address.
+  __ sldi(result_index, result_index, LogBytesPerWord);
+  __ add(stub_addr, stub_addr, result_index);
+
+  // Stub descriptor address.
+  __ ld(stub_addr, 0, stub_addr);
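+
+  // Background (not generated code): on the 64-bit PPC ELF ABI v1 a C
+  // function pointer designates a function descriptor of the form
+  // { entry point, TOC pointer, environment pointer }, so the stub must
+  // be entered via its descriptor rather than via a raw code address.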
+
+  // TODO: don't do this via a call, do it in place!
+  //
+  // call stub via descriptor
+  // in R3_ARG1/F1_ARG1: result value (R3_RET or F1_RET)
+  __ call_stub(stub_addr);
+
+  // new tos = result of call in R3_RET
+
+  // `resume_interpreter' expects R15_prev_state to be alive.
+  __ ld(R15_prev_state, state_(_prev_link));
+  __ b(resume_interpreter);
+
+  //=============================================================================
+  // We encountered an exception while computing the interpreter
+  // state, so R14_state isn't valid. Act as if we just returned from
+  // the callee method with a pending exception.
+  __ BIND(stack_overflow_return);
+
+  //
+  // Registers alive
+  //   R16_thread        - JavaThread*
+  //   R1_SP             - old stack pointer
+  //   R19_method        - callee's Method
+  //   R17_tos           - address of caller's tos (prepushed)
+  //   R15_prev_state    - address of caller's BytecodeInterpreter or 0
+  //   R18_locals        - address of callee's locals array
+  //
+  // Registers updated
+  //   R3_RET           - address of resuming tos, if recursive unwind
+
+  Label Lskip_unextend_SP;
+
+  {
+  const ConditionRegister is_initial_call = CCR0;
+  const Register tos_save = R21_tmp1;
+  const Register tmp = R22_tmp2;
+
+  assert(tos_save->is_nonvolatile(), "need a nonvolatile");
+
+  // Is the exception thrown in the initial Java frame of this frame
+  // manager frame?
+  __ cmpdi(is_initial_call, R15_prev_state, 0);
+  __ bne(is_initial_call, Lskip_unextend_SP);
+
+  // Pop any c2i extension from the stack. This is necessary in the
+  // non-recursive case (that is, we were called by the c2i adapter,
+  // meaning we have no prev state). In this case we entered the frame
+  // manager through a special entry which pushes the original
+  // unextended SP to the stack. Here we load it back.
+  __ ld(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  __ mtlr(R0);
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  // Fall through
+
+  __ bind(Lskip_unextend_SP);
+
+  // Throw the exception via RuntimeStub "throw_StackOverflowError_entry".
+  //
+  // Previously, we called C code directly. As a consequence, a
+  // possible GC tried to process the argument oops of the top frame
+  // (see RegisterMap::clear, which sets the corresponding flag to
+  // true). This led to crashes because:
+  // 1. The top register map did not contain locations for the argument registers.
+  // 2. The arguments are dead anyway and may already be overwritten in the worst case.
+  // Solution: Call via a special runtime stub that pushes its own frame. This runtime stub has the flag
+  // "CodeBlob::caller_must_gc_arguments()" set to "false", which prevents the dead arguments from being processed by GC.
+  //
+  // 2 cases exist:
+  // 1. We were called by the c2i adapter / call stub
+  // 2. We were called by the frame manager
+  //
+  // Both cases are handled by this code:
+  // 1. - initial_caller_sp was saved on stack => Load it back and we're ok
+  //    - control flow will be:
+  //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->excp_blob of calling method
+  // 2. - control flow will be:
+  //      throw_stackoverflow_stub->VM->throw_stackoverflow_stub->forward_excep->
+  //        ->rethrow_excp_entry of frame manager->resume_method
+  //      Since we restored the caller SP above, the rethrow_excp_entry can restore the original interpreter state
+  //      registers using the stack and resume the calling method with a pending excp.
+
+  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
+  __ load_const(R3_ARG1, (StubRoutines::throw_StackOverflowError_entry()));
+  __ mtctr(R3_ARG1);
+  __ bctr();
+  }
+  //=============================================================================
+  // We have popped a frame from an interpreted call. We are assured
+  // of returning to an interpreted call by the popframe abi. We have
+  // no return value; all we have to do is pop the current frame and
+  // then make sure that the top of stack (of the caller) gets set to
+  // where it was when we entered the callee (i.e. the args are still
+  // in place). Or we are returning to the interpreter. In the first
+  // case we must extract the result (if any) from the java expression
+  // stack and store it in the location the native abi would expect
+  // for a call returning this type. In the second case we must simply
+  // do a stack-to-stack move as we unwind.
+
+  __ BIND(popping_frame);
+
+  // Registers alive
+  //   R14_state
+  //   R15_prev_state
+  //   R17_tos
+  //
+  // Registers updated
+  //   R19_method
+  //   R3_RET
+  //   msg
+  {
+    Label L;
+
+    // Reload callee method, gc may have moved it.
+    __ ld(R19_method, state_(_method));
+
+    // We may be returning to a deoptimized frame in which case the
+    // usual assumption of a recursive return is not true.
+
+    // not equal = is recursive call
+    __ cmpdi(CCR0, R15_prev_state, 0);
+
+    __ bne(CCR0, L);
+
+    // Pop_frame capability.
+    // The pop_frame api says that the underlying frame is a Java frame; in
+    // this case (prev_state == null) it must be a compiled frame:
+    //
+    // Stack at this point: I, C2I + C, ...
+    //
+    // The outgoing arguments of the call have just been copied (popframe_preserve_args).
+    // By the pop_frame api, we must end up in an interpreted frame. So the compiled frame
+    // will be deoptimized. Deoptimization will restore the outgoing arguments from
+    // popframe_preserve_args, adjust the tos such that it includes the popframe_preserve_args,
+    // and adjust the bci such that the call will be executed again.
+    // We have no results, just pop the interpreter frame, resize the compiled frame to get rid
+    // of the c2i extension and return to the deopt_handler.
+    __ b(unwind_initial_activation);
+
+    // is recursive call
+    __ bind(L);
+
+    // Resume_interpreter expects the original tos in R3_RET.
+    __ ld(R3_RET, prev_state_(_stack));
+
+    // We're done.
+    __ li(msg, BytecodeInterpreter::popping_frame);
+
+    __ b(unwind_recursive_activation);
+  }
+
+
+  //=============================================================================
+
+  // We have finished an interpreted call. We are either returning to
+  // native (call_stub/c2) or we are returning to the interpreter.
+  // When returning to native, we must extract the result (if any)
+  // from the java expression stack and store it in the location the
+  // native abi expects. When returning to the interpreter we must
+  // simply do a stack to stack move as we unwind.
+
+  __ BIND(return_from_interpreted_method);
+
+  //
+  // Registers alive
+  //   R16_thread     - JavaThread*
+  //   R15_prev_state - address of caller's BytecodeInterpreter or 0
+  //   R14_state      - address of callee's interpreter state
+  //   R1_SP          - callee's stack pointer
+  //
+  // Registers updated
+  //   R19_method     - callee's method
+  //   R3_RET         - address of result (new caller's tos),
+  //                    if returning to interpreted
+  //   msg            - message for interpreter,
+  //                    if returning to interpreted
+  //
+
+  // Check if this is the initial invocation of the frame manager.
+  // If so, R15_prev_state will be null.
+  __ cmpdi(CCR0, R15_prev_state, 0);
+
+  // Reload callee method, gc may have moved it.
+  __ ld(R19_method, state_(_method));
+
+  // Load the method's result type.
+  __ lwz(result_index, method_(result_index));
+
+  // Go to return_to_initial_caller if R15_prev_state is null.
+  __ beq(CCR0, return_to_initial_caller);
+
+  // Copy callee's result to caller's expression stack via inline stack-to-stack
+  // converters.
+  {
+    Register new_tos   = R3_RET;
+    Register from_temp = R4_ARG2;
+    Register from      = R5_ARG3;
+    Register tos       = R6_ARG4;
+    Register tmp1      = R7_ARG5;
+    Register tmp2      = R8_ARG6;
+
+    ConditionRegister result_type_is_void   = CCR1;
+    ConditionRegister result_type_is_long   = CCR2;
+    ConditionRegister result_type_is_double = CCR3;
+
+    Label stack_to_stack_void;
+    Label stack_to_stack_double_slot; // T_LONG, T_DOUBLE
+    Label stack_to_stack_single_slot; // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
+    Label stack_to_stack_done;
+
+    // Pass callee's address of tos + BytesPerWord
+    __ ld(from_temp, state_(_stack));
+
+    // result type: void
+    __ cmpwi(result_type_is_void, result_index, AbstractInterpreter::BasicType_as_index(T_VOID));
+
+    // Pass caller's tos == callee's locals address
+    __ ld(tos, state_(_locals));
+
+    // result type: long
+    __ cmpwi(result_type_is_long, result_index, AbstractInterpreter::BasicType_as_index(T_LONG));
+
+    __ addi(from, from_temp, Interpreter::stackElementSize);
+
+    // !! don't branch above this line !!
+
+    // handle void
+    __ beq(result_type_is_void,   stack_to_stack_void);
+
+    // result type: double
+    __ cmpwi(result_type_is_double, result_index, AbstractInterpreter::BasicType_as_index(T_DOUBLE));
+
+    // handle long or double
+    __ beq(result_type_is_long, stack_to_stack_double_slot);
+    __ beq(result_type_is_double, stack_to_stack_double_slot);
+
+    // fall through to single slot types (incl. object)
+
+    {
+      __ BIND(stack_to_stack_single_slot);
+      // T_BOOLEAN, T_BYTE, T_CHAR, T_SHORT, T_INT, T_FLOAT, T_OBJECT
+
+      __ ld(tmp1, 0, from);
+      __ std(tmp1, 0, tos);
+      // New expression stack top
+      __ addi(new_tos, tos, - BytesPerWord);
+
+      __ b(stack_to_stack_done);
+    }
+
+    {
+      __ BIND(stack_to_stack_double_slot);
+      // T_LONG, T_DOUBLE
+
+      // Move both entries for debug purposes even though only one is live
+      __ ld(tmp1, BytesPerWord, from);
+      __ ld(tmp2, 0, from);
+      __ std(tmp1, 0, tos);
+      __ std(tmp2, -BytesPerWord, tos);
+
+      // new expression stack top
+      __ addi(new_tos, tos, - 2 * BytesPerWord); // two slots
+      __ b(stack_to_stack_done);
+    }
+
+    {
+      __ BIND(stack_to_stack_void);
+      // T_VOID
+
+      // new expression stack top
+      __ mr(new_tos, tos);
+      // fall through to stack_to_stack_done
+    }
+
+    __ BIND(stack_to_stack_done);
+  }
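+
+  // In summary, with slots in word units (illustrative):
+  //   void   : new_tos = tos;
+  //   1 slot : tos[0] = from[0];                    new_tos = tos - 1;
+  //   2 slots: tos[0] = from[1]; tos[-1] = from[0]; new_tos = tos - 2;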
+
+  // new tos = R3_RET
+
+  // Get the message for the interpreter
+  __ li(msg, BytecodeInterpreter::method_resume);
+
+  // And fall thru
+
+
+  //=============================================================================
+  // Restore caller's interpreter state and pass pointer to caller's
+  // new tos to caller.
+
+  __ BIND(unwind_recursive_activation);
+
+  //
+  // Registers alive
+  //   R15_prev_state   - address of caller's BytecodeInterpreter
+  //   R3_RET           - address of caller's tos
+  //   msg              - message for caller's BytecodeInterpreter
+  //   R1_SP            - callee's stack pointer
+  //
+  // Registers updated
+  //   R14_state        - address of caller's BytecodeInterpreter
+  //   R15_prev_state   - address of its parent or 0
+  //
+
+  // Pop callee's interpreter and set R14_state to caller's interpreter.
+  __ pop_interpreter_state(/*prev_state_may_be_0=*/false);
+
+  // And fall thru
+
+
+  //=============================================================================
+  // Resume the (calling) interpreter after a call.
+
+  __ BIND(resume_interpreter);
+
+  //
+  // Registers alive
+  //   R14_state        - address of resuming BytecodeInterpreter
+  //   R15_prev_state   - address of its parent or 0
+  //   R3_RET           - address of resuming tos
+  //   msg              - message for resuming interpreter
+  //   R1_SP            - callee's stack pointer
+  //
+  // Registers updated
+  //   R1_SP            - caller's stack pointer
+  //
+
+  // Restore C stack pointer of caller (resuming interpreter),
+  // R14_state already points to the resuming BytecodeInterpreter.
+  __ pop_interpreter_frame_to_state(R14_state, R21_tmp1, R11_scratch1, R12_scratch2);
+
+  // Store new address of tos (holding return value) in interpreter state.
+  __ std(R3_RET, state_(_stack));
+
+  // Store message for interpreter.
+  __ stw(msg, state_(_msg));
+
+  __ b(call_interpreter);
+
+  //=============================================================================
+  // Interpreter returning to native code (call_stub/c1/c2) from
+  // initial activation. Convert stack result and unwind activation.
+
+  __ BIND(return_to_initial_caller);
+
+  //
+  // Registers alive
+  //   R19_method       - callee's Method
+  //   R14_state        - address of callee's interpreter state
+  //   R16_thread       - JavaThread
+  //   R1_SP            - callee's stack pointer
+  //
+  // Registers updated
+  //   R3_RET/F1_RET - result in expected output register
+  //
+
+  // If we have an exception pending we have no result and we
+  // must figure out where to really return to.
+  //
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ bne(CCR0, unwind_initial_activation_pending_exception);
+
+  __ lwa(result_index, method_(result_index));
+
+  // Address of stub descriptor address array.
+  __ load_const(stub_addr, CppInterpreter::stack_result_to_native());
+
+  // Pass address of callee's tos + BytesPerWord.
+  // Will then point directly to result.
+  __ ld(R3_ARG1, state_(_stack));
+  __ addi(R3_ARG1, R3_ARG1, Interpreter::stackElementSize);
+
+  // Address of stub descriptor address
+  __ sldi(result_index, result_index, LogBytesPerWord);
+  __ add(stub_addr, stub_addr, result_index);
+
+  // Stub descriptor address
+  __ ld(stub_addr, 0, stub_addr);
+
+  // TODO: don't do this via a call, do it in place!
+  //
+  // call stub via descriptor
+  __ call_stub(stub_addr);
+
+  __ BIND(unwind_initial_activation);
+
+  // Unwind from initial activation. No exception is pending.
+
+  //
+  // Stack layout at this point:
+  //
+  //    0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //            ...
+  //    CALLER  [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+  //    CALLER  [unextended ABI]
+  //            ...
+  //
+  //  The CALLER frame has a C2I adapter or is an entry-frame.
+  //
+
+  // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
+  // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
+  // But, we simply restore the return pc from the caller's frame and
+  // use the caller's initial_caller_sp as the new SP which pops the
+  // interpreter frame and "resizes" the caller's frame to its "unextended"
+  // size.
+
+  // get rid of top frame
+  __ pop_frame();
+
+  // Load return PC from parent frame.
+  __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
+
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  // update LR
+  __ mtlr(R21_tmp1);
+
+  // return
+  __ blr();
+
+  //=============================================================================
+  // Unwind from initial activation. An exception is pending
+
+  __ BIND(unwind_initial_activation_pending_exception);
+
+  //
+  // Stack layout at this point:
+  //
+  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //           ...
+  //   CALLER  [PARENT_IJAVA_FRAME_ABI]
+  //           ...
+  //   CALLER  [unextended ABI]
+  //           ...
+  //
+  // The CALLER frame has a C2I adapter or is an entry-frame.
+  //
+
+  // An interpreter frame exists, we may pop the TOP_IJAVA_FRAME and
+  // turn the caller's PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
+  // But, we just pop the current TOP_IJAVA_FRAME and fall through
+
+  __ pop_frame();
+  __ ld(R3_ARG1, _top_ijava_frame_abi(lr), R1_SP);
+
+  //
+  // Stack layout at this point:
+  //
+  //   CALLER  [PARENT_IJAVA_FRAME_ABI]      <-- R1_SP
+  //           ...
+  //   CALLER  [unextended ABI]
+  //           ...
+  //
+  // The CALLER frame has a C2I adapter or is an entry-frame.
+  //
+  // Registers alive
+  //   R16_thread
+  //   R3_ARG1 - return address to caller
+  //
+  // Registers updated
+  //   R3_ARG1 - address of pending exception
+  //   R4_ARG2 - issuing pc = return address to caller
+  //   LR      - address of exception handler stub
+  //
+
+  // Resize frame to get rid of a potential extension.
+  __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+  __ mr(R14, R3_ARG1);   // R14 := ARG1
+  __ mr(R4_ARG2, R3_ARG1);  // ARG2 := ARG1
+
+  // Find the address of the "catch_exception" stub.
+  __ push_frame_abi112(0, R11_scratch1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
+                  R16_thread,
+                  R4_ARG2);
+  __ pop_frame();
+
+  // Load continuation address into LR.
+  __ mtlr(R3_RET);
+
+  // Load address of pending exception and clear it in thread object.
+  __ ld(R3_ARG1/*R3_RET*/, thread_(pending_exception));
+  __ li(R4_ARG2, 0);
+  __ std(R4_ARG2, thread_(pending_exception));
+
+  // re-load issuing pc
+  __ mr(R4_ARG2, R14);
+
+  // Branch to found exception handler.
+  __ blr();
+
+  //=============================================================================
+  // Call a new method. Compute new args and trim the expression stack
+  // to only what we are currently using and then recurse.
+
+  __ BIND(call_method);
+
+  //
+  //  Registers alive
+  //    R16_thread
+  //    R14_state      - address of caller's BytecodeInterpreter
+  //    R1_SP          - caller's stack pointer
+  //
+  //  Registers updated
+  //    R15_prev_state - address of caller's BytecodeInterpreter
+  //    R17_tos        - address of caller's tos
+  //    R19_method     - callee's Method
+  //    R1_SP          - trimmed back
+  //
+
+  // Very-local scratch registers.
+
+  const Register offset = R21_tmp1;
+  const Register tmp    = R22_tmp2;
+  const Register self_entry  = R23_tmp3;
+  const Register stub_entry  = R24_tmp4;
+
+  const ConditionRegister cr = CCR0;
+
+  // Load the address of the frame manager.
+  __ load_const(self_entry, &interpreter_frame_manager);
+  __ ld(self_entry, 0, self_entry);
+
+  // Load BytecodeInterpreter._result._to_call._callee (callee's Method).
+  __ ld(R19_method, state_(_result._to_call._callee));
+  // Load BytecodeInterpreter._stack (outgoing tos).
+  __ ld(R17_tos, state_(_stack));
+
+  // Save address of caller's BytecodeInterpreter.
+  __ mr(R15_prev_state, R14_state);
+
+  // Load the callee's entry point.
+  // Load BytecodeInterpreter._result._to_call._callee_entry_point.
+  __ ld(stub_entry, state_(_result._to_call._callee_entry_point));
+
+  // Check whether stub_entry is equal to self_entry.
+  __ cmpd(cr, self_entry, stub_entry);
+  // if (self_entry == stub_entry)
+  //   do a re-dispatch
+  __ beq(cr, re_dispatch);
+  // else
+  //   call the specialized entry (adapter for jni or compiled code)
+  __ BIND(call_special);
+
+  //
+  // Call the entry generated by `InterpreterGenerator::generate_native_entry'.
+  //
+  // Registers alive
+  //   R16_thread
+  //   R15_prev_state    - address of caller's BytecodeInterpreter
+  //   R19_method        - callee's Method
+  //   R17_tos           - address of caller's tos
+  //   R1_SP             - caller's stack pointer
+  //
+
+  // Mark return from specialized entry for generate_native_entry.
+  guarantee(return_from_native_pc != (address) NULL, "precondition");
+  frame_manager_specialized_return = return_from_native_pc;
+
+  // Set sender_SP in case we call interpreter native wrapper which
+  // will expect it. Compiled code should not care.
+  __ mr(R21_sender_SP, R1_SP);
+
+  // Do a tail call here, and let the link register point to
+  // frame_manager_specialized_return which is return_from_native_pc.
+  __ load_const(tmp, frame_manager_specialized_return);
+  __ call_stub_and_return_to(stub_entry,  tmp /* return_pc=tmp */);
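+  // Net effect (restated, not generated code): the callee's blr will
+  // transfer control to return_from_native_pc, i.e. back into the
+  // return_from_native handling above.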
+
+
+  //=============================================================================
+  //
+  // InterpretMethod triggered OSR compilation of some Java method M
+  // and now asks to run the compiled code.  We call this code the
+  // `callee'.
+  //
+  // This is our current idea of how OSR should look on PPC64:
+  //
+  // While interpreting a Java method M the stack is:
+  //
+  //  (InterpretMethod (M), IJAVA_FRAME (M), ANY_FRAME, ...).
+  //
+  // After having OSR compiled M, `InterpretMethod' returns to the
+  // frame manager, sending the message `retry_method_osr'.  The stack
+  // is:
+  //
+  //  (IJAVA_FRAME (M), ANY_FRAME, ...).
+  //
+  // The compiler will have generated an `nmethod' suitable for
+  // continuing execution of M at the bytecode index at which OSR took
+  // place.  So now the frame manager calls the OSR entry.  The OSR
+  // entry sets up a JIT_FRAME for M and continues execution of M with
+  // initial state determined by the IJAVA_FRAME.
+  //
+  //  (JIT_FRAME (M), IJAVA_FRAME (M), ANY_FRAME, ...).
+  //
+
+  __ BIND(retry_method_osr);
+  {
+  //
+  // Registers alive
+  //   R16_thread
+  //   R15_prev_state     - address of caller's BytecodeInterpreter
+  //   R14_state          - address of callee's BytecodeInterpreter
+  //   R1_SP              - callee's SP before call to InterpretMethod
+  //
+  // Registers updated
+  //   R17                - pointer to callee's locals array
+  //                       (declared via `interpreter_arg_ptr_reg' in the AD file)
+  //   R19_method         - callee's Method
+  //   R1_SP              - callee's SP (will become SP of OSR adapter frame)
+  //
+
+  // Provide a debugger breakpoint in the frame manager if breakpoints
+  // in osr'd methods are requested.
+#ifdef COMPILER2
+  NOT_PRODUCT( if (OptoBreakpointOSR) { __ illtrap(); } )
+#endif
+
+  // Load callee's pointer to locals array from callee's state.
+  //  __ ld(R17, state_(_locals));
+
+  // Load osr entry.
+  __ ld(R12_scratch2, state_(_result._osr._osr_entry));
+
+  // Load address of temporary osr buffer to arg1.
+  __ ld(R3_ARG1, state_(_result._osr._osr_buf));
+  __ mtctr(R12_scratch2);
+
+  // Load method oop, gc may move it during execution of osr'd method.
+  __ ld(R22_tmp2, state_(_method));
+  // Load message 'call_method'.
+  __ li(R23_tmp3, BytecodeInterpreter::call_method);
+
+  {
+    // Pop the IJAVA frame of the method which we are going to call osr'd.
+    Label no_state, skip_no_state;
+    __ pop_interpreter_state(/*prev_state_may_be_0=*/true);
+    __ cmpdi(CCR0, R14_state, 0);
+    __ beq(CCR0, no_state);
+    // return to interpreter
+    __ pop_interpreter_frame_to_state(R14_state, R11_scratch1, R12_scratch2, R21_tmp1);
+
+    // Init _result._to_call._callee and tell gc that it contains a valid oop
+    // by setting _msg to 'call_method'.
+    __ std(R22_tmp2, state_(_result._to_call._callee));
+    // TODO: PPC port: assert(4 == BytecodeInterpreter::sz_msg(), "unexpected field size");
+    __ stw(R23_tmp3, state_(_msg));
+
+    __ load_const(R21_tmp1, frame_manager_specialized_return);
+    __ b(skip_no_state);
+    __ bind(no_state);
+
+    // Return to initial caller.
+
+    // Get rid of top frame.
+    __ pop_frame();
+
+    // Load return PC from parent frame.
+    __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);
+
+    // Resize frame to get rid of a potential extension.
+    __ resize_frame_to_initial_caller(R11_scratch1, R12_scratch2);
+
+    __ bind(skip_no_state);
+
+    // Update LR with return pc.
+    __ mtlr(R21_tmp1);
+  }
+  // Jump to the osr entry point.
+  __ bctr();
+
+  }
+
+  //=============================================================================
+  // Interpreted method "returned" with an exception, pass it on.
+  // Pass no result, unwind activation and continue/return to
+  // interpreter/call_stub/c2.
+
+  __ BIND(throwing_exception);
+
+  // Check if this is the initial invocation of the frame manager.  If
+  // so, previous interpreter state in R15_prev_state will be null.
+
+  // The caller's new tos is the address of the callee's first parameter,
+  // that is, the callee's incoming arguments are popped.
+  __ ld(R3_RET, state_(_locals));
+
+  // Check whether this is an initial call.
+  __ cmpdi(CCR0, R15_prev_state, 0);
+  // Yes, called from the call stub or from generated code via a c2i frame.
+  __ beq(CCR0, unwind_initial_activation_pending_exception);
+
+  // Send resume message, interpreter will see the exception first.
+
+  __ li(msg, BytecodeInterpreter::method_resume);
+  __ b(unwind_recursive_activation);
+
+
+  //=============================================================================
+  // Push the last instruction out to the code buffer.
+
+  {
+    __ unimplemented("end of InterpreterGenerator::generate_normal_entry", 128);
+  }
+
+  interpreter_frame_manager = entry;
+  return interpreter_frame_manager;
+}
+
+// Generate code for various sorts of method entries
+//
+address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
+  address entry_point = NULL;
+
+  switch (kind) {
+    case Interpreter::zerolocals                 :                                                                              break;
+    case Interpreter::zerolocals_synchronized    :                                                                              break;
+    case Interpreter::native                     : // Fall thru
+    case Interpreter::native_synchronized        : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry();     break;
+    case Interpreter::empty                      :                                                                              break;
+    case Interpreter::accessor                   : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry();      break;
+    case Interpreter::abstract                   : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry();      break;
+    // These are special interpreter intrinsics which we don't support so far.
+    case Interpreter::java_lang_math_sin         :                                                                              break;
+    case Interpreter::java_lang_math_cos         :                                                                              break;
+    case Interpreter::java_lang_math_tan         :                                                                              break;
+    case Interpreter::java_lang_math_abs         :                                                                              break;
+    case Interpreter::java_lang_math_log         :                                                                              break;
+    case Interpreter::java_lang_math_log10       :                                                                              break;
+    case Interpreter::java_lang_math_sqrt        :                                                                              break;
+    case Interpreter::java_lang_math_pow         :                                                                              break;
+    case Interpreter::java_lang_math_exp         :                                                                              break;
+    case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
+    default                                      : ShouldNotReachHere();                                                        break;
+  }
+
+  if (entry_point) {
+    return entry_point;
+  }
+  return ((InterpreterGenerator*)this)->generate_normal_entry();
+}
+
+InterpreterGenerator::InterpreterGenerator(StubQueue* code)
+ : CppInterpreterGenerator(code) {
+   generate_all(); // down here so it can be "virtual"
+}
+
+// How much stack a topmost interpreter method activation needs in words.
+int AbstractInterpreter::size_top_interpreter_activation(Method* method) {
+  // The computation is in bytes, not words, to match layout_activation
+  // below, but the return value is in words.
+
+  //
+  //  0       [TOP_IJAVA_FRAME_ABI]                                                    \
+  //          alignment (optional)                                             \       |
+  //          [operand stack / Java parameters] > stack                        |       |
+  //          [monitors] (optional)             > monitors                     |       |
+  //          [PARENT_IJAVA_FRAME_ABI]                                \        |       |
+  //          [BytecodeInterpreter object]      > interpreter \       |        |       |
+  //          alignment (optional)                            | round | parent | round | top
+  //          [Java result] (2 slots)           > result      |       |        |       |
+  //          [Java non-arg locals]             \ locals      |       |        |       |
+  //          [arg locals]                      /             /       /        /       /
+  //
+
+  int locals = method->max_locals() * BytesPerWord;
+  int interpreter = frame::interpreter_frame_cinterpreterstate_size_in_bytes();
+  int result = 2 * BytesPerWord;
+
+  int parent = round_to(interpreter + result + locals, 16) + frame::parent_ijava_frame_abi_size;
+
+  int stack = method->max_stack() * BytesPerWord;
+  int monitors = method->is_synchronized() ? frame::interpreter_frame_monitor_size_in_bytes() : 0;
+  int top = round_to(parent + monitors + stack, 16) + frame::top_ijava_frame_abi_size;
+
+  return (top / BytesPerWord);
+}
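+// Worked example of the computation above (illustrative only; the concrete
+// sizes are platform constants and the values assumed here are hypothetical):
+// for a synchronized method with max_locals = 4 and max_stack = 6, assuming
+// interpreter_frame_cinterpreterstate_size_in_bytes() = 256,
+// parent_ijava_frame_abi_size = 64, top_ijava_frame_abi_size = 112 and
+// interpreter_frame_monitor_size_in_bytes() = 16:
+//   locals  = 4 * 8 = 32,  result = 16
+//   parent  = round_to(256 + 16 + 32, 16) + 64 = 304 + 64 = 368
+//   stack   = 6 * 8 = 48,  monitors = 16
+//   top     = round_to(368 + 16 + 48, 16) + 112 = 432 + 112 = 544
+// so 544 / 8 = 68 words would be returned.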
+
+void BytecodeInterpreter::layout_interpreterState(interpreterState to_fill,
+                                                  frame* caller,
+                                                  frame* current,
+                                                  Method* method,
+                                                  intptr_t* locals,
+                                                  intptr_t* stack,
+                                                  intptr_t* stack_base,
+                                                  intptr_t* monitor_base,
+                                                  intptr_t* frame_sp,
+                                                  bool is_top_frame) {
+  // What about any vtable?
+  //
+  to_fill->_thread = JavaThread::current();
+  // This gets filled in later but make it something recognizable for now.
+  to_fill->_bcp = method->code_base();
+  to_fill->_locals = locals;
+  to_fill->_constants = method->constants()->cache();
+  to_fill->_method = method;
+  to_fill->_mdx = NULL;
+  to_fill->_stack = stack;
+
+  if (is_top_frame && JavaThread::current()->popframe_forcing_deopt_reexecution()) {
+    to_fill->_msg = deopt_resume2;
+  } else {
+    to_fill->_msg = method_resume;
+  }
+  to_fill->_result._to_call._bcp_advance = 0;
+  to_fill->_result._to_call._callee_entry_point = NULL; // doesn't matter to anyone
+  to_fill->_result._to_call._callee = NULL; // doesn't matter to anyone
+  to_fill->_prev_link = NULL;
+
+  if (caller->is_interpreted_frame()) {
+    interpreterState prev  = caller->get_interpreterState();
+
+    // Support MH calls. Make sure the interpreter will return the right address:
+    // 1. Caller did an ordinary interpreted->compiled call: Set a prev_state
+    //    which makes the CPP interpreter return to the frame manager
+    //    "return_from_interpreted_method" entry after finishing execution.
+    // 2. Caller did a MH call: If the caller has a MethodHandleInvoke in its
+    //    state (invariant: must be the caller of the bottom vframe) we used the
+    //    "call_special" entry to do the call, meaning the arguments have not been
+    //    popped from the stack. Therefore, don't enter a prev state in this case
+    //    in order to return to the "return_from_native" frame manager entry which
+    //    takes care of popping the arguments. Also, don't overwrite the MH.invoke
+    //    Method in the prev_state in order to be able to figure out the number of
+    //    arguments to pop.
+    // The parameter method can represent MethodHandle.invokeExact(...).
+    // The MethodHandleCompiler generates these synthetic Methods,
+    // including bytecodes, if an invokedynamic call gets inlined. In
+    // this case we want to return like from any other interpreted
+    // Java call, so we set _prev_link.
+    to_fill->_prev_link = prev;
+
+    if (*prev->_bcp == Bytecodes::_invokeinterface || *prev->_bcp == Bytecodes::_invokedynamic) {
+      prev->_result._to_call._bcp_advance = 5;
+    } else {
+      prev->_result._to_call._bcp_advance = 3;
+    }
+  }
+  to_fill->_oop_temp = NULL;
+  to_fill->_stack_base = stack_base;
+  // Need +1 here because stack_base points to the word just above the
+  // first expr stack entry and stack_limit is supposed to point to
+  // the word just below the last expr stack entry. See
+  // generate_compute_interpreter_state.
+  to_fill->_stack_limit = stack_base - (method->max_stack() + 1);
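+  // Illustrative sketch of the invariant above: with max_stack = 3 and
+  // stack_base = B, the expression stack can occupy words B-1 .. B-3, so
+  // _stack_limit = B - 4 is the word just below the last possible entry.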
+  to_fill->_monitor_base = (BasicObjectLock*) monitor_base;
+
+  to_fill->_frame_bottom = frame_sp;
+
+  // PPC64 specific
+  to_fill->_last_Java_pc = NULL;
+  to_fill->_last_Java_fp = NULL;
+  to_fill->_last_Java_sp = frame_sp;
+#ifdef ASSERT
+  to_fill->_self_link = to_fill;
+  to_fill->_native_fresult = 123456.789;
+  to_fill->_native_lresult = CONST64(0xdeafcafedeadc0de);
+#endif
+}
+
+void BytecodeInterpreter::pd_layout_interpreterState(interpreterState istate,
+                                                     address last_Java_pc,
+                                                     intptr_t* last_Java_fp) {
+  istate->_last_Java_pc = last_Java_pc;
+  istate->_last_Java_fp = last_Java_fp;
+}
+
+int AbstractInterpreter::layout_activation(Method* method,
+                                           int temps,        // Number of slots on java expression stack in use.
+                                           int popframe_args,
+                                           int monitors,     // Number of active monitors.
+                                           int caller_actual_parameters,
+                                           int callee_params,// Number of slots for callee parameters.
+                                           int callee_locals,// Number of slots for locals.
+                                           frame* caller,
+                                           frame* interpreter_frame,
+                                           bool is_top_frame,
+                                           bool is_bottom_frame) {
+
+  // NOTE: this code must exactly mimic what
+  // InterpreterGenerator::generate_compute_interpreter_state() does
+  // as far as allocating an interpreter frame, with one exception:
+  // with the C++ based interpreter only the topmost frame has a
+  // full-sized expression stack. The 16 byte slop factor is both the
+  // ABI scratch area and a place to hold a result from a callee on
+  // its way to the caller's stack.
+
+  int monitor_size = frame::interpreter_frame_monitor_size_in_bytes() * monitors;
+  int frame_size;
+  int top_frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+                                + monitor_size
+                                + (method->max_stack() * Interpreter::stackElementWords * BytesPerWord)
+                                + 2 * BytesPerWord,
+                                frame::alignment_in_bytes)
+                      + frame::top_ijava_frame_abi_size;
+  if (is_top_frame) {
+    frame_size = top_frame_size;
+  } else {
+    frame_size = round_to(frame::interpreter_frame_cinterpreterstate_size_in_bytes()
+                          + monitor_size
+                          + ((temps - callee_params + callee_locals) *
+                             Interpreter::stackElementWords * BytesPerWord)
+                          + 2 * BytesPerWord,
+                          frame::alignment_in_bytes)
+                 + frame::parent_ijava_frame_abi_size;
+    assert(popframe_args == 0, "non-zero for top_frame only");
+  }
+
+  // If we actually have a frame to lay out, we must now fill in all the pieces.
+  if (interpreter_frame != NULL) {
+
+    intptr_t sp = (intptr_t)interpreter_frame->sp();
+    intptr_t fp = *(intptr_t *)sp;
+    assert(fp == (intptr_t)caller->sp(), "fp must match");
+    interpreterState cur_state =
+      (interpreterState)(fp - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+
+    // Now fill in the interpreterState object.
+
+    intptr_t* locals;
+    if (caller->is_interpreted_frame()) {
+      // Locals must agree with the caller because they will be used to set
+      // the caller's tos when we return.
+      interpreterState prev  = caller->get_interpreterState();
+      // Calculate start of "locals" for MH calls.  For MH calls, the
+      // current method() (= MH target) and prev->callee() (=
+      // MH.invoke*()) are different and especially have different
+      // signatures. To pop the arguments of the caller, we must use
+      // prev->callee()->size_of_parameters() because that's what the
+      // caller actually pushed.  Currently, for synthetic MH calls
+      // (deoptimized from inlined MH calls), detected by
+      // is_method_handle_invoke(), we use the callee's arguments
+      // because here the caller's and callee's signatures match.
+      if (true /*!caller->is_at_mh_callsite()*/) {
+        locals = prev->stack() + method->size_of_parameters();
+      } else {
+        // Normal MH call.
+        locals = prev->stack() + prev->callee()->size_of_parameters();
+      }
+    } else {
+      locals = (intptr_t*) (fp + ((method->max_locals() - 1) * BytesPerWord) +
+                            frame::parent_ijava_frame_abi_size);
+    }
+
+    intptr_t* monitor_base = (intptr_t*) cur_state;
+    intptr_t* stack_base   = (intptr_t*) ((intptr_t) monitor_base - monitor_size);
+
+    // Provide pop_frame capability on PPC64, add popframe_args.
+    // +1 because stack is always prepushed.
+    intptr_t* stack = (intptr_t*) ((intptr_t) stack_base - (temps + popframe_args + 1) * BytesPerWord);
+
+    BytecodeInterpreter::layout_interpreterState(cur_state,
+                                                 caller,
+                                                 interpreter_frame,
+                                                 method,
+                                                 locals,
+                                                 stack,
+                                                 stack_base,
+                                                 monitor_base,
+                                                 (intptr_t*)(((intptr_t)fp)-top_frame_size),
+                                                 is_top_frame);
+
+    BytecodeInterpreter::pd_layout_interpreterState(cur_state, interpreter_return_address,
+                                                    interpreter_frame->fp());
+  }
+  return frame_size/BytesPerWord;
+}
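+// Worked example for a parent (non-top) frame, with hypothetical sizes
+// (assuming Interpreter::stackElementWords = 1, a cinterpreterstate size
+// of 256 bytes and parent_ijava_frame_abi_size = 64): with temps = 2,
+// callee_params = 3, callee_locals = 5 and monitors = 0, the expression
+// stack term is (2 - 3 + 5) * 1 * 8 = 32 bytes, so
+//   frame_size = round_to(256 + 0 + 32 + 16, 16) + 64 = 304 + 64 = 368,
+// and 368 / 8 = 46 words would be returned.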
+
+#endif // CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/cppInterpreter_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
+#define CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
+
+  protected:
+
+  // Size of interpreter code.  Increase if too small.  Interpreter will
+  // fail with a guarantee ("not enough space for interpreter generation")
+  // if too small.
+  // Run with +PrintInterpreter to get the VM to print out the size.
+  // Max size with JVMTI.
+
+  const static int InterpreterCodeSize = 12*K;
+
+#endif // CPU_PPC_VM_CPPINTERPRETER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/debug_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/codeCache.hpp"
+#include "code/nmethod.hpp"
+#include "runtime/frame.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/top.hpp"
+
+void pd_ps(frame f) {}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/depChecker_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_DEPCHECKER_PPC_HPP
+#define CPU_PPC_VM_DEPCHECKER_PPC_HPP
+
+// Nothing to do on ppc64
+
+#endif // CPU_PPC_VM_DEPCHECKER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/disassembler_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_DISASSEMBLER_PPC_HPP
+#define CPU_PPC_VM_DISASSEMBLER_PPC_HPP
+
+  static int pd_instruction_alignment() {
+    return sizeof(int);
+  }
+
+  static const char* pd_cpu_opts() {
+    return "ppc64";
+  }
+
+#endif // CPU_PPC_VM_DISASSEMBLER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/frame_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/markOop.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/monitorChunk.hpp"
+#include "runtime/signature.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "vmreg_ppc.inline.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#include "runtime/vframeArray.hpp"
+#endif
+
+#ifndef CC_INTERP
+#error "CC_INTERP must be defined on PPC64"
+#endif
+
+#ifdef ASSERT
+void RegisterMap::check_location_valid() {
+}
+#endif // ASSERT
+
+bool frame::safe_for_sender(JavaThread *thread) {
+  bool safe = false;
+  address   cursp = (address)sp();
+  address   curfp = (address)fp();
+  if ((cursp != NULL && curfp != NULL &&
+      (cursp <= thread->stack_base() && cursp >= thread->stack_base() - thread->stack_size())) &&
+      (curfp <= thread->stack_base() && curfp >= thread->stack_base() - thread->stack_size())) {
+    safe = true;
+  }
+  return safe;
+}
+
+bool frame::is_interpreted_frame() const {
+  return Interpreter::contains(pc());
+}
+
+frame frame::sender_for_entry_frame(RegisterMap *map) const {
+  assert(map != NULL, "map must be set");
+  // Java frame called from C; skip all C frames and return top C
+  // frame of that chunk as the sender.
+  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
+  assert(!entry_frame_is_first(), "next Java fp must be non-zero");
+  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
+  map->clear();
+  assert(map->include_argument_oops(), "should be set by clear");
+
+  if (jfa->last_Java_pc() != NULL) {
+    frame fr(jfa->last_Java_sp(), jfa->last_Java_pc());
+    return fr;
+  }
+  // Last_Java_pc is not set if we come here from compiled code. The
+  // frame constructor retrieves the PC from the stack.
+  frame fr(jfa->last_Java_sp());
+  return fr;
+}
+
+frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
+  // Pass the caller's initial_caller_sp as unextended_sp.
+  return frame(sender_sp(), sender_pc(), (intptr_t*)((parent_ijava_frame_abi *)callers_abi())->initial_caller_sp);
+}
+
+frame frame::sender_for_compiled_frame(RegisterMap *map) const {
+  assert(map != NULL, "map must be set");
+
+  // Frame owned by compiler.
+  address pc = *compiled_sender_pc_addr(_cb);
+  frame caller(compiled_sender_sp(_cb), pc);
+
+  // Now adjust the map.
+
+  // Get the rest.
+  if (map->update_map()) {
+    // Tell GC to use argument oopmaps for some runtime stubs that need it.
+    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
+    if (_cb->oop_maps() != NULL) {
+      OopMapSet::update_register_map(this, map);
+    }
+  }
+
+  return caller;
+}
+
+intptr_t* frame::compiled_sender_sp(CodeBlob* cb) const {
+  return sender_sp();
+}
+
+address* frame::compiled_sender_pc_addr(CodeBlob* cb) const {
+  return sender_pc_addr();
+}
+
+frame frame::sender(RegisterMap* map) const {
+  // Default is that we don't have to follow argument oops. The
+  // sender_for_xxx methods will update the map accordingly.
+  map->set_include_argument_oops(false);
+
+  if (is_entry_frame())       return sender_for_entry_frame(map);
+  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
+  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
+
+  if (_cb != NULL) {
+    return sender_for_compiled_frame(map);
+  }
+  // Must be native-compiled frame, i.e. the marshaling code for native
+  // methods that exists in the core system.
+  return frame(sender_sp(), sender_pc());
+}
+
+void frame::patch_pc(Thread* thread, address pc) {
+  if (TracePcPatching) {
+    tty->print_cr("patch_pc at address " PTR_FORMAT " [" PTR_FORMAT " -> " PTR_FORMAT "]",
+                  &((address*) _sp)[-1], ((address*) _sp)[-1], pc);
+  }
+  own_abi()->lr = (uint64_t)pc;
+  _cb = CodeCache::find_blob(pc);
+  if (_cb != NULL && _cb->is_nmethod() && ((nmethod*)_cb)->is_deopt_pc(_pc)) {
+    address orig = (((nmethod*)_cb)->get_original_pc(this));
+    assert(orig == _pc, "expected original to be stored before patching");
+    _deopt_state = is_deoptimized;
+    // Leave _pc as is.
+  } else {
+    _deopt_state = not_deoptimized;
+    _pc = pc;
+  }
+}
+
+void frame::pd_gc_epilog() {
+  if (is_interpreted_frame()) {
+    // Set constant pool cache entry for interpreter.
+    Method* m = interpreter_frame_method();
+
+    *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
+  }
+}
+
+bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
+  // Is there anything to do?
+  assert(is_interpreted_frame(), "Not an interpreted frame");
+  return true;
+}
+
+BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
+  assert(is_interpreted_frame(), "interpreted frame expected");
+  Method* method = interpreter_frame_method();
+  BasicType type = method->result_type();
+
+  if (method->is_native()) {
+    // Prior to calling into the runtime to notify the method exit, the
+    // possible result value is saved in the interpreter frame.
+#ifdef CC_INTERP
+    interpreterState istate = get_interpreterState();
+    address lresult = (address)istate + in_bytes(BytecodeInterpreter::native_lresult_offset());
+    address fresult = (address)istate + in_bytes(BytecodeInterpreter::native_fresult_offset());
+#endif
+
+    switch (method->result_type()) {
+      case T_OBJECT:
+      case T_ARRAY: {
+        oop* obj_p = *(oop**)lresult;
+        oop obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
+        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
+        *oop_result = obj;
+        break;
+      }
+      // We use std/stfd to store the values.
+      case T_BOOLEAN : value_result->z = (jboolean) *(unsigned long*)lresult; break;
+      case T_INT     : value_result->i = (jint)     *(long*)lresult;          break;
+      case T_CHAR    : value_result->c = (jchar)    *(unsigned long*)lresult; break;
+      case T_SHORT   : value_result->s = (jshort)   *(long*)lresult;          break;
+      case T_BYTE    : value_result->b = (jbyte)    *(long*)lresult;          break;
+      case T_LONG    : value_result->j = (jlong)    *(long*)lresult;          break;
+      case T_FLOAT   : value_result->f = (jfloat)   *(double*)fresult;        break;
+      case T_DOUBLE  : value_result->d = (jdouble)  *(double*)fresult;        break;
+      case T_VOID    : /* Nothing to do */ break;
+      default        : ShouldNotReachHere();
+    }
+  } else {
+    intptr_t* tos_addr = interpreter_frame_tos_address();
+    switch (method->result_type()) {
+      case T_OBJECT:
+      case T_ARRAY: {
+        oop obj = *(oop*)tos_addr;
+        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
+        *oop_result = obj;
+        break;
+      }
+      case T_BOOLEAN : value_result->z = (jboolean) *(jint*)tos_addr; break;
+      case T_BYTE    : value_result->b = (jbyte) *(jint*)tos_addr; break;
+      case T_CHAR    : value_result->c = (jchar) *(jint*)tos_addr; break;
+      case T_SHORT   : value_result->s = (jshort) *(jint*)tos_addr; break;
+      case T_INT     : value_result->i = *(jint*)tos_addr; break;
+      case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
+      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr; break;
+      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
+      case T_VOID    : /* Nothing to do */ break;
+      default        : ShouldNotReachHere();
+    }
+  }
+  return type;
+}
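+// Hypothetical usage sketch (the names 'fr' and 'use' are assumptions for
+// illustration, not part of this change):
+//
+//   oop obj = NULL;
+//   jvalue value;
+//   BasicType type = fr.interpreter_frame_result(&obj, &value);
+//   if (type == T_OBJECT || type == T_ARRAY) { /* use(obj)     */ }
+//   else if (type != T_VOID)                 { /* use(value.*) */ }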
+
+#ifndef PRODUCT
+
+void frame::describe_pd(FrameValues& values, int frame_no) {
+  if (is_interpreted_frame()) {
+#ifdef CC_INTERP
+    interpreterState istate = get_interpreterState();
+    values.describe(frame_no, (intptr_t*)istate, "istate");
+    values.describe(frame_no, (intptr_t*)&(istate->_thread), " thread");
+    values.describe(frame_no, (intptr_t*)&(istate->_bcp), " bcp");
+    values.describe(frame_no, (intptr_t*)&(istate->_locals), " locals");
+    values.describe(frame_no, (intptr_t*)&(istate->_constants), " constants");
+    values.describe(frame_no, (intptr_t*)&(istate->_method), err_msg(" method = %s", istate->_method->name_and_sig_as_C_string()));
+    values.describe(frame_no, (intptr_t*)&(istate->_mdx), " mdx");
+    values.describe(frame_no, (intptr_t*)&(istate->_stack), " stack");
+    values.describe(frame_no, (intptr_t*)&(istate->_msg), err_msg(" msg = %s", BytecodeInterpreter::C_msg(istate->_msg)));
+    values.describe(frame_no, (intptr_t*)&(istate->_result), " result");
+    values.describe(frame_no, (intptr_t*)&(istate->_prev_link), " prev_link");
+    values.describe(frame_no, (intptr_t*)&(istate->_oop_temp), " oop_temp");
+    values.describe(frame_no, (intptr_t*)&(istate->_stack_base), " stack_base");
+    values.describe(frame_no, (intptr_t*)&(istate->_stack_limit), " stack_limit");
+    values.describe(frame_no, (intptr_t*)&(istate->_monitor_base), " monitor_base");
+    values.describe(frame_no, (intptr_t*)&(istate->_frame_bottom), " frame_bottom");
+    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_pc), " last_Java_pc");
+    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_fp), " last_Java_fp");
+    values.describe(frame_no, (intptr_t*)&(istate->_last_Java_sp), " last_Java_sp");
+    values.describe(frame_no, (intptr_t*)&(istate->_self_link), " self_link");
+    values.describe(frame_no, (intptr_t*)&(istate->_native_fresult), " native_fresult");
+    values.describe(frame_no, (intptr_t*)&(istate->_native_lresult), " native_lresult");
+#else
+    Unimplemented();
+#endif
+  }
+}
+#endif
+
+void frame::adjust_unextended_sp() {
+  // If we are returning to a compiled MethodHandle call site, the
+  // saved_fp will in fact be a saved value of the unextended SP. The
+  // simplest way to tell whether we are returning to such a call site
+  // is as follows:
+
+  if (is_compiled_frame() && false /*is_at_mh_callsite()*/) {  // TODO PPC port
+    // If the sender PC is a deoptimization point, get the original
+    // PC. For MethodHandle call site the unextended_sp is stored in
+    // saved_fp.
+    _unextended_sp = _fp - _cb->frame_size();
+
+#ifdef ASSERT
+    nmethod *sender_nm = _cb->as_nmethod_or_null();
+    assert(sender_nm && *_sp == *_unextended_sp, "backlink changed");
+
+    intptr_t* sp = _unextended_sp;  // check if stack can be walked from here
+    for (int x = 0; x < 5; ++x) {   // check up to five backlinks
+      intptr_t* prev_sp = *(intptr_t**)sp;
+      if (prev_sp == 0) break;      // end of stack
+      assert(prev_sp > sp, "broken stack");
+      sp = prev_sp;
+    }
+
+    if (sender_nm->is_deopt_mh_entry(_pc)) { // checks for deoptimization
+      address original_pc = sender_nm->get_original_pc(this);
+      assert(sender_nm->insts_contains(original_pc), "original PC must be in nmethod");
+      assert(sender_nm->is_method_handle_return(original_pc), "must be");
+    }
+#endif
+  }
+}
+
+intptr_t *frame::initial_deoptimization_info() {
+  // unused... but returns fp() to minimize changes introduced by 7087445
+  return fp();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/frame_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_FRAME_PPC_HPP
+#define CPU_PPC_VM_FRAME_PPC_HPP
+
+#include "runtime/synchronizer.hpp"
+#include "utilities/top.hpp"
+
+#ifndef CC_INTERP
+#error "CC_INTERP must be defined on PPC64"
+#endif
+
+  //  C frame layout on PPC-64.
+  //
+  //  In this figure the stack grows upwards, while memory grows
+  //  downwards. See "64-bit PowerPC ELF ABI Supplement Version 1.7",
+  //  IBM Corp. (2003-10-29)
+  //  (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf).
+  //
+  //  Square brackets denote stack regions possibly larger
+  //  than a single 64 bit slot.
+  //
+  //  STACK:
+  //    0       [C_FRAME]               <-- SP after prolog (mod 16 = 0)
+  //            [C_FRAME]               <-- SP before prolog
+  //            ...
+  //            [C_FRAME]
+  //
+  //  C_FRAME:
+  //    0       [ABI_112]
+  //    112     CARG_9: outgoing arg 9 (arg_1 ... arg_8 via gpr_3 ... gpr_10)
+  //            ...
+  //    40+M*8  CARG_M: outgoing arg M (M is the maximum of outgoing args taken over all call sites in the procedure)
+  //            local 1
+  //            ...
+  //            local N
+  //            spill slot for vector reg (16 bytes aligned)
+  //            ...
+  //            spill slot for vector reg
+  //            alignment       (4 or 12 bytes)
+  //    V       SR_VRSAVE
+  //    V+4     spill slot for GR
+  //    ...     ...
+  //            spill slot for GR
+  //            spill slot for FR
+  //            ...
+  //            spill slot for FR
+  //
+  //  ABI_48:
+  //    0       caller's SP
+  //    8       space for condition register (CR) for next call
+  //    16      space for link register (LR) for next call
+  //    24      reserved
+  //    32      reserved
+  //    40      space for TOC (=R2) register for next call
+  //
+  //  ABI_112:
+  //    0       [ABI_48]
+  //    48      CARG_1: spill slot for outgoing arg 1. Used by next callee.
+  //    ...     ...
+  //    104     CARG_8: spill slot for outgoing arg 8. Used by next callee.
+  //
+
+ public:
+
+  // C frame layout
+
+  enum {
+    // stack alignment
+    alignment_in_bytes = 16,
+    // log_2(16*8 bits) = 7.
+    log_2_of_alignment_in_bits = 7
+  };
+
+  // ABI_48:
+  struct abi_48 {
+    uint64_t callers_sp;
+    uint64_t cr;                                  //_16
+    uint64_t lr;
+    uint64_t reserved1;                           //_16
+    uint64_t reserved2;
+    uint64_t toc;                                 //_16
+    // nothing to add here!
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    abi_48_size = sizeof(abi_48)
+  };
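+  // Sanity note: abi_48 consists of six 64-bit slots (callers_sp, cr, lr,
+  // two reserved words, toc), so abi_48_size = 6 * 8 = 48 bytes; abi_112
+  // below adds eight argument spill slots, 48 + 8 * 8 = 112 bytes.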
+
+  struct abi_112 : abi_48 {
+    uint64_t carg_1;
+    uint64_t carg_2;                              //_16
+    uint64_t carg_3;
+    uint64_t carg_4;                              //_16
+    uint64_t carg_5;
+    uint64_t carg_6;                              //_16
+    uint64_t carg_7;
+    uint64_t carg_8;                              //_16
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    abi_112_size = sizeof(abi_112)
+  };
+
+  #define _abi(_component) \
+          (offset_of(frame::abi_112, _component))
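+  // Illustrative example: _abi(lr) expands to
+  // offset_of(frame::abi_112, lr), i.e. 16 with the layout above
+  // (callers_sp at 0, cr at 8, lr at 16). Assembler code uses such
+  // offsets as load/store displacements, e.g.
+  //   __ ld(R21_tmp1, _parent_ijava_frame_abi(lr), R1_SP);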
+
+  struct abi_112_spill : abi_112 {
+    // additional spill slots
+    uint64_t spill_ret;
+    uint64_t spill_fret;                          //_16
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    abi_112_spill_size = sizeof(abi_112_spill)
+  };
+
+  #define _abi_112_spill(_component) \
+          (offset_of(frame::abi_112_spill, _component))
+
+  // non-volatile GPRs:
+
+  struct spill_nonvolatiles {
+    uint64_t r14;
+    uint64_t r15;                                 //_16
+    uint64_t r16;
+    uint64_t r17;                                 //_16
+    uint64_t r18;
+    uint64_t r19;                                 //_16
+    uint64_t r20;
+    uint64_t r21;                                 //_16
+    uint64_t r22;
+    uint64_t r23;                                 //_16
+    uint64_t r24;
+    uint64_t r25;                                 //_16
+    uint64_t r26;
+    uint64_t r27;                                 //_16
+    uint64_t r28;
+    uint64_t r29;                                 //_16
+    uint64_t r30;
+    uint64_t r31;                                 //_16
+
+    double f14;
+    double f15;
+    double f16;
+    double f17;
+    double f18;
+    double f19;
+    double f20;
+    double f21;
+    double f22;
+    double f23;
+    double f24;
+    double f25;
+    double f26;
+    double f27;
+    double f28;
+    double f29;
+    double f30;
+    double f31;
+
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    spill_nonvolatiles_size = sizeof(spill_nonvolatiles)
+  };
+
+  #define _spill_nonvolatiles_neg(_component) \
+     (int)(-frame::spill_nonvolatiles_size + offset_of(frame::spill_nonvolatiles, _component))
+
+  //  Frame layout for the Java interpreter on PPC64.
+  //
+  //  This frame layout provides a C-like frame for every Java frame.
+  //
+  //  In these figures the stack grows upwards, while memory grows
+  //  downwards. Square brackets denote regions possibly larger than
+  //  single 64 bit slots.
+  //
+  //  STACK (no JNI, no compiled code, no library calls,
+  //         interpreter-loop is active):
+  //    0       [InterpretMethod]
+  //            [TOP_IJAVA_FRAME]
+  //            [PARENT_IJAVA_FRAME]
+  //            ...
+  //            [PARENT_IJAVA_FRAME]
+  //            [ENTRY_FRAME]
+  //            [C_FRAME]
+  //            ...
+  //            [C_FRAME]
+  //
+  //  TOP_IJAVA_FRAME:
+  //    0       [TOP_IJAVA_FRAME_ABI]
+  //            alignment (optional)
+  //            [operand stack]
+  //            [monitors] (optional)
+  //            [cInterpreter object]
+  //            result, locals, and arguments are in parent frame!
+  //
+  //  PARENT_IJAVA_FRAME:
+  //    0       [PARENT_IJAVA_FRAME_ABI]
+  //            alignment (optional)
+  //            [callee's Java result]
+  //            [callee's locals w/o arguments]
+  //            [outgoing arguments]
+  //            [used part of operand stack w/o arguments]
+  //            [monitors] (optional)
+  //            [cInterpreter object]
+  //
+  //  ENTRY_FRAME:
+  //    0       [PARENT_IJAVA_FRAME_ABI]
+  //            alignment (optional)
+  //            [callee's Java result]
+  //            [callee's locals w/o arguments]
+  //            [outgoing arguments]
+  //            [ENTRY_FRAME_LOCALS]
+  //
+  //  PARENT_IJAVA_FRAME_ABI:
+  //    0       [ABI_48]
+  //            top_frame_sp
+  //            initial_caller_sp
+  //
+  //  TOP_IJAVA_FRAME_ABI:
+  //    0       [PARENT_IJAVA_FRAME_ABI]
+  //            carg_3_unused
+  //            carg_4_unused
+  //            carg_5_unused
+  //            carg_6_unused
+  //            carg_7_unused
+  //            frame_manager_lr
+  //
+
+  // PARENT_IJAVA_FRAME_ABI
+
+  struct parent_ijava_frame_abi : abi_48 {
+    // SOE registers.
+    // C2i adapters spill their top-frame stack-pointer here.
+    uint64_t top_frame_sp;                        //      carg_1
+    // Sp of calling compiled frame before it was resized by the c2i
+    // adapter or sp of call stub. Does not contain a valid value for
+    // non-initial frames.
+    uint64_t initial_caller_sp;                   //      carg_2
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    parent_ijava_frame_abi_size = sizeof(parent_ijava_frame_abi)
+  };
+
+  #define _parent_ijava_frame_abi(_component) \
+          (offset_of(frame::parent_ijava_frame_abi, _component))
+
+  // TOP_IJAVA_FRAME_ABI
+
+  struct top_ijava_frame_abi : parent_ijava_frame_abi {
+    uint64_t carg_3_unused;                       //      carg_3
+    uint64_t carg_4_unused;                       //_16   carg_4
+    uint64_t carg_5_unused;                       //      carg_5
+    uint64_t carg_6_unused;                       //_16   carg_6
+    uint64_t carg_7_unused;                       //      carg_7
+    // Use arg8 for storing frame_manager_lr. The size of
+    // top_ijava_frame_abi must match abi_112.
+    uint64_t frame_manager_lr;                    //_16   carg_8
+    // nothing to add here!
+    // aligned to frame::alignment_in_bytes (16)
+  };
+
+  enum {
+    top_ijava_frame_abi_size = sizeof(top_ijava_frame_abi)
+  };
+
+  #define _top_ijava_frame_abi(_component) \
+          (offset_of(frame::top_ijava_frame_abi, _component))
+
+  // ENTRY_FRAME
+
+  struct entry_frame_locals {
+    uint64_t call_wrapper_address;
+    uint64_t result_address;                      //_16
+    uint64_t result_type;
+    uint64_t arguments_tos_address;               //_16
+    // aligned to frame::alignment_in_bytes (16)
+    uint64_t r[spill_nonvolatiles_size/sizeof(uint64_t)];
+  };
+
+  enum {
+    entry_frame_locals_size = sizeof(entry_frame_locals)
+  };
+
+  #define _entry_frame_locals_neg(_component) \
+    (int)(-frame::entry_frame_locals_size + offset_of(frame::entry_frame_locals, _component))
+
+
+  //  Frame layout for JIT generated methods
+  //
+  //  In these figures the stack grows upwards, while memory grows
+  //  downwards. Square brackets denote regions possibly larger than single
+  //  64 bit slots.
+  //
+  //  STACK (interpreted Java calls JIT generated Java):
+  //          [JIT_FRAME]                                <-- SP (mod 16 = 0)
+  //          [TOP_IJAVA_FRAME]
+  //         ...
+  //
+  //  JIT_FRAME (is a C frame according to PPC-64 ABI):
+  //          [out_preserve]
+  //          [out_args]
+  //          [spills]
+  //          [pad_1]
+  //          [monitor] (optional)
+  //       ...
+  //          [monitor] (optional)
+  //          [pad_2]
+  //          [in_preserve] added / removed by prolog / epilog
+  //
+
+  // JIT_ABI (TOP and PARENT)
+
+  struct jit_abi {
+    uint64_t callers_sp;
+    uint64_t cr;
+    uint64_t lr;
+    uint64_t toc;
+    // Nothing to add here!
+    // NOT ALIGNED to frame::alignment_in_bytes (16).
+  };
+
+  struct jit_out_preserve : jit_abi {
+    // Nothing to add here!
+  };
+
+  struct jit_in_preserve {
+    // Nothing to add here!
+  };
+
+  enum {
+    jit_out_preserve_size = sizeof(jit_out_preserve),
+    jit_in_preserve_size  = sizeof(jit_in_preserve)
+  };
+
+  struct jit_monitor {
+    uint64_t monitor[1];
+  };
+
+  enum {
+    jit_monitor_size = sizeof(jit_monitor),
+  };
+
+ private:
+
+  //  STACK:
+  //            ...
+  //            [THIS_FRAME]             <-- this._sp (stack pointer for this frame)
+  //            [CALLER_FRAME]           <-- this.fp() (_sp of caller's frame)
+  //            ...
+  //
+
+  // frame pointer for this frame
+  intptr_t* _fp;
+
+  // The frame's stack pointer before it has been extended by a c2i adapter;
+  // needed by deoptimization
+  intptr_t* _unextended_sp;
+  void adjust_unextended_sp();
+
+ public:
+
+  // Accessors for fields
+  intptr_t* fp() const { return _fp; }
+
+  // Accessors for ABIs
+  inline abi_48* own_abi()     const { return (abi_48*) _sp; }
+  inline abi_48* callers_abi() const { return (abi_48*) _fp; }
+
+ private:
+
+  // Find codeblob and set deopt_state.
+  inline void find_codeblob_and_set_pc_and_deopt_state(address pc);
+
+ public:
+
+  // Constructors
+  inline frame(intptr_t* sp);
+  frame(intptr_t* sp, address pc);
+  inline frame(intptr_t* sp, address pc, intptr_t* unextended_sp);
+
+ private:
+
+  intptr_t* compiled_sender_sp(CodeBlob* cb) const;
+  address*  compiled_sender_pc_addr(CodeBlob* cb) const;
+  address*  sender_pc_addr(void) const;
+
+ public:
+
+#ifdef CC_INTERP
+  // Additional interface for interpreter frames:
+  inline interpreterState get_interpreterState() const;
+#endif // CC_INTERP
+
+  // Size of a monitor in bytes.
+  static int interpreter_frame_monitor_size_in_bytes();
+
+  // The size of a cInterpreter object.
+  static inline int interpreter_frame_cinterpreterstate_size_in_bytes();
+
+ private:
+
+  ConstantPoolCache** interpreter_frame_cpoolcache_addr() const;
+
+ public:
+
+  // Additional interface for entry frames:
+  inline entry_frame_locals* get_entry_frame_locals() const {
+    return (entry_frame_locals*) (((address) fp()) - entry_frame_locals_size);
+  }
+
+  enum {
+    // The normal return address needs no adjustment on PPC64.
+    pc_return_offset = 0
+  };
+
+#endif // CPU_PPC_VM_FRAME_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/frame_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_FRAME_PPC_INLINE_HPP
+#define CPU_PPC_VM_FRAME_PPC_INLINE_HPP
+
+#ifndef CC_INTERP
+#error "CC_INTERP must be defined on PPC64"
+#endif
+
+// Inline functions for ppc64 frames:
+
+// Find codeblob and set deopt_state.
+inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
+  assert(pc != NULL, "precondition: must have PC");
+
+  _cb = CodeCache::find_blob(pc);
+  _pc = pc;   // Must be set for get_deopt_original_pc()
+
+  _fp = (intptr_t*)own_abi()->callers_sp;
+  // adjust_unextended_sp() uses _fp - frame_size; it must run after the
+  // _cb and _pc initialization and before get_deopt_original_pc().
+  adjust_unextended_sp();
+
+  address original_pc = nmethod::get_deopt_original_pc(this);
+  if (original_pc != NULL) {
+    _pc = original_pc;
+    _deopt_state = is_deoptimized;
+  } else {
+    _deopt_state = not_deoptimized;
+  }
+
+  assert(((uint64_t)_sp & 0xf) == 0, "SP must be 16-byte aligned");
+}
+
+// Constructors
+
+// Initialize all fields; _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
+inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
+
+inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
+  find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->lr); // also sets _fp and adjusts _unextended_sp
+}
+
+inline frame::frame(intptr_t* sp, address pc) : _sp(sp), _unextended_sp(sp) {
+  find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
+}
+
+inline frame::frame(intptr_t* sp, address pc, intptr_t* unextended_sp) : _sp(sp), _unextended_sp(unextended_sp) {
+  find_codeblob_and_set_pc_and_deopt_state(pc); // also sets _fp and adjusts _unextended_sp
+}
+
+// Accessors
+
+// Return unique id for this frame. The id must have a value where we
+// can distinguish identity and younger/older relationship. NULL
+// represents an invalid (incomparable) frame.
+inline intptr_t* frame::id(void) const {
+  // Use _fp. _sp or _unextended_sp wouldn't be correct due to resizing.
+  return _fp;
+}
+
+// Return true if this frame is older (less recent activation) than
+// the frame represented by id.
+inline bool frame::is_older(intptr_t* id) const {
+  assert(this->id() != NULL && id != NULL, "NULL frame id");
+  // Stack grows towards smaller addresses on ppc64.
+  return this->id() > id;
+}
+
+inline int frame::frame_size(RegisterMap* map) const {
+  // Stack grows towards smaller addresses on PPC64: sender is at a higher address.
+  return sender_sp() - sp();
+}
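+// Example (addresses illustrative): if sp() == 0x7fff00 and the sender's
+// sp is 0x7fff80, the intptr_t* difference is 0x80 / 8 = 16, so
+// frame_size() reports a 16-word frame.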
+
+// Return the frame's stack pointer before it has been extended by a
+// c2i adapter. This is needed by deoptimization for ignoring c2i adapter
+// frames.
+inline intptr_t* frame::unextended_sp() const {
+  return _unextended_sp;
+}
+
+// All frames have this field.
+inline address frame::sender_pc() const {
+  return (address)callers_abi()->lr;
+}
+inline address* frame::sender_pc_addr() const {
+  return (address*)&(callers_abi()->lr);
+}
+
+// All frames have this field.
+inline intptr_t* frame::sender_sp() const {
+  return (intptr_t*)callers_abi();
+}
+
+// All frames have this field.
+inline intptr_t* frame::link() const {
+  return (intptr_t*)callers_abi()->callers_sp;
+}
+
+inline intptr_t* frame::real_fp() const {
+  return fp();
+}
+
+#ifdef CC_INTERP
+
+inline interpreterState frame::get_interpreterState() const {
+  return (interpreterState)(((address)callers_abi())
+                            - frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+}
+
+inline intptr_t** frame::interpreter_frame_locals_addr() const {
+  interpreterState istate = get_interpreterState();
+  return (intptr_t**)&istate->_locals;
+}
+
+inline intptr_t* frame::interpreter_frame_bcx_addr() const {
+  interpreterState istate = get_interpreterState();
+  return (intptr_t*)&istate->_bcp;
+}
+
+inline intptr_t* frame::interpreter_frame_mdx_addr() const {
+  interpreterState istate = get_interpreterState();
+  return (intptr_t*)&istate->_mdx;
+}
+
+inline intptr_t* frame::interpreter_frame_expression_stack() const {
+  return (intptr_t*)interpreter_frame_monitor_end() - 1;
+}
+
+inline jint frame::interpreter_frame_expression_stack_direction() {
+  return -1;
+}
+
+// top of expression stack
+inline intptr_t* frame::interpreter_frame_tos_address() const {
+  interpreterState istate = get_interpreterState();
+  return istate->_stack + 1;
+}
+
+inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
+  return &interpreter_frame_tos_address()[offset];
+}
+
+// monitor elements
+
+// In keeping with the Intel side: end is lower in memory than begin,
+// and the beginning element is the oldest element.
+// Also, begin is one past the last monitor.
+
+inline BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
+  return get_interpreterState()->monitor_base();
+}
+
+inline BasicObjectLock* frame::interpreter_frame_monitor_end() const {
+  return (BasicObjectLock*)get_interpreterState()->stack_base();
+}
+
+inline int frame::interpreter_frame_cinterpreterstate_size_in_bytes() {
+  // Size of an interpreter object. Not aligned with frame size.
+  return round_to(sizeof(BytecodeInterpreter), 8);
+}
+
+inline Method** frame::interpreter_frame_method_addr() const {
+  interpreterState istate = get_interpreterState();
+  return &istate->_method;
+}
+
+// Constant pool cache
+
+inline ConstantPoolCache** frame::interpreter_frame_cpoolcache_addr() const {
+  interpreterState istate = get_interpreterState();
+  return &istate->_constants; // should really use accessor
+}
+
+inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
+  interpreterState istate = get_interpreterState();
+  return &istate->_constants;
+}
+#endif // CC_INTERP
+
+inline int frame::interpreter_frame_monitor_size() {
+  // Number of stack slots for a monitor.
+  return round_to(BasicObjectLock::size(),  // number of stack slots
+                  WordsPerLong);            // number of stack slots for a Java long
+}
+
+inline int frame::interpreter_frame_monitor_size_in_bytes() {
+  return frame::interpreter_frame_monitor_size() * wordSize;
+}
+
+// entry frames
+
+inline intptr_t* frame::entry_frame_argument_at(int offset) const {
+  // Since an entry frame always calls the interpreter first, the
+  // parameters are on the stack and relative to a known register in
+  // the entry frame.
+  intptr_t* tos = (intptr_t*)get_entry_frame_locals()->arguments_tos_address;
+  return &tos[offset + 1]; // prepushed tos
+}
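+// Illustrative note: with arguments_tos_address == T, the prepushed tos
+// means entry_frame_argument_at(0) returns &T[1] and
+// entry_frame_argument_at(1) returns &T[2]; the "+ 1" skips the
+// prepushed slot.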
+
+inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
+  return (JavaCallWrapper**)&get_entry_frame_locals()->call_wrapper_address;
+}
+
+inline oop frame::saved_oop_result(RegisterMap* map) const {
+  return *((oop*)map->location(R3->as_VMReg()));
+}
+
+inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
+  *((oop*)map->location(R3->as_VMReg())) = obj;
+}
+
+#endif // CPU_PPC_VM_FRAME_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/globalDefinitions_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
+#define CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
+
+// Size of PPC Instructions
+const int BytesPerInstWord = 4;
+
+const int StackAlignmentInBytes = 16;
+
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values are properly extended to 64 bits.
+// If set, SharedRuntime::c_calling_convention() must adapt
+// signatures accordingly.
+const bool CCallingConventionRequiresIntsAsLongs = true;
+
+// The PPC CPUs are NOT multiple-copy-atomic.
+#define CPU_NOT_MULTIPLE_COPY_ATOMIC
+
+#endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/globals_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_GLOBALS_PPC_HPP
+#define CPU_PPC_VM_GLOBALS_PPC_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// Sets the default values for platform dependent flags used by the runtime system.
+// (see globals.hpp)
+
+define_pd_global(bool, ConvertSleepToYield,   true);
+define_pd_global(bool, ShareVtableStubs,      false); // Improves performance markedly for mtrt and compress.
+define_pd_global(bool, NeedsDeoptSuspend,     false); // Only register window machines need this.
+
+
+define_pd_global(bool, ImplicitNullChecks,    true);  // Generate code for implicit null checks.
+define_pd_global(bool, TrapBasedNullChecks,   true);
+define_pd_global(bool, UncommonNullCast,      true);  // Uncommon-trap NULLs passed to check cast.
+
+// Use large code-entry alignment.
+define_pd_global(intx, CodeEntryAlignment,    128);
+define_pd_global(intx, OptoLoopAlignment,     16);
+define_pd_global(intx, InlineFrequencyCount,  100);
+define_pd_global(intx, InlineSmallCode,       1500);
+
+define_pd_global(intx, PreInflateSpin,        10);
+
+// Flags for template interpreter.
+define_pd_global(bool, RewriteBytecodes,      true);
+define_pd_global(bool, RewriteFrequentPairs,  true);
+
+define_pd_global(bool, UseMembar,             false);
+
+// GC Ergo Flags
+define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
+
+define_pd_global(uintx, TypeProfileLevel, 0);
+
+// Platform dependent flag handling: flags only defined on this platform.
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)  \
+                                                                            \
+  /* Load poll address from thread. This is used to implement per-thread */ \
+  /* safepoints on platforms != IA64. */                                    \
+  product(bool, LoadPollAddressFromThread, false,                           \
+          "Load polling page address from thread object (required for "     \
+          "per-thread safepoints on platforms != IA64)")                    \
+                                                                            \
+  product(uintx, PowerArchitecturePPC64, 0,                                 \
+          "CPU Version: x for PowerX. Currently recognizes Power5 to "      \
+          "Power7. Default is 0. CPUs newer than Power7 will be "           \
+          "recognized as Power7.")                                          \
+                                                                            \
+  /* Reoptimize code-sequences of calls at runtime, e.g. replace an */      \
+  /* indirect call by a direct call.                                */      \
+  product(bool, ReoptimizeCallSequences, true,                              \
+          "Reoptimize code-sequences of calls at runtime.")                 \
+                                                                            \
+  product(bool, UseLoadInstructionsForStackBangingPPC64, false,             \
+          "Use load instructions for stack banging.")                       \
+                                                                            \
+  /* special instructions */                                                \
+                                                                            \
+  product(bool, UseCountLeadingZerosInstructionsPPC64, true,                \
+          "Use count leading zeros instructions.")                          \
+                                                                            \
+  product(bool, UseExtendedLoadAndReserveInstructionsPPC64, false,          \
+          "Use extended versions of load-and-reserve instructions.")        \
+                                                                            \
+  product(bool, UseRotateAndMaskInstructionsPPC64, true,                    \
+          "Use rotate and mask instructions.")                              \
+                                                                            \
+  product(bool, UseStaticBranchPredictionInCompareAndSwapPPC64, true,       \
+          "Use static branch prediction hints in CAS operations.")          \
+  product(bool, UseStaticBranchPredictionForUncommonPathsPPC64, false,      \
+          "Use static branch prediction hints for uncommon paths.")         \
+                                                                            \
+  product(bool, UsePower6SchedulerPPC64, false,                             \
+          "Use Power6 Scheduler.")                                          \
+                                                                            \
+  product(bool, InsertEndGroupPPC64, false,                                 \
+          "Insert EndGroup instructions to optimize for Power6.")           \
+                                                                            \
+  /* Trap based checks. */                                                  \
+  /* Trap based checks use the ppc trap instructions to check certain */    \
+  /* conditions. This instruction raises a SIGTRAP caught by the      */    \
+  /* exception handler of the VM.                                     */    \
+  product(bool, UseSIGTRAP, true,                                           \
+          "Allow trap instructions that make use of SIGTRAP. Use this to "  \
+          "switch off all optimizations requiring SIGTRAP.")                \
+  product(bool, TrapBasedICMissChecks, true,                                \
+          "Raise and handle SIGTRAP if inline cache miss detected.")        \
+  product(bool, TrapBasedNotEntrantChecks, true,                            \
+          "Raise and handle SIGTRAP if calling not entrant or zombie"       \
+          " method.")                                                       \
+  product(bool, TraceTraps, false, "Trace all traps that the signal "       \
+          "handler handles.")                                               \
+                                                                            \
+  product(bool, ZapMemory, false, "Write 0x0101... to empty memory."        \
+          " Use this to ease debugging.")                                   \
+
+
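+// All of the above are regular product flags and can be set on the command
+// line, e.g. -XX:-UseSIGTRAP or -XX:PowerArchitecturePPC64=6.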
+#endif // CPU_PPC_VM_GLOBALS_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/icBuffer_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "assembler_ppc.inline.hpp"
+#include "code/icBuffer.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "memory/resourceArea.hpp"
+#include "nativeInst_ppc.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/oop.inline2.hpp"
+
+#define __ masm.
+
+int InlineCacheBuffer::ic_stub_code_size() {
+  return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
+}
+
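+// The IC stub emitted below has a fixed two-part layout that the parsing
+// functions further down rely on:
+//   load_const(R19_method, cached_value)  -- parsed by nativeMovConstReg_at()
+//   b64_patchable(entry_point)            -- parsed by nativeJump_at()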
+void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
+  ResourceMark rm;
+  CodeBuffer code(code_begin, ic_stub_code_size());
+  MacroAssembler masm(&code);
+  // Note: even though the code contains embedded metadata, we do not need reloc info
+  // because
+  // (1) the metadata is old (i.e., doesn't matter for scavenges)
+  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
+
+  // Load the cached value ...
+  __ load_const(R19_method, (address) cached_value, R0);
+  // ... and jump to entry point.
+  __ b64_patchable((address) entry_point, relocInfo::none);
+
+  __ flush();
+}
+
+address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
+  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
+  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
+  return jump->jump_destination();
+}
+
+void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
+  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
+  void* o = (void*)move->data();
+  return o;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/icache_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_ppc.inline.hpp"
+#include "runtime/icache.hpp"
+
+// Use inline assembler to implement icache flush.
+int ICache::ppc64_flush_icache(address start, int lines, int magic) {
+  address end = start + (unsigned int)lines*ICache::line_size;
+  assert(start <= end, "flush_icache params");
+
+  // store modified cache lines from data cache
+  for (address a = start; a < end; a += ICache::line_size) {
+    __asm__ __volatile__(
+     "dcbst 0, %0  \n"
+     :
+     : "r" (a)
+     : "memory");
+  }
+
+  // sync instruction
+  __asm__ __volatile__(
+     "sync \n"
+     :
+     :
+     : "memory");
+
+  // invalidate respective cache lines in instruction cache
+  for (address a = start; a < end; a += ICache::line_size) {
+    __asm__ __volatile__(
+     "icbi 0, %0   \n"
+     :
+     : "r" (a)
+     : "memory");
+  }
+
+  // discard fetched instructions
+  __asm__ __volatile__(
+     "isync \n"
+     :
+     :
+     : "memory");
+
+  return magic;
+}
+
+void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
+  StubCodeMark mark(this, "ICache", "flush_icache_stub");
+
+  *flush_icache_stub = (ICache::flush_icache_stub_t)ICache::ppc64_flush_icache;
+
+  // First call to flush itself
+  ICache::invalidate_range((address)(*flush_icache_stub), 0);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/icache_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_ICACHE_PPC_HPP
+#define CPU_PPC_VM_ICACHE_PPC_HPP
+
+// Interface for updating the instruction cache.  Whenever the VM modifies
+// code, part of the processor instruction cache potentially has to be flushed.
+
+class ICache : public AbstractICache {
+  friend class ICacheStubGenerator;
+  static int ppc64_flush_icache(address start, int lines, int magic);
+
+ public:
+  enum {
+    // The actual cache line size is 64 bytes, but we keep 32 here to be
+    // on the safe side on ALL PPC64 implementations (a smaller assumed
+    // line size only causes redundant flushes).
+    log2_line_size = 5,
+    line_size      = 1 << log2_line_size
+  };
+
+  static void ppc64_flush_icache_bytes(address start, int bytes) {
+    // Align the start address to an icache line boundary and transform
+    // bytes to an icache line count.
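+    // Worked example with line_size = 32: for start = 0x1007 and
+    // bytes = 80, line_offset = 7, so we flush (80 + 7 + 31) >> 5 = 3
+    // cache lines starting at 0x1000, covering 0x1000..0x105f.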
+    const uint line_offset = mask_address_bits(start, line_size - 1);
+    ppc64_flush_icache(start - line_offset, (bytes + line_offset + line_size - 1) >> log2_line_size, 0);
+  }
+};
+
+#endif // CPU_PPC_VM_ICACHE_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,510 @@
+/*
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interp_masm_ppc_64.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
+void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
+#ifdef CC_INTERP
+  address exception_entry = StubRoutines::throw_NullPointerException_at_call_entry();
+#else
+  address exception_entry = Interpreter::throw_NullPointerException_entry();
+#endif
+  MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
+}
+
+// Lock object
+//
+// Registers alive
+//   monitor - Address of the BasicObjectLock to be used for locking,
+//             which must be initialized with the object to lock.
+//   object  - Address of the object to be locked.
+//
+void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
+  if (UseHeavyMonitors) {
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+            monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
+  } else {
+    // template code:
+    //
+    // markOop displaced_header = obj->mark().set_unlocked();
+    // monitor->lock()->set_displaced_header(displaced_header);
+    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    //   // We stored the monitor address into the object's mark word.
+    // } else if (THREAD->is_lock_owned((address)displaced_header)) {
+    //   // Simple recursive case.
+    //   monitor->lock()->set_displaced_header(NULL);
+    // } else {
+    //   // Slow path.
+    //   InterpreterRuntime::monitorenter(THREAD, monitor);
+    // }
+
+    const Register displaced_header = R7_ARG5;
+    const Register object_mark_addr = R8_ARG6;
+    const Register current_header   = R9_ARG7;
+    const Register tmp              = R10_ARG8;
+
+    Label done;
+    Label cas_failed, slow_case;
+
+    assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
+
+
+    // markOop displaced_header = obj->mark().set_unlocked();
+
+    // Load markOop from object into displaced_header.
+    ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
+
+    if (UseBiasedLocking) {
+      biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
+    }
+
+    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
+    ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+
+
+    // monitor->lock()->set_displaced_header(displaced_header);
+
+    // Initialize the box (Must happen before we update the object mark!).
+    std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
+        BasicLock::displaced_header_offset_in_bytes(), monitor);
+
+    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+
+    // Store stack address of the BasicObjectLock (this is monitor) into object.
+    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
+
+    // Must fence, otherwise, preceding store(s) may float below cmpxchg.
+    // CmpxchgX sets CCR0 to cmpX(current, displaced).
+    fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
+    cmpxchgd(/*flag=*/CCR0,
+             /*current_value=*/current_header,
+             /*compare_value=*/displaced_header, /*exchange_value=*/monitor,
+             /*where=*/object_mark_addr,
+             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
+             MacroAssembler::cmpxchgx_hint_acquire_lock(),
+             noreg,
+             &cas_failed);
+
+    // If the compare-and-exchange succeeded, then we found an unlocked
+    // object and we have now locked it.
+    b(done);
+    bind(cas_failed);
+
+    // } else if (THREAD->is_lock_owned((address)displaced_header)) {
+    //   // Simple recursive case.
+    //   monitor->lock()->set_displaced_header(NULL);
+
+    // We did not see an unlocked object so try the fast recursive case.
+
+    // Check if owner is self by comparing the value in the markOop of object
+    // (current_header) with the stack pointer.
+    sub(current_header, current_header, R1_SP);
+
+    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
+    load_const_optimized(tmp,
+                         (address) (~(os::vm_page_size()-1) |
+                                    markOopDesc::lock_mask_in_place));
+
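+    // R0 is zero iff (markOop - SP) is smaller than a page and the lock
+    // bits are clear, i.e. the mark word holds an address on our own
+    // stack: this thread already owns the lock (fast recursive case).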
+    and_(R0/*==0?*/, current_header, tmp);
+    // If the condition is true, we are done and can store 0 into the
+    // displaced header to indicate a recursive lock.
+    bne(CCR0, slow_case);
+    release();
+    std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
+        BasicLock::displaced_header_offset_in_bytes(), monitor);
+    b(done);
+
+
+    // } else {
+    //   // Slow path.
+    //   InterpreterRuntime::monitorenter(THREAD, monitor);
+
+    // None of the above fast optimizations worked so we have to get into the
+    // slow case of monitor enter.
+    bind(slow_case);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
+            monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
+    // }
+
+    bind(done);
+  }
+}
+
+// Unlocks an object. Used in monitorexit bytecode and remove_activation.
+//
+// Registers alive
+//   monitor - Address of the BasicObjectLock to be used for locking,
+//             which must be initialized with the object to lock.
+//
+// Throw IllegalMonitorException if object is not locked by current thread.
+void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_exceptions) {
+  if (UseHeavyMonitors) {
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
+            monitor, /*check_for_exceptions=*/false);
+  } else {
+
+    // template code:
+    //
+    // if ((displaced_header = monitor->displaced_header()) == NULL) {
+    //   // Recursive unlock.  Mark the monitor unlocked by setting the object field to NULL.
+    //   monitor->set_obj(NULL);
+    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
+    //   monitor->set_obj(NULL);
+    // } else {
+    //   // Slow path.
+    //   InterpreterRuntime::monitorexit(THREAD, monitor);
+    // }
+
+    const Register object           = R7_ARG5;
+    const Register displaced_header = R8_ARG6;
+    const Register object_mark_addr = R9_ARG7;
+    const Register current_header   = R10_ARG8;
+
+    Label free_slot;
+    Label slow_case;
+
+    assert_different_registers(object, displaced_header, object_mark_addr, current_header);
+
+    if (UseBiasedLocking) {
+      // The object address from the monitor is in object.
+      ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
+      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+      biased_locking_exit(CCR0, object, displaced_header, free_slot);
+    }
+
+    // Test first if we are in the fast recursive case.
+    ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
+           BasicLock::displaced_header_offset_in_bytes(), monitor);
+
+    // If the displaced header is zero, we have a recursive unlock.
+    cmpdi(CCR0, displaced_header, 0);
+    beq(CCR0, free_slot); // recursive unlock
+
+    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
+    //   monitor->set_obj(NULL);
+
+    // If we still have a lightweight lock, unlock the object and be done.
+
+    // The object address from the monitor is in object.
+    if (!UseBiasedLocking) ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
+    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
+
+    // We have the displaced header in displaced_header. If the lock is still
+    // lightweight, it will contain the monitor address and we'll store the
+    // displaced header back into the object's mark word.
+    // CmpxchgX sets CCR0 to cmpX(current, monitor).
+    cmpxchgd(/*flag=*/CCR0,
+             /*current_value=*/current_header,
+             /*compare_value=*/monitor, /*exchange_value=*/displaced_header,
+             /*where=*/object_mark_addr,
+             MacroAssembler::MemBarRel,
+             MacroAssembler::cmpxchgx_hint_release_lock(),
+             noreg,
+             &slow_case);
+    b(free_slot);
+
+    // } else {
+    //   // Slow path.
+    //   InterpreterRuntime::monitorexit(THREAD, monitor);
+
+    // The lock has been converted into a heavy lock and hence
+    // we need to get into the slow case.
+    bind(slow_case);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
+            monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
+    // }
+
+    Label done;
+    b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
+
+    // Exchange worked, do monitor->set_obj(NULL);
+    align(32, 12);
+    bind(free_slot);
+    li(R0, 0);
+    std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
+    bind(done);
+  }
+}
+
+void InterpreterMacroAssembler::get_method_counters(Register method,
+                                                    Register Rcounters,
+                                                    Label& skip) {
+  BLOCK_COMMENT("Load and ev. allocate counter object {");
+  Label has_counters;
+  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
+  cmpdi(CCR0, Rcounters, 0);
+  bne(CCR0, has_counters);
+  call_VM(noreg, CAST_FROM_FN_PTR(address,
+                                  InterpreterRuntime::build_method_counters), method, false);
+  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
+  cmpdi(CCR0, Rcounters, 0);
+  beq(CCR0, skip); // No MethodCounters, OutOfMemory.
+  BLOCK_COMMENT("} Load and ev. allocate counter object");
+
+  bind(has_counters);
+}
+
+void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) {
+  assert(UseCompiler, "incrementing must be useful");
+  Register invocation_count = iv_be_count;
+  Register backedge_count   = Rtmp_r0;
+  int delta = InvocationCounter::count_increment;
+
+  // Load each counter in a register.
+  //  ld(inv_counter, Rtmp);
+  //  ld(be_counter, Rtmp2);
+  int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
+                                    InvocationCounter::counter_offset());
+  int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
+                                    InvocationCounter::counter_offset());
+
+  BLOCK_COMMENT("Increment profiling counters {");
+
+  // Load the backedge counter.
+  lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
+  // Mask the backedge counter.
+  Register tmp = invocation_count;
+  li(tmp, InvocationCounter::count_mask_value);
+  andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.
+
+  // Load the invocation counter.
+  lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
+  // Add the delta to the invocation counter and store the result.
+  addi(invocation_count, invocation_count, delta);
+  // Store value.
+  stw(invocation_count, inv_counter_offset, Rcounters);
+
+  // Add invocation counter + backedge counter.
+  add(iv_be_count, backedge_count, invocation_count);
+
+  // Note that this macro must leave the backedge_count + invocation_count in
+  // register iv_be_count!
+  BLOCK_COMMENT("} Increment profiling counters");
+}
+
+void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
+  if (state == atos) { MacroAssembler::verify_oop(reg); }
+}
+
+// Inline assembly for:
+//
+// if (thread is in interp_only_mode) {
+//   InterpreterRuntime::post_method_entry();
+// }
+// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
+//     *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2)   ) {
+//   SharedRuntime::jvmpi_method_entry(method, receiver);
+// }
+void InterpreterMacroAssembler::notify_method_entry() {
+  // JVMTI
+  // Whenever JVMTI puts a thread in interp_only_mode, method
+  // entry/exit events are sent for that thread to track stack
+  // depth. If it is possible to enter interp_only_mode, we add
+  // the code to check if the event should be sent.
+  if (JvmtiExport::can_post_interpreter_events()) {
+    Label jvmti_post_done;
+
+    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
+    cmpwi(CCR0, R0, 0);
+    beq(CCR0, jvmti_post_done);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
+            /*check_exceptions=*/false);
+
+    bind(jvmti_post_done);
+  }
+}
+
+
+// Inline assembly for:
+//
+// if (thread is in interp_only_mode) {
+//   // save result
+//   InterpreterRuntime::post_method_exit();
+//   // restore result
+// }
+// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
+//   // save result
+//   SharedRuntime::jvmpi_method_exit();
+//   // restore result
+// }
+//
+// Native methods have their result stored in d_tmp and l_tmp.
+// Java methods have their result stored in the expression stack.
+void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state) {
+  // JVMTI
+  // Whenever JVMTI puts a thread in interp_only_mode, method
+  // entry/exit events are sent for that thread to track stack
+  // depth. If it is possible to enter interp_only_mode, we add
+  // the code to check if the event should be sent.
+  if (JvmtiExport::can_post_interpreter_events()) {
+    Label jvmti_post_done;
+
+    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
+    cmpwi(CCR0, R0, 0);
+    beq(CCR0, jvmti_post_done);
+    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
+            /*check_exceptions=*/false);
+
+    align(32, 12);
+    bind(jvmti_post_done);
+  }
+}
+
+// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
+// (using parent_frame_resize) and push a new interpreter
+// TOP_IJAVA_FRAME (using top_frame_size).
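+// Roughly:
+//   before: R1_SP -> [TOP_IJAVA_FRAME][caller frames ...]
+//   after:  R1_SP -> [new TOP_IJAVA_FRAME][PARENT_IJAVA_FRAME
+//                     (resized by parent_frame_resize)][caller frames ...]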
+void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
+                                                       Register tmp1, Register tmp2, Register tmp3,
+                                                       Register tmp4, Register pc) {
+  assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4);
+  ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  mr(tmp2/*top_frame_sp*/, R1_SP);
+  // Move initial_caller_sp.
+  ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+  neg(parent_frame_resize, parent_frame_resize);
+  resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3);
+
+  // Set LR in new parent frame.
+  std(tmp1, _abi(lr), R1_SP);
+  // Set top_frame_sp info for new parent frame.
+  std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP);
+  std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
+
+  // Push new TOP_IJAVA_FRAME.
+  push_frame(top_frame_size, tmp2);
+
+  get_PC_trash_LR(tmp3);
+  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  // Used for non-initial callers by unextended_sp().
+  std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+}
+
+// Pop the topmost TOP_IJAVA_FRAME and convert the previous
+// PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
+void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  assert_different_registers(tmp1, tmp2, tmp3, tmp4);
+
+  ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP);
+  ld(tmp3, _abi(lr), tmp1);
+
+  ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1);
+
+  ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1);
+  // Merge top frame.
+  std(tmp2, _abi(callers_sp), R1_SP);
+
+  ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1);
+
+  // Update C stack pointer to caller's top_abi.
+  resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
+
+  // Update LR in top_frame.
+  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+
+  std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+
+  // Store the top-frame stack-pointer for c2i adapters.
+  std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
+}
+
+#ifdef CC_INTERP
+// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
+void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
+  assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);
+
+  if (state == R14_state) {
+    ld(tmp1/*state's fp*/, state_(_last_Java_fp));
+    ld(tmp2/*state's sp*/, state_(_last_Java_sp));
+  } else if (state == R15_prev_state) {
+    ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp));
+    ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp));
+  } else {
+    ShouldNotReachHere();
+  }
+
+  // Merge top frames.
+  std(tmp1, _abi(callers_sp), R1_SP);
+
+  // Tmp2 is new SP.
+  // Tmp1 is parent's SP.
+  resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
+
+  // Update LR in top_frame.
+  // Must be interpreter frame.
+  get_PC_trash_LR(tmp3);
+  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+  // Used for non-initial callers by unextended_sp().
+  std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
+}
+#endif // CC_INTERP
+
+// Set SP to the initial caller's sp, but fix the back chain beforehand.
+void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
+  ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
+  ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP);
+  std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ...
+  mr(R1_SP, tmp1); // ... and resize to initial caller.
+}
+
+#ifdef CC_INTERP
+// Pop the current interpreter state (without popping the corresponding
+// frame) and restore R14_state and R15_prev_state accordingly.
+// Use prev_state_may_be_0 to indicate whether prev_state may be 0
+// in order to generate an extra check before retrieving prev_state_(_prev_link).
+void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0) {
+  // Move prev_state to state and restore prev_state from state_(_prev_link).
+  Label prev_state_is_0;
+  mr(R14_state, R15_prev_state);
+
+  // Don't retrieve /*state==*/prev_state_(_prev_link)
+  // if /*state==*/prev_state is 0.
+  if (prev_state_may_be_0) {
+    cmpdi(CCR0, R15_prev_state, 0);
+    beq(CCR0, prev_state_is_0);
+  }
+
+  ld(R15_prev_state, /*state==*/prev_state_(_prev_link));
+  bind(prev_state_is_0);
+}
+
+void InterpreterMacroAssembler::restore_prev_state() {
+  // _prev_link is private, but cInterpreter is a friend.
+  ld(R15_prev_state, state_(_prev_link));
+}
+#endif // CC_INTERP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
+#define CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
+
+#include "assembler_ppc.inline.hpp"
+#include "interpreter/invocationCounter.hpp"
+
+// This file specializes the assembler with interpreter-specific macros
+
+
+class InterpreterMacroAssembler: public MacroAssembler {
+
+ public:
+  InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
+
+  void null_check_throw(Register a, int offset, Register temp_reg);
+
+  // Handy address generation macros
+#define thread_(field_name) in_bytes(JavaThread::field_name ## _offset()), R16_thread
+#define method_(field_name) in_bytes(Method::field_name ## _offset()), R19_method
+
+#ifdef CC_INTERP
+#define state_(field_name)  in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R14_state
+#define prev_state_(field_name)  in_bytes(byte_offset_of(BytecodeInterpreter, field_name)), R15_prev_state
+#endif
+
+  void get_method_counters(Register method, Register Rcounters, Label& skip);
+  void increment_invocation_counter(Register iv_be_count, Register Rtmp1, Register Rtmp2_r0);
+
+  // Object locking
+  void lock_object  (Register lock_reg, Register obj_reg);
+  void unlock_object(Register lock_reg, bool check_for_exceptions = true);
+
+  // Debugging
+  void verify_oop(Register reg, TosState state = atos);    // only if +VerifyOops && state == atos
+
+  // support for jvmdi/jvmpi
+  void notify_method_entry();
+  void notify_method_exit(bool is_native_method, TosState state);
+
+#ifdef CC_INTERP
+  // Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
+  // (using parent_frame_resize) and push a new interpreter
+  // TOP_IJAVA_FRAME (using top_frame_size).
+  void push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
+                              Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register pc=noreg);
+
+  // Pop the topmost TOP_IJAVA_FRAME and convert the previous
+  // PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
+  void pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
+  // Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
+  void pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3);
+
+  // Set SP to the initial caller's sp, but fix the back chain beforehand.
+  void resize_frame_to_initial_caller(Register tmp1, Register tmp2);
+
+  // Pop the current interpreter state (without popping the
+  // corresponding frame) and restore R14_state and R15_prev_state
+  // accordingly. Use prev_state_may_be_0 to indicate whether
+  // prev_state may be 0 in order to generate an extra check before
+  // retrieving prev_state_(_prev_link).
+  void pop_interpreter_state(bool prev_state_may_be_0);
+
+  void restore_prev_state();
+#endif
+};
+
+#endif // CPU_PPC_VM_INTERP_MASM_PPC_64_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
+#define CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
+
+ friend class AbstractInterpreterGenerator;
+
+ private:
+
+  address generate_abstract_entry(void);
+  address generate_accessor_entry(void);
+  address generate_Reference_get_entry(void);
+
+#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interpreterRT_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/universe.inline.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/icache.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/signature.hpp"
+
+#define __ _masm->
+
+// Access macros for Java and C arguments.
+// The first Java argument is at index -1.
+#define locals_j_arg_at(index)    (Interpreter::local_offset_in_bytes(index)), R18_locals
+// The first C argument is at index 0.
+#define sp_c_arg_at(index)        ((index)*wordSize + _abi(carg_1)), R1_SP
+
+// Implementation of SignatureHandlerGenerator
+
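+// Note: the DEBUG_ONLY(true ||) conditions below make debug builds store
+// every argument into its C stack slot even when it is already passed in
+// a register; product builds only spill the arguments that do not fit
+// into registers.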
+void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
+  Argument jni_arg(jni_offset());
+  Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
+
+  __ lwa(r, locals_j_arg_at(offset())); // sign extension of integer
+  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
+    __ std(r, sp_c_arg_at(jni_arg.number()));
+  }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
+  Argument jni_arg(jni_offset());
+  Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
+
+  __ ld(r, locals_j_arg_at(offset()+1)); // long resides in upper slot
+  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
+    __ std(r, sp_c_arg_at(jni_arg.number()));
+  }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
+  FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
+                         ? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
+                         : F0;
+
+  __ lfs(fp_reg, locals_j_arg_at(offset()));
+  if (DEBUG_ONLY(true ||) jni_offset() > 8) {
+    __ stfs(fp_reg, sp_c_arg_at(jni_offset()));
+  }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
+  FloatRegister fp_reg = (_num_used_fp_arg_regs < 13/*max_fp_register_arguments*/)
+                         ? as_FloatRegister((_num_used_fp_arg_regs++) + F1_ARG1->encoding())
+                         : F0;
+
+  __ lfd(fp_reg, locals_j_arg_at(offset()+1));
+  if (DEBUG_ONLY(true ||) jni_offset() > 8) {
+    __ stfd(fp_reg, sp_c_arg_at(jni_offset()));
+  }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
+  Argument jni_arg(jni_offset());
+  Register r = jni_arg.is_register() ? jni_arg.as_register() : R11_scratch1;
+
+  // The handle for a receiver will never be null.
+  bool do_NULL_check = offset() != 0 || is_static();
+
+  Label do_null;
+  if (do_NULL_check) {
+    __ ld(R0, locals_j_arg_at(offset()));
+    __ cmpdi(CCR0, R0, 0);
+    __ li(r, 0);
+    __ beq(CCR0, do_null);
+  }
+  __ addir(r, locals_j_arg_at(offset()));
+  __ bind(do_null);
+  if (DEBUG_ONLY(true ||) !jni_arg.is_register()) {
+    __ std(r, sp_c_arg_at(jni_arg.number()));
+  }
+}
+
+void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
+  // Emit the function descriptor for the current code buffer. It needs patching later (see pd_set_handler).
+  __ emit_fd();
+
+  // Generate code to handle arguments.
+  iterate(fingerprint);
+
+  // Return the result handler.
+  __ load_const(R3_RET, AbstractInterpreter::result_handler(method()->result_type()));
+  __ blr();
+
+  __ flush();
+}
+
+#undef __
+
+// Implementation of SignatureHandlerLibrary
+
+void SignatureHandlerLibrary::pd_set_handler(address handler) {
+  // Patch the function descriptor here.
+  FunctionDescriptor* fd = (FunctionDescriptor*) handler;
+
+  fd->set_entry(handler + (int)sizeof(FunctionDescriptor));
+  assert(fd->toc() == (address)0xcafe, "need to adjust TOC here");
+}
+
+
+// Access function to get the signature.
+IRT_ENTRY(address, InterpreterRuntime::get_signature(JavaThread* thread, Method* method))
+  methodHandle m(thread, method);
+  assert(m->is_native(), "sanity check");
+  Symbol *s = m->signature();
+  return (address) s->base();
+IRT_END
+
+IRT_ENTRY(address, InterpreterRuntime::get_result_handler(JavaThread* thread, Method* method))
+  methodHandle m(thread, method);
+  assert(m->is_native(), "sanity check");
+  return AbstractInterpreter::result_handler(m->result_type());
+IRT_END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interpreterRT_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_INTERPRETERRT_PPC_HPP
+#define CPU_PPC_VM_INTERPRETERRT_PPC_HPP
+
+#include "memory/allocation.hpp"
+
+// native method calls
+
+class SignatureHandlerGenerator: public NativeSignatureIterator {
+ private:
+  MacroAssembler* _masm;
+  // number of already used floating-point argument registers
+  int _num_used_fp_arg_regs;
+
+  void pass_int();
+  void pass_long();
+  void pass_double();
+  void pass_float();
+  void pass_object();
+
+ public:
+  // Creation
+  SignatureHandlerGenerator(methodHandle method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+    _masm = new MacroAssembler(buffer);
+    _num_used_fp_arg_regs = 0;
+  }
+
+  // Code generation
+  void generate(uint64_t fingerprint);
+};
+
+// Support for generate_slow_signature_handler.
+static address get_result_handler(JavaThread* thread, Method* method);
+
+// A function to get the signature.
+static address get_signature(JavaThread* thread, Method* method);
+
+#endif // CPU_PPC_VM_INTERPRETERRT_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interpreter_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,801 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/bytecodeHistogram.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/templateTable.hpp"
+#include "oops/arrayOop.hpp"
+#include "oops/methodData.hpp"
+#include "oops/method.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/timer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+
+#ifndef CC_INTERP
+#error "CC_INTERP must be defined on PPC"
+#endif
+
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+int AbstractInterpreter::BasicType_as_index(BasicType type) {
+  int i = 0;
+  switch (type) {
+    case T_BOOLEAN: i = 0; break;
+    case T_CHAR   : i = 1; break;
+    case T_BYTE   : i = 2; break;
+    case T_SHORT  : i = 3; break;
+    case T_INT    : i = 4; break;
+    case T_LONG   : i = 5; break;
+    case T_VOID   : i = 6; break;
+    case T_FLOAT  : i = 7; break;
+    case T_DOUBLE : i = 8; break;
+    case T_OBJECT : i = 9; break;
+    case T_ARRAY  : i = 9; break;
+    default       : ShouldNotReachHere();
+  }
+  assert(0 <= i && i < AbstractInterpreter::number_of_result_handlers, "index out of bounds");
+  return i;
+}
+
+address AbstractInterpreterGenerator::generate_slow_signature_handler() {
+  // Slow signature handler that respects the PPC C calling conventions.
+  //
+  // We get called by the native entry code with our output register
+  // area == 8. First we call InterpreterRuntime::get_result_handler
+  // to copy the pointer to the signature string temporarily to the
+  // first C-argument and to return the result_handler in
+  // R3_RET. Since native_entry will copy the jni-pointer to the
+  // first C-argument slot later on, it is OK to occupy this slot
+  // temporarily. Then we copy the argument list on the Java
+  // expression stack into native varargs format on the native stack
+  // and load arguments into argument registers. Integer arguments in
+  // the varargs vector will be sign-extended to 8 bytes.
+  //
+  // On entry:
+  //   R3_ARG1        - intptr_t*     Address of the Java argument list in memory.
+  //   R15_prev_state - BytecodeInterpreter* Address of interpreter state for
+  //     this method
+  //   R19_method
+  //
+  // On exit (just before return instruction):
+  //   R3_RET            - contains the address of the result_handler.
+  //   R4_ARG2           - is not updated for static methods and contains "this" otherwise.
+  //   R5_ARG3-R10_ARG8  - When the (i-2)th Java argument is not of type float or double,
+  //                       ARGi contains this argument. Otherwise, ARGi is not updated.
+  //   F1_ARG1-F13_ARG13 - contain the first 13 arguments of type float or double.
+
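+  // Illustration: for a non-static native method with the signature
+  // (ILjava/lang/String;D)V, R4_ARG2 receives the boxed "this", R5_ARG3
+  // the sign-extended int, R6_ARG4 the boxed String (or NULL), and
+  // F1_ARG1 the double; every value is also stored into the native
+  // varargs area on the stack.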
+  const int LogSizeOfTwoInstructions = 3;
+
+  // FIXME: use Argument:: GL: Argument names different numbers!
+  const int max_fp_register_arguments  = 13;
+  const int max_int_register_arguments = 6;  // first 2 are reserved
+
+  const Register arg_java       = R21_tmp1;
+  const Register arg_c          = R22_tmp2;
+  const Register signature      = R23_tmp3;  // is string
+  const Register sig_byte       = R24_tmp4;
+  const Register fpcnt          = R25_tmp5;
+  const Register argcnt         = R26_tmp6;
+  const Register intSlot        = R27_tmp7;
+  const Register target_sp      = R28_tmp8;
+  const FloatRegister floatSlot = F0;
+
+  address entry = __ emit_fd();
+
+  __ save_LR_CR(R0);
+  __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+  // We use target_sp for storing arguments in the C frame.
+  __ mr(target_sp, R1_SP);
+  __ push_frame_abi112_nonvolatiles(0, R11_scratch1);
+
+  __ mr(arg_java, R3_ARG1);
+
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), R16_thread, R19_method);
+
+  // Signature is in R3_RET. Signature is callee saved.
+  __ mr(signature, R3_RET);
+
+  // Reload method, it may have moved.
+#ifdef CC_INTERP
+  __ ld(R19_method, state_(_method));
+#else
+  __ unimplemented("slow signature handler 1");
+#endif
+
+  // Get the result handler.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), R16_thread, R19_method);
+
+  // Reload method, it may have moved.
+#ifdef CC_INTERP
+  __ ld(R19_method, state_(_method));
+#else
+  __ unimplemented("slow signature handler 2");
+#endif
+
+  {
+    Label L;
+    // test if static
+    // _access_flags._flags must be at offset 0.
+    // TODO PPC port: requires change in shared code.
+    //assert(in_bytes(AccessFlags::flags_offset()) == 0,
+    //       "MethodOopDesc._access_flags == MethodOopDesc._access_flags._flags");
+    // _access_flags must be a 32 bit value.
+    assert(sizeof(AccessFlags) == 4, "wrong size");
+    __ lwa(R11_scratch1/*access_flags*/, method_(access_flags));
+    // testbit with condition register.
+    __ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
+    __ btrue(CCR0, L);
+    // For non-static functions, pass "this" in R4_ARG2 and copy it
+    // to 2nd C-arg slot.
+    // We need to box the Java object here, so we use arg_java
+    // (address of current Java stack slot) as argument and don't
+    // dereference it as in case of ints, floats, etc.
+    __ mr(R4_ARG2, arg_java);
+    __ addi(arg_java, arg_java, -BytesPerWord);
+    __ std(R4_ARG2, _abi(carg_2), target_sp);
+    __ bind(L);
+  }
+
+  // Will be incremented directly after loop_start. argcnt=0
+  // corresponds to 3rd C argument.
+  __ li(argcnt, -1);
+  // arg_c points to 3rd C argument
+  __ addi(arg_c, target_sp, _abi(carg_3));
+  // no floating-point args parsed so far
+  __ li(fpcnt, 0);
+
+  Label move_intSlot_to_ARG, move_floatSlot_to_FARG;
+  Label loop_start, loop_end;
+  Label do_int, do_long, do_float, do_double, do_dontreachhere, do_object, do_array, do_boxed;
+
+  // signature points to '(' at entry
+#ifdef ASSERT
+  __ lbz(sig_byte, 0, signature);
+  __ cmplwi(CCR0, sig_byte, '(');
+  __ bne(CCR0, do_dontreachhere);
+#endif
+
+  __ bind(loop_start);
+
+  __ addi(argcnt, argcnt, 1);
+  __ lbzu(sig_byte, 1, signature);
+
+  __ cmplwi(CCR0, sig_byte, ')'); // end of signature
+  __ beq(CCR0, loop_end);
+
+  __ cmplwi(CCR0, sig_byte, 'B'); // byte
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'C'); // char
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'D'); // double
+  __ beq(CCR0, do_double);
+
+  __ cmplwi(CCR0, sig_byte, 'F'); // float
+  __ beq(CCR0, do_float);
+
+  __ cmplwi(CCR0, sig_byte, 'I'); // int
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'J'); // long
+  __ beq(CCR0, do_long);
+
+  __ cmplwi(CCR0, sig_byte, 'S'); // short
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'Z'); // boolean
+  __ beq(CCR0, do_int);
+
+  __ cmplwi(CCR0, sig_byte, 'L'); // object
+  __ beq(CCR0, do_object);
+
+  __ cmplwi(CCR0, sig_byte, '['); // array
+  __ beq(CCR0, do_array);
+
+  //  __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
+  //  __ beq(CCR0, do_void);
+
+  __ bind(do_dontreachhere);
+
+  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
+
+  __ bind(do_array);
+
+  {
+    Label start_skip, end_skip;
+
+    __ bind(start_skip);
+    __ lbzu(sig_byte, 1, signature);
+    __ cmplwi(CCR0, sig_byte, '[');
+    __ beq(CCR0, start_skip); // skip further brackets
+    __ cmplwi(CCR0, sig_byte, '9');
+    __ bgt(CCR0, end_skip);   // no optional size
+    __ cmplwi(CCR0, sig_byte, '0');
+    __ bge(CCR0, start_skip); // skip optional size
+    __ bind(end_skip);
+
+    __ cmplwi(CCR0, sig_byte, 'L');
+    __ beq(CCR0, do_object);  // for arrays of objects, the name of the object must be skipped
+    __ b(do_boxed);          // otherwise, go directly to do_boxed
+  }
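+  // For example, "[[I" is consumed down to the 'I' here and branches to
+  // do_boxed, while "[Ljava/lang/String;" stops at the 'L' and takes the
+  // do_object path so that the class name is skipped as well.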
+
+  __ bind(do_object);
+  {
+    Label L;
+    __ bind(L);
+    __ lbzu(sig_byte, 1, signature);
+    __ cmplwi(CCR0, sig_byte, ';');
+    __ bne(CCR0, L);
+  }
+  // Need to box the Java object here, so we use arg_java (address of
+  // current Java stack slot) as argument and don't dereference it as
+  // in case of ints, floats, etc.
+  Label do_null;
+  __ bind(do_boxed);
+  __ ld(R0, 0, arg_java);
+  __ cmpdi(CCR0, R0, 0);
+  __ li(intSlot, 0);
+  __ beq(CCR0, do_null);
+  __ mr(intSlot, arg_java);
+  __ bind(do_null);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_int);
+  __ lwa(intSlot, 0, arg_java);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_long);
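+  // A long occupies two Java stack slots with its value in the second,
+  // lower-addressed slot, hence the load at -BytesPerWord and the
+  // decrement of arg_java by two words.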
+  __ ld(intSlot, -BytesPerWord, arg_java);
+  __ std(intSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, argcnt, max_int_register_arguments);
+  __ blt(CCR0, move_intSlot_to_ARG);
+  __ b(loop_start);
+
+  __ bind(do_float);
+  __ lfs(floatSlot, 0, arg_java);
+#if defined(LINUX)
+  __ stfs(floatSlot, 4, arg_c);
+#elif defined(AIX)
+  __ stfs(floatSlot, 0, arg_c);
+#else
+#error "unknown OS"
+#endif
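+  // The differing store offsets above reflect the ABIs: the Linux ELF
+  // ABI keeps a single float right-justified in its doubleword stack
+  // slot, while AIX keeps it left-justified.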
+  __ addi(arg_java, arg_java, -BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
+  __ blt(CCR0, move_floatSlot_to_FARG);
+  __ b(loop_start);
+
+  __ bind(do_double);
+  __ lfd(floatSlot, -BytesPerWord, arg_java);
+  __ stfd(floatSlot, 0, arg_c);
+  __ addi(arg_java, arg_java, - 2 * BytesPerWord);
+  __ addi(arg_c, arg_c, BytesPerWord);
+  __ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
+  __ blt(CCR0, move_floatSlot_to_FARG);
+  __ b(loop_start);
+
+  __ bind(loop_end);
+
+  __ pop_frame();
+  __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+  __ restore_LR_CR(R0);
+
+  __ blr();
+
+  Label move_int_arg, move_float_arg;
+  __ bind(move_int_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
+  __ mr(R5_ARG3, intSlot);  __ b(loop_start);
+  __ mr(R6_ARG4, intSlot);  __ b(loop_start);
+  __ mr(R7_ARG5, intSlot);  __ b(loop_start);
+  __ mr(R8_ARG6, intSlot);  __ b(loop_start);
+  __ mr(R9_ARG7, intSlot);  __ b(loop_start);
+  __ mr(R10_ARG8, intSlot); __ b(loop_start);
+
+  __ bind(move_float_arg); // each case must consist of 2 instructions (otherwise adapt LogSizeOfTwoInstructions)
+  __ fmr(F1_ARG1, floatSlot);   __ b(loop_start);
+  __ fmr(F2_ARG2, floatSlot);   __ b(loop_start);
+  __ fmr(F3_ARG3, floatSlot);   __ b(loop_start);
+  __ fmr(F4_ARG4, floatSlot);   __ b(loop_start);
+  __ fmr(F5_ARG5, floatSlot);   __ b(loop_start);
+  __ fmr(F6_ARG6, floatSlot);   __ b(loop_start);
+  __ fmr(F7_ARG7, floatSlot);   __ b(loop_start);
+  __ fmr(F8_ARG8, floatSlot);   __ b(loop_start);
+  __ fmr(F9_ARG9, floatSlot);   __ b(loop_start);
+  __ fmr(F10_ARG10, floatSlot); __ b(loop_start);
+  __ fmr(F11_ARG11, floatSlot); __ b(loop_start);
+  __ fmr(F12_ARG12, floatSlot); __ b(loop_start);
+  __ fmr(F13_ARG13, floatSlot); __ b(loop_start);
+
+  __ bind(move_intSlot_to_ARG);
+  __ sldi(R0, argcnt, LogSizeOfTwoInstructions);
+  __ load_const(R11_scratch1, move_int_arg); // Label must be bound here.
+  __ add(R11_scratch1, R0, R11_scratch1);
+  __ mtctr(R11_scratch1/*branch_target*/);
+  __ bctr();
+  __ bind(move_floatSlot_to_FARG);
+  __ sldi(R0, fpcnt, LogSizeOfTwoInstructions);
+  __ addi(fpcnt, fpcnt, 1);
+  __ load_const(R11_scratch1, move_float_arg); // Label must be bound here.
+  __ add(R11_scratch1, R0, R11_scratch1);
+  __ mtctr(R11_scratch1/*branch_target*/);
+  __ bctr();
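+
+  // Dispatch arithmetic, for reference: each case above is exactly two
+  // 4-byte instructions (mr/fmr plus b), an 8-byte stride, so the target
+  // is move_int_arg + (argcnt << LogSizeOfTwoInstructions), respectively
+  // move_float_arg + (fpcnt << LogSizeOfTwoInstructions); this is why
+  // LogSizeOfTwoInstructions is 3.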
+
+  return entry;
+}
+
+address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
+  //
+  // Registers alive
+  //   R3_RET
+  //   LR
+  //
+  // Registers updated
+  //   R3_RET
+  //
+
+  Label done;
+  address entry = __ pc();
+
+  switch (type) {
+  case T_BOOLEAN:
+    // convert !=0 to 1
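+    // Classic (x | -x) >> 31 trick: the word sign bit of x | -x is set
+    // iff the low word of x is nonzero, so the logical shift leaves
+    // exactly 0 or 1 in R3_RET.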
+    __ neg(R0, R3_RET);
+    __ orr(R0, R3_RET, R0);
+    __ srwi(R3_RET, R0, 31);
+    break;
+  case T_BYTE:
+     // sign extend 8 bits
+     __ extsb(R3_RET, R3_RET);
+     break;
+  case T_CHAR:
+     // zero extend 16 bits
+     __ clrldi(R3_RET, R3_RET, 48);
+     break;
+  case T_SHORT:
+     // sign extend 16 bits
+     __ extsh(R3_RET, R3_RET);
+     break;
+  case T_INT:
+     // sign extend 32 bits
+     __ extsw(R3_RET, R3_RET);
+     break;
+  case T_LONG:
+     break;
+  case T_OBJECT:
+    // unbox result if not null
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ beq(CCR0, done);
+    __ ld(R3_RET, 0, R3_RET);
+    __ verify_oop(R3_RET);
+    break;
+  case T_FLOAT:
+     break;
+  case T_DOUBLE:
+     break;
+  case T_VOID:
+     break;
+  default: ShouldNotReachHere();
+  }
+
+  __ BIND(done);
+  __ blr();
+
+  return entry;
+}
+
+// Abstract method entry.
+//
+address InterpreterGenerator::generate_abstract_entry(void) {
+  address entry = __ pc();
+
+  //
+  // Registers alive
+  //   R16_thread     - JavaThread*
+  //   R19_method     - callee's methodOop (method to be invoked)
+  //   R1_SP          - SP prepared such that caller's outgoing args are near top
+  //   LR             - return address to caller
+  //
+  // Stack layout at this point:
+  //
+  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
+  //           alignment (optional)
+  //           [outgoing Java arguments]
+  //           ...
+  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
+  //            ...
+  //
+
+  // Can't use call_VM here because we have not set up a new
+  // interpreter state. Make the call to the vm and make it look like
+  // our caller set up the JavaFrameAnchor.
+  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2/*tmp*/);
+
+  // Push a new C frame and save LR.
+  __ save_LR_CR(R0);
+  __ push_frame_abi112(0, R11_scratch1);
+
+  // This is not a leaf but we have a JavaFrameAnchor now and we will
+  // check (create) exceptions afterward so this is ok.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+
+  // Pop the C frame and restore LR.
+  __ pop_frame();
+  __ restore_LR_CR(R0);
+
+  // Reset JavaFrameAnchor from call_VM_leaf above.
+  __ reset_last_Java_frame();
+
+#ifdef CC_INTERP
+  // Return to frame manager, it will handle the pending exception.
+  __ blr();
+#else
+  Unimplemented();
+#endif
+
+  return entry;
+}
+
+// Call an accessor method (assuming it is resolved; otherwise drop into
+// the vanilla (slow path) entry).
+address InterpreterGenerator::generate_accessor_entry(void) {
+  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods)))
+    return NULL;
+
+  Label Lslow_path, Lacquire;
+
+  const Register
+         Rclass_or_obj = R3_ARG1,
+         Rconst_method = R4_ARG2,
+         Rcodes        = Rconst_method,
+         Rcpool_cache  = R5_ARG3,
+         Rscratch      = R11_scratch1,
+         Rjvmti_mode   = Rscratch,
+         Roffset       = R12_scratch2,
+         Rflags        = R6_ARG4,
+         Rbtable       = R7_ARG5;
+
+  static address branch_table[number_of_states];
+
+  address entry = __ pc();
+
+  // Check for safepoint:
+  // Ditch this, real men don't need safepoint checks.
+
+  // Also check for JVMTI mode
+  // Check for null obj, take slow path if so.
+  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
+  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
+  __ cmpdi(CCR1, Rclass_or_obj, 0);
+  __ cmpwi(CCR0, Rjvmti_mode, 0);
+  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
+  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
+
+  // Do 2 things in parallel:
+  // 1. Load the index out of the first instruction word, which looks like this:
+  //    <0x2a><0xb4><index (2 bytes, native endianness)>, i.e. _aload_0, _getfield.
+  // 2. Load constant pool cache base.
+  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
+  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
+
+  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
+  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
+
+  // Get the const pool entry by means of <index>.
+  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
+  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
+  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
+
+  // Check if cpool cache entry is resolved.
+  // We are resolved if the indices offset contains the current bytecode.
+  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
+  // Big Endian:
+  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
+  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
+  __ bne(CCR0, Lslow_path);
+  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
+
+  // Finally, start loading the value: Get cp cache entry into regs.
+  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
+  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
+
+  // Following code is from templateTable::getfield_or_static
+  // Load pointer to branch table
+  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
+
+  // Get volatile flag
+  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
+  // note: sync is needed before volatile load on PPC64
+
+  // Check field type
+  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
+
+#ifdef ASSERT
+  Label LFlagInvalid;
+  __ cmpldi(CCR0, Rflags, number_of_states);
+  __ bge(CCR0, LFlagInvalid);
+
+  __ ld(R9_ARG7, 0, R1_SP);
+  __ ld(R10_ARG8, 0, R21_sender_SP);
+  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+  __ asm_assert_eq("backlink", 0x543);
+#endif // ASSERT
+  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+  // Load from branch table and dispatch (volatile case: one instruction ahead)
+  __ sldi(Rflags, Rflags, LogBytesPerWord);
+  __ cmpwi(CCR6, Rscratch, 1); // volatile?
+  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
+  __ ldx(Rbtable, Rbtable, Rflags);
+
+  __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
+  __ mtctr(Rbtable);
+  __ bctr();
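+
+  // Entry selection, for reference: branch_table[tos_state] holds the
+  // non-volatile entry point; for a volatile field Rscratch is 1, so the
+  // subtraction above backs up by one instruction onto the sync emitted
+  // just before each non-volatile entry point.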
+
+#ifdef ASSERT
+  __ bind(LFlagInvalid);
+  __ stop("got invalid flag", 0x6541);
+
+  bool all_uninitialized = true,
+       all_initialized   = true;
+  for (int i = 0; i<number_of_states; ++i) {
+    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
+    all_initialized   = all_initialized   && (branch_table[i] != NULL);
+  }
+  assert(all_uninitialized != all_initialized, "consistency"); // either or
+
+  __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
+  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
+  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
+  __ stop("unexpected type", 0x6551);
+#endif
+
+  if (branch_table[itos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[itos] = __ pc(); // non-volatile_entry point
+    __ lwax(R3_RET, Rclass_or_obj, Roffset);
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  if (branch_table[ltos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[ltos] = __ pc(); // non-volatile_entry point
+    __ ldx(R3_RET, Rclass_or_obj, Roffset);
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  if (branch_table[btos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[btos] = __ pc(); // non-volatile_entry point
+    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
+    __ extsb(R3_RET, R3_RET);
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  if (branch_table[ctos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[ctos] = __ pc(); // non-volatile_entry point
+    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  if (branch_table[stos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[stos] = __ pc(); // non-volatile_entry point
+    __ lhax(R3_RET, Rclass_or_obj, Roffset);
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  if (branch_table[atos] == 0) { // generate only once
+    __ align(32, 28, 28); // align load
+    __ sync(); // volatile entry point (one instruction before non-volatile_entry point)
+    branch_table[atos] = __ pc(); // non-volatile_entry point
+    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
+    __ verify_oop(R3_RET);
+    //__ dcbt(R3_RET); // prefetch
+    __ beq(CCR6, Lacquire);
+    __ blr();
+  }
+
+  __ align(32, 12);
+  __ bind(Lacquire);
+  __ twi_0(R3_RET);
+  __ isync(); // acquire
+  __ blr();
+
+#ifdef ASSERT
+  for (int i = 0; i<number_of_states; ++i) {
+    assert(branch_table[i], "accessor_entry initialization");
+    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
+  }
+#endif
+
+  __ bind(Lslow_path);
+  assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
+  __ load_const_optimized(Rscratch, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
+  __ mtctr(Rscratch);
+  __ bctr();
+  __ flush();
+
+  return entry;
+}
+
+// Interpreter intrinsic for WeakReference.get().
+// 1. Don't push a full-blown frame and go on dispatching, but fetch the value
+//    into R8 and return quickly.
+// 2. If G1 is active we *must* execute this intrinsic for correctness:
+//    It contains a GC barrier which puts the reference into the SATB buffer
+//    to indicate that someone holds a strong reference to the object the
+//    weak ref points to!
+address InterpreterGenerator::generate_Reference_get_entry(void) {
+  // Code: _aload_0, _getfield, _areturn
+  // parameter size = 1
+  //
+  // The code that gets generated by this routine is split into 2 parts:
+  //    1. the "intrinsified" code for G1 (or any SATB based GC),
+  //    2. the slow path - which is an expansion of the regular method entry.
+  //
+  // Notes:
+  // * In the G1 code we do not check whether we need to block for
+  //   a safepoint. If G1 is enabled then we must execute the specialized
+  //   code for Reference.get (except when the Reference object is null)
+  //   so that we can log the value in the referent field with an SATB
+  //   update buffer.
+  //   If the code for the getfield template is modified so that the
+  //   G1 pre-barrier code is executed when the current method is
+  //   Reference.get() then going through the normal method entry
+  //   will be fine.
+  // * The G1 code can, however, check the receiver object (the instance
+  //   of java.lang.Reference) and jump to the slow path if null. If the
+  //   Reference object is null then we obviously cannot fetch the referent
+  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
+  //   regular method entry code to generate the NPE.
+  //
+  // This code is based on generate_accessor_entry.
+
+  address entry = __ pc();
+
+  const int referent_offset = java_lang_ref_Reference::referent_offset;
+  guarantee(referent_offset > 0, "referent offset not initialized");
+
+  if (UseG1GC) {
+    Label slow_path;
+
+    // Debugging not possible, so can't use __ skip_if_jvmti_mode(slow_path, GR31_SCRATCH);
+
+    // In the G1 code we don't check if we need to reach a safepoint. We
+    // continue and the thread will safepoint at the next bytecode dispatch.
+
+    // If the receiver is null then it is OK to jump to the slow path.
+    __ ld(R3_RET, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp)); // get receiver
+
+    // Check if receiver == NULL and go the slow path.
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ beq(CCR0, slow_path);
+
+    // Load the value of the referent field.
+    __ load_heap_oop(R3_RET, referent_offset, R3_RET);
+
+    // Generate the G1 pre-barrier code to log the value of
+    // the referent field in an SATB buffer. Note with
+    // these parameters the pre-barrier does not generate
+    // the load of the previous value.
+
+    // Restore caller sp for c2i case.
+#ifdef ASSERT
+    __ ld(R9_ARG7, 0, R1_SP);
+    __ ld(R10_ARG8, 0, R21_sender_SP);
+    __ cmpd(CCR0, R9_ARG7, R10_ARG8);
+    __ asm_assert_eq("backlink", 0x544);
+#endif // ASSERT
+    __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
+
+    __ g1_write_barrier_pre(noreg,         // obj
+                            noreg,         // offset
+                            R3_RET,        // pre_val
+                            R11_scratch1,  // tmp
+                            R12_scratch2,  // tmp
+                            true);         // needs_frame
+
+    __ blr();
+
+    // Generate regular method entry.
+    __ bind(slow_path);
+    assert(Interpreter::entry_for_kind(Interpreter::zerolocals), "Normal entry must have been generated by now");
+    __ load_const_optimized(R11_scratch1, Interpreter::entry_for_kind(Interpreter::zerolocals), R0);
+    __ mtctr(R11_scratch1);
+    __ bctr();
+    __ flush();
+
+    return entry;
+  } else {
+    return generate_accessor_entry();
+  }
+}
+
+void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
+  // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
+  // the days we had adapter frames. When we deoptimize a situation where a
+  // compiled caller calls a compiled callee, the caller will have registers it
+  // expects to survive the call to the callee. If we deoptimize the callee, the
+  // only way we can restore these registers is to have the oldest interpreter
+  // frame that we create restore these values. That is what this routine
+  // accomplishes.
+
+  // At the moment we have modified c2 to not have any callee save registers
+  // so this problem does not exist and this routine is just a place holder.
+
+  assert(f->is_interpreted_frame(), "must be interpreted");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/interpreter_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_INTERPRETER_PPC_HPP
+#define CPU_PPC_VM_INTERPRETER_PPC_HPP
+
+ public:
+
+  // Stack index relative to tos (which points at value)
+  static int expr_index_at(int i) {
+    return stackElementWords * i;
+  }
+
+  // Already negated by the C++ interpreter.
+  static int local_index_at(int i) {
+    assert(i <= 0, "local direction already negated");
+    return stackElementWords * i;
+  }
+
+#endif // CPU_PPC_VM_INTERPRETER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/javaFrameAnchor_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
+#define CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
+
+#ifndef CC_INTERP
+#error "CC_INTERP must be defined on PPC64"
+#endif
+
+public:
+  // Each arch must define reset, save, restore
+  // These are used by objects that only care about:
+  //  1 - initializing a new state (thread creation, javaCalls)
+  //  2 - saving a current state (javaCalls)
+  //  3 - restoring an old state (javaCalls)
+
+  inline void clear(void) {
+    // clearing _last_Java_sp must be first
+    _last_Java_sp = NULL;
+    // fence?
+    OrderAccess::release();
+    _last_Java_pc = NULL;
+  }
+
+  inline void set(intptr_t* sp, address pc) {
+    _last_Java_pc = pc;
+    OrderAccess::release();
+    _last_Java_sp = sp;
+  }
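+
+  // Ordering note: _last_Java_sp acts as the "anchor is valid" flag, so
+  // set() publishes the pc before the sp and clear() retires the sp
+  // before the pc; a reader that sees a non-NULL sp therefore also sees
+  // a matching pc.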
+
+  void copy(JavaFrameAnchor* src) {
+    // In order to make sure the transition state is valid for "this",
+    // we must clear _last_Java_sp before copying the rest of the new data.
+    //
+    // Hack Alert: Temporary bugfix for 4717480/4721647.
+    // To act like the previous version (pd_cache_state), don't NULL
+    // _last_Java_sp unless the value is changing.
+    if (_last_Java_sp != src->_last_Java_sp) {
+      _last_Java_sp = NULL;
+      OrderAccess::release();
+    }
+    _last_Java_pc = src->_last_Java_pc;
+    // Must be last so the profiler will always see a valid frame if has_last_frame() is true.
+    OrderAccess::release();
+    _last_Java_sp = src->_last_Java_sp;
+  }
+
+  // Always walkable.
+  bool walkable(void) { return true; }
+  // Never anything to do since we are always walkable and can find return addresses.
+  void make_walkable(JavaThread* thread) { }
+
+  intptr_t* last_Java_sp(void) const  { return _last_Java_sp; }
+
+  address last_Java_pc(void)          { return _last_Java_pc; }
+
+  void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
+
+#endif // CPU_PPC_VM_JAVAFRAMEANCHOR_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/jniFastGetField_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_ppc.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm_misc.hpp"
+#include "runtime/safepoint.hpp"
+
+
+address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
+  // We don't have fast JNI accessors.
+  return (address) -1;
+}
+
+address JNI_FastGetField::generate_fast_get_boolean_field() {
+  return generate_fast_get_int_field0(T_BOOLEAN);
+}
+
+address JNI_FastGetField::generate_fast_get_byte_field() {
+  return generate_fast_get_int_field0(T_BYTE);
+}
+
+address JNI_FastGetField::generate_fast_get_char_field() {
+  return generate_fast_get_int_field0(T_CHAR);
+}
+
+address JNI_FastGetField::generate_fast_get_short_field() {
+  return generate_fast_get_int_field0(T_SHORT);
+}
+
+address JNI_FastGetField::generate_fast_get_int_field() {
+  return generate_fast_get_int_field0(T_INT);
+}
+
+address JNI_FastGetField::generate_fast_get_long_field() {
+  // We don't have fast JNI accessors.
+  return (address) -1;
+}
+
+address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
+  // We don't have fast JNI accessors.
+  return (address) -1;
+}
+
+address JNI_FastGetField::generate_fast_get_float_field() {
+  return generate_fast_get_float_field0(T_FLOAT);
+}
+
+address JNI_FastGetField::generate_fast_get_double_field() {
+  return generate_fast_get_float_field0(T_DOUBLE);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/jniTypes_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_JNITYPES_PPC_HPP
+#define CPU_PPC_VM_JNITYPES_PPC_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+#include "prims/jni.h"
+
+// This file holds platform-dependent routines used to write primitive
+// jni types to the array of arguments passed into JavaCalls::call.
+
+class JNITypes : AllStatic {
+  // These functions write a java primitive type (in native format) to
+  // a java stack slot array to be passed as an argument to
+  // JavaCalls::call.  I.e., they are functionally 'push' operations
+  // if they have a 'pos' formal parameter.  Note that jlong's and
+  // jdouble's are written _in reverse_ of the order in which they
+  // appear in the interpreter stack.  This is because call stubs (see
+  // stubGenerator_sparc.cpp) reverse the argument list constructed by
+  // JavaCallArguments (see javaCalls.hpp).
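+  //
+  // A worked example (sketch) for the argument list (jint i, jlong l):
+  //   slot 0: i    -- put_int stores at to+pos, pos += 1
+  //   slot 1: --   -- first half of the jlong pair, left unused
+  //   slot 2: l    -- put_long stores at to+1+pos, pos += 2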
+
+ private:
+
+#ifndef PPC64
+#error "ppc32 support currently not implemented!!!"
+#endif // PPC64
+
+ public:
+  // Ints are stored in native format in one JavaCallArgument slot at *to.
+  static inline void put_int(jint  from, intptr_t *to)           { *(jint *)(to +   0  ) =  from; }
+  static inline void put_int(jint  from, intptr_t *to, int& pos) { *(jint *)(to + pos++) =  from; }
+  static inline void put_int(jint *from, intptr_t *to, int& pos) { *(jint *)(to + pos++) = *from; }
+
+  // Longs are stored in native format in one JavaCallArgument slot at
+  // *(to+1).
+  static inline void put_long(jlong  from, intptr_t *to) {
+    *(jlong*) (to + 1) = from;
+  }
+
+  static inline void put_long(jlong  from, intptr_t *to, int& pos) {
+    *(jlong*) (to + 1 + pos) = from;
+    pos += 2;
+  }
+
+  static inline void put_long(jlong *from, intptr_t *to, int& pos) {
+    *(jlong*) (to + 1 + pos) = *from;
+    pos += 2;
+  }
+
+  // Oops are stored in native format in one JavaCallArgument slot at *to.
+  static inline void put_obj(oop  from, intptr_t *to)           { *(oop *)(to +   0  ) =  from; }
+  static inline void put_obj(oop  from, intptr_t *to, int& pos) { *(oop *)(to + pos++) =  from; }
+  static inline void put_obj(oop *from, intptr_t *to, int& pos) { *(oop *)(to + pos++) = *from; }
+
+  // Floats are stored in native format in one JavaCallArgument slot at *to.
+  static inline void put_float(jfloat  from, intptr_t *to)           { *(jfloat *)(to +   0  ) =  from;  }
+  static inline void put_float(jfloat  from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) =  from; }
+  static inline void put_float(jfloat *from, intptr_t *to, int& pos) { *(jfloat *)(to + pos++) = *from; }
+
+  // Doubles are stored in native word format in one JavaCallArgument
+  // slot at *(to+1).
+  static inline void put_double(jdouble  from, intptr_t *to) {
+    *(jdouble*) (to + 1) = from;
+  }
+
+  static inline void put_double(jdouble  from, intptr_t *to, int& pos) {
+    *(jdouble*) (to + 1 + pos) = from;
+    pos += 2;
+  }
+
+  static inline void put_double(jdouble *from, intptr_t *to, int& pos) {
+    *(jdouble*) (to + 1 + pos) = *from;
+    pos += 2;
+  }
+
+  // The get_xxx routines, on the other hand, actually _do_ fetch
+  // java primitive types from the interpreter stack.
+  // No need to worry about alignment here.
+  static inline jint    get_int   (intptr_t *from) { return *(jint *)    from; }
+  static inline jlong   get_long  (intptr_t *from) { return *(jlong *)  (from + 1); }
+  static inline oop     get_obj   (intptr_t *from) { return *(oop *)     from; }
+  static inline jfloat  get_float (intptr_t *from) { return *(jfloat *)  from; }
+  static inline jdouble get_double(intptr_t *from) { return *(jdouble *)(from + 1); }
+};
+
+#endif // CPU_PPC_VM_JNITYPES_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/jni_ppc.h	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef CPU_PPC_VM_JNI_PPC_H
+#define CPU_PPC_VM_JNI_PPC_H
+
+// Note: please do not change these without also changing jni_md.h in the JDK
+// repository
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#if (defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4) && (__GNUC_MINOR__ > 2))) || __has_attribute(visibility)
+  #define JNIEXPORT     __attribute__((visibility("default")))
+  #define JNIIMPORT     __attribute__((visibility("default")))
+#else
+  #define JNIEXPORT
+  #define JNIIMPORT
+#endif
+
+#define JNICALL
+
+typedef int jint;
+
+#if defined(_LP64)
+  typedef long jlong;
+#else
+  typedef long long jlong;
+#endif
+
+typedef signed char jbyte;
+
+#endif // CPU_PPC_VM_JNI_PPC_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,3094 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "compiler/disassembler.hpp"
+#include "gc_interface/collectedHeap.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/biasedLocking.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#endif // INCLUDE_ALL_GCS
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) block_comment(str)
+#endif
+
+#ifdef ASSERT
+// On RISC, there's no benefit to verifying instruction boundaries.
+bool AbstractAssembler::pd_check_instruction_mark() { return false; }
+#endif
+
+void MacroAssembler::ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop) {
+  assert(Assembler::is_simm(si31, 31) && si31 >= 0, "si31 out of range");
+  if (Assembler::is_simm(si31, 16)) {
+    ld(d, si31, a);
+    if (emit_filler_nop) nop();
+  } else {
+    const int hi = MacroAssembler::largeoffset_si16_si16_hi(si31);
+    const int lo = MacroAssembler::largeoffset_si16_si16_lo(si31);
+    addis(d, a, hi);
+    ld(d, lo, d);
+  }
+}
+
+void MacroAssembler::ld_largeoffset(Register d, int si31, Register a, int emit_filler_nop) {
+  assert_different_registers(d, a);
+  ld_largeoffset_unchecked(d, si31, a, emit_filler_nop);
+}
+
+void MacroAssembler::load_sized_value(Register dst, RegisterOrConstant offs, Register base,
+                                      size_t size_in_bytes, bool is_signed) {
+  switch (size_in_bytes) {
+  case  8:              ld(dst, offs, base);                         break;
+  case  4:  is_signed ? lwa(dst, offs, base) : lwz(dst, offs, base); break;
+  case  2:  is_signed ? lha(dst, offs, base) : lhz(dst, offs, base); break;
+  case  1:  lbz(dst, offs, base); if (is_signed) extsb(dst, dst);    break; // lba doesn't exist :(
+  default:  ShouldNotReachHere();
+  }
+}
+
+void MacroAssembler::store_sized_value(Register dst, RegisterOrConstant offs, Register base,
+                                       size_t size_in_bytes) {
+  switch (size_in_bytes) {
+  case  8:  std(dst, offs, base); break;
+  case  4:  stw(dst, offs, base); break;
+  case  2:  sth(dst, offs, base); break;
+  case  1:  stb(dst, offs, base); break;
+  default:  ShouldNotReachHere();
+  }
+}
+
+void MacroAssembler::align(int modulus, int max, int rem) {
+  int padding = (rem + modulus - (offset() % modulus)) % modulus;
+  if (padding > max) return;
+  for (int c = (padding >> 2); c > 0; --c) { nop(); }
+}
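+
+// Example, for reference: align(32, 28, 28) (as used before the accessor
+// entry points) pads with nops until offset() % 32 == 28, but gives up if
+// that would cost more than 28 bytes; the 4-byte sync emitted next then
+// puts the entry point itself on a 32-byte boundary.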
+
+// Issue instructions that calculate given TOC from global TOC.
+void MacroAssembler::calculate_address_from_global_toc(Register dst, address addr, bool hi16, bool lo16,
+                                                       bool add_relocation, bool emit_dummy_addr) {
+  int offset = -1;
+  if (emit_dummy_addr) {
+    offset = -128; // dummy address
+  } else if (addr != (address)(intptr_t)-1) {
+    offset = MacroAssembler::offset_to_global_toc(addr);
+  }
+
+  if (hi16) {
+    addis(dst, R29, MacroAssembler::largeoffset_si16_si16_hi(offset));
+  }
+  if (lo16) {
+    if (add_relocation) {
+      // Relocate at the addi to avoid confusion with a load from the method's TOC.
+      relocate(internal_word_Relocation::spec(addr));
+    }
+    addi(dst, dst, MacroAssembler::largeoffset_si16_si16_lo(offset));
+  }
+}
+
+int MacroAssembler::patch_calculate_address_from_global_toc_at(address a, address bound, address addr) {
+  const int offset = MacroAssembler::offset_to_global_toc(addr);
+
+  const address inst2_addr = a;
+  const int inst2 = *(int *)inst2_addr;
+
+  // The relocation points to the second instruction, the addi,
+  // and the addi reads and writes the same register dst.
+  const int dst = inv_rt_field(inst2);
+  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
+
+  // Now, find the preceding addis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  while (inst1_addr >= bound) {
+    inst1 = *(int *) inst1_addr;
+    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
+      // Stop, found the addis which writes dst.
+      break;
+    }
+    inst1_addr -= BytesPerInstWord;
+  }
+
+  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
+  set_imm((int *)inst1_addr, MacroAssembler::largeoffset_si16_si16_hi(offset));
+  set_imm((int *)inst2_addr, MacroAssembler::largeoffset_si16_si16_lo(offset));
+  return (int)((intptr_t)addr - (intptr_t)inst1_addr);
+}
+
+address MacroAssembler::get_address_of_calculate_address_from_global_toc_at(address a, address bound) {
+  const address inst2_addr = a;
+  const int inst2 = *(int *)inst2_addr;
+
+  // The relocation points to the second instruction, the addi,
+  // and the addi reads and writes the same register dst.
+  const int dst = inv_rt_field(inst2);
+  assert(is_addi(inst2) && inv_ra_field(inst2) == dst, "must be addi reading and writing dst");
+
+  // Now, find the preceding addis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  while (inst1_addr >= bound) {
+    inst1 = *(int *) inst1_addr;
+    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
+      // stop, found the addis which writes dst
+      break;
+    }
+    inst1_addr -= BytesPerInstWord;
+  }
+
+  assert(is_addis(inst1) && inv_ra_field(inst1) == 29 /* R29 */, "source must be global TOC");
+
+  int offset = (get_imm(inst1_addr, 0) << 16) + get_imm(inst2_addr, 0);
+  // -1 is a special case
+  if (offset == -1) {
+    return (address)(intptr_t)-1;
+  } else {
+    return global_toc() + offset;
+  }
+}
+
+#ifdef _LP64
+// Patch compressed oops or klass constants.
+// Assembler sequence is
+// 1) compressed oops:
+//    lis  rx = const.hi
+//    ori rx = rx | const.lo
+// 2) compressed klass:
+//    lis  rx = const.hi
+//    clrldi rx = rx & 0xFFFFffff // clearMS32b, optional
+//    ori rx = rx | const.lo
+// The optional clrldi is simply skipped over when patching.
+int MacroAssembler::patch_set_narrow_oop(address a, address bound, narrowOop data) {
+  assert(UseCompressedOops, "Should only patch compressed oops");
+
+  const address inst2_addr = a;
+  const int inst2 = *(int *)inst2_addr;
+
+  // The relocation points to the second instruction, the ori,
+  // and the ori reads and writes the same register dst.
+  const int dst = inv_rta_field(inst2);
+  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
+  // Now, find the preceding addis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  bool inst1_found = false;
+  while (inst1_addr >= bound) {
+    inst1 = *(int *)inst1_addr;
+    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break; }
+    inst1_addr -= BytesPerInstWord;
+  }
+  assert(inst1_found, "inst is not lis");
+
+  int xc = (data >> 16) & 0xffff;
+  int xd = (data >>  0) & 0xffff;
+
+  set_imm((int *)inst1_addr, (short)(xc)); // see enc_load_con_narrow_hi/_lo
+  set_imm((int *)inst2_addr,        (xd)); // unsigned int
+  return (int)((intptr_t)inst2_addr - (intptr_t)inst1_addr);
+}
+
+// Get compressed oop or klass constant.
+narrowOop MacroAssembler::get_narrow_oop(address a, address bound) {
+  assert(UseCompressedOops, "Should only patch compressed oops");
+
+  const address inst2_addr = a;
+  const int inst2 = *(int *)inst2_addr;
+
+  // The relocation points to the second instruction, the ori,
+  // and the ori reads and writes the same register dst.
+  const int dst = inv_rta_field(inst2);
+  assert(is_ori(inst2) && inv_rs_field(inst2) == dst, "must be ori reading and writing dst");
+  // Now, find the preceding lis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  bool inst1_found = false;
+
+  while (inst1_addr >= bound) {
+    inst1 = *(int *) inst1_addr;
+    if (is_lis(inst1) && inv_rs_field(inst1) == dst) { inst1_found = true; break;}
+    inst1_addr -= BytesPerInstWord;
+  }
+  assert(inst1_found, "inst is not lis");
+
+  uint xl = ((unsigned int) (get_imm(inst2_addr, 0) & 0xffff));
+  uint xh = (((get_imm(inst1_addr, 0)) & 0xffff) << 16);
+
+  return (int) (xl | xh);
+}
+#endif // _LP64
+
+void MacroAssembler::load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc) {
+  int toc_offset = 0;
+  // Use RelocationHolder::none for the constant pool entry, otherwise
+  // we will end up with a failing NativeCall::verify(x) where x is
+  // the address of the constant pool entry.
+  // FIXME: We should insert relocation information for oops at the constant
+  // pool entries instead of inserting it at the loads; patching of a constant
+  // pool entry should be less expensive.
+  address oop_address = address_constant((address)a.value(), RelocationHolder::none);
+  // Relocate at the pc of the load.
+  relocate(a.rspec());
+  toc_offset = (int)(oop_address - code()->consts()->start());
+  ld_largeoffset_unchecked(dst, toc_offset, toc, true);
+}
+
+bool MacroAssembler::is_load_const_from_method_toc_at(address a) {
+  const address inst1_addr = a;
+  const int inst1 = *(int *)inst1_addr;
+
+  // The relocation points to the ld or the addis.
+  return (is_ld(inst1)) ||
+         (is_addis(inst1) && inv_ra_field(inst1) != 0);
+}
+
+int MacroAssembler::get_offset_of_load_const_from_method_toc_at(address a) {
+  assert(is_load_const_from_method_toc_at(a), "must be load_const_from_method_toc");
+
+  const address inst1_addr = a;
+  const int inst1 = *(int *)inst1_addr;
+
+  if (is_ld(inst1)) {
+    return inv_d1_field(inst1);
+  } else if (is_addis(inst1)) {
+    const int dst = inv_rt_field(inst1);
+
+    // Now, find the succeeding ld which reads and writes to dst.
+    address inst2_addr = inst1_addr + BytesPerInstWord;
+    int inst2 = 0;
+    while (true) {
+      inst2 = *(int *) inst2_addr;
+      if (is_ld(inst2) && inv_ra_field(inst2) == dst && inv_rt_field(inst2) == dst) {
+        // Stop, found the ld which reads and writes dst.
+        break;
+      }
+      inst2_addr += BytesPerInstWord;
+    }
+    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
+  }
+  ShouldNotReachHere();
+  return 0;
+}
+
+// Get the constant from a `load_const' sequence.
+long MacroAssembler::get_const(address a) {
+  assert(is_load_const_at(a), "not a load of a constant");
+  const int *p = (const int*) a;
+  unsigned long x = (((unsigned long) (get_imm(a,0) & 0xffff)) << 48);
+  if (is_ori(*(p+1))) {
+    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 32);
+    x |= (((unsigned long) (get_imm(a,3) & 0xffff)) << 16);
+    x |= (((unsigned long) (get_imm(a,4) & 0xffff)));
+  } else if (is_lis(*(p+1))) {
+    x |= (((unsigned long) (get_imm(a,2) & 0xffff)) << 32);
+    x |= (((unsigned long) (get_imm(a,1) & 0xffff)) << 16);
+    x |= (((unsigned long) (get_imm(a,3) & 0xffff)));
+  } else {
+    ShouldNotReachHere();
+    return (long) 0;
+  }
+  return (long) x;
+}
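+
+// Note: get_const only reads the 16-bit immediates at fixed positions of
+// the two `load_const' forms (distinguished by whether the second
+// instruction is an ori or a lis); the intervening shift/or instructions
+// are not decoded.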
+
+// Patch the 64 bit constant of a `load_const' sequence. This is a low
+// level procedure. It neither flushes the instruction cache nor is it
+// mt safe.
+void MacroAssembler::patch_const(address a, long x) {
+  assert(is_load_const_at(a), "not a load of a constant");
+  int *p = (int*) a;
+  if (is_ori(*(p+1))) {
+    set_imm(0 + p, (x >> 48) & 0xffff);
+    set_imm(1 + p, (x >> 32) & 0xffff);
+    set_imm(3 + p, (x >> 16) & 0xffff);
+    set_imm(4 + p, x & 0xffff);
+  } else if (is_lis(*(p+1))) {
+    set_imm(0 + p, (x >> 48) & 0xffff);
+    set_imm(2 + p, (x >> 32) & 0xffff);
+    set_imm(1 + p, (x >> 16) & 0xffff);
+    set_imm(3 + p, x & 0xffff);
+  } else {
+    ShouldNotReachHere();
+  }
+}
+
+AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
+  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+  int index = oop_recorder()->allocate_metadata_index(obj);
+  RelocationHolder rspec = metadata_Relocation::spec(index);
+  return AddressLiteral((address)obj, rspec);
+}
+
+AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
+  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
+  int index = oop_recorder()->find_index(obj);
+  RelocationHolder rspec = metadata_Relocation::spec(index);
+  return AddressLiteral((address)obj, rspec);
+}
+
+AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
+  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  int oop_index = oop_recorder()->allocate_oop_index(obj);
+  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
+}
+
+AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
+  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
+  int oop_index = oop_recorder()->find_index(obj);
+  return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
+}
+
+RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
+                                                      Register tmp, int offset) {
+  intptr_t value = *delayed_value_addr;
+  if (value != 0) {
+    return RegisterOrConstant(value + offset);
+  }
+
+  // Load indirectly to solve generation ordering problem.
+  // static address, no relocation
+  int simm16_offset = load_const_optimized(tmp, delayed_value_addr, noreg, true);
+  ld(tmp, simm16_offset, tmp); // must be aligned ((xa & 3) == 0)
+
+  if (offset != 0) {
+    addi(tmp, tmp, offset);
+  }
+
+  return RegisterOrConstant(tmp);
+}
+
+#ifndef PRODUCT
+void MacroAssembler::pd_print_patched_instruction(address branch) {
+  Unimplemented(); // TODO: PPC port
+}
+#endif // ndef PRODUCT
+
+// Conditional far branch for destinations encodable in 24+2 bits.
+void MacroAssembler::bc_far(int boint, int biint, Label& dest, int optimize) {
+
+  // If requested by flag optimize, relocate the bc_far as a
+  // runtime_call and prepare for optimizing it when the code gets
+  // relocated.
+  if (optimize == bc_far_optimize_on_relocate) {
+    relocate(relocInfo::runtime_call_type);
+  }
+
+  // variant 2:
+  //
+  //    b!cxx SKIP
+  //    bxx   DEST
+  //  SKIP:
+  //
+
+  const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
+                                                opposite_bcond(inv_boint_bcond(boint)));
+
+  // We emit two branches.
+  // First, a conditional branch which jumps around the far branch.
+  const address not_taken_pc = pc() + 2 * BytesPerInstWord;
+  const address bc_pc        = pc();
+  bc(opposite_boint, biint, not_taken_pc);
+
+  const int bc_instr = *(int*)bc_pc;
+  assert(not_taken_pc == (address)inv_bd_field(bc_instr, (intptr_t)bc_pc), "postcondition");
+  assert(opposite_boint == inv_bo_field(bc_instr), "postcondition");
+  assert(boint == add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(bc_instr))),
+                                     opposite_bcond(inv_boint_bcond(inv_bo_field(bc_instr)))),
+         "postcondition");
+  assert(biint == inv_bi_field(bc_instr), "postcondition");
+
+  // Second, an unconditional far branch which jumps to dest.
+  // Note: target(dest) remembers the current pc (see CodeSection::target)
+  //       and returns the current pc if the label is not bound yet; when
+  //       the label gets bound, the unconditional far branch will be patched.
+  const address target_pc = target(dest);
+  const address b_pc  = pc();
+  b(target_pc);
+
+  assert(not_taken_pc == pc(),                     "postcondition");
+  assert(dest.is_bound() || target_pc == b_pc, "postcondition");
+}
+
+bool MacroAssembler::is_bc_far_at(address instruction_addr) {
+  return is_bc_far_variant1_at(instruction_addr) ||
+         is_bc_far_variant2_at(instruction_addr) ||
+         is_bc_far_variant3_at(instruction_addr);
+}
+
+address MacroAssembler::get_dest_of_bc_far_at(address instruction_addr) {
+  if (is_bc_far_variant1_at(instruction_addr)) {
+    const address instruction_1_addr = instruction_addr;
+    const int instruction_1 = *(int*)instruction_1_addr;
+    return (address)inv_bd_field(instruction_1, (intptr_t)instruction_1_addr);
+  } else if (is_bc_far_variant2_at(instruction_addr)) {
+    const address instruction_2_addr = instruction_addr + 4;
+    return bxx_destination(instruction_2_addr);
+  } else if (is_bc_far_variant3_at(instruction_addr)) {
+    return instruction_addr + 8;
+  }
+  // variant 4 ???
+  ShouldNotReachHere();
+  return NULL;
+}
+
+void MacroAssembler::set_dest_of_bc_far_at(address instruction_addr, address dest) {
+  if (is_bc_far_variant3_at(instruction_addr)) {
+    // variant 3, far cond branch to the next instruction, already patched to nops:
+    //
+    //    nop
+    //    endgroup
+    //  SKIP/DEST:
+    //
+    return;
+  }
+
+  // first, extract boint and biint from the current branch
+  int boint = 0;
+  int biint = 0;
+
+  ResourceMark rm;
+  const int code_size = 2 * BytesPerInstWord;
+  CodeBuffer buf(instruction_addr, code_size);
+  MacroAssembler masm(&buf);
+  if (is_bc_far_variant2_at(instruction_addr) && dest == instruction_addr + 8) {
+    // Far branch to next instruction: Optimize it by patching nops (produce variant 3).
+    masm.nop();
+    masm.endgroup();
+  } else {
+    if (is_bc_far_variant1_at(instruction_addr)) {
+      // variant 1, the 1st instruction contains the destination address:
+      //
+      //    bcxx  DEST
+      //    endgroup
+      //
+      const int instruction_1 = *(int*)(instruction_addr);
+      boint = inv_bo_field(instruction_1);
+      biint = inv_bi_field(instruction_1);
+    } else if (is_bc_far_variant2_at(instruction_addr)) {
+      // variant 2, the 2nd instruction contains the destination address:
+      //
+      //    b!cxx SKIP
+      //    bxx   DEST
+      //  SKIP:
+      //
+      const int instruction_1 = *(int*)(instruction_addr);
+      boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(inv_bo_field(instruction_1))),
+          opposite_bcond(inv_boint_bcond(inv_bo_field(instruction_1))));
+      biint = inv_bi_field(instruction_1);
+    } else {
+      // variant 4???
+      ShouldNotReachHere();
+    }
+
+    // second, set the new branch destination and optimize the code
+    if (dest != instruction_addr + 4 && // the bc_far is still unbound!
+        masm.is_within_range_of_bcxx(dest, instruction_addr)) {
+      // variant 1:
+      //
+      //    bcxx  DEST
+      //    endgroup
+      //
+      masm.bc(boint, biint, dest);
+      masm.endgroup();
+    } else {
+      // variant 2:
+      //
+      //    b!cxx SKIP
+      //    bxx   DEST
+      //  SKIP:
+      //
+      const int opposite_boint = add_bhint_to_boint(opposite_bhint(inv_boint_bhint(boint)),
+                                                    opposite_bcond(inv_boint_bcond(boint)));
+      const address not_taken_pc = masm.pc() + 2 * BytesPerInstWord;
+      masm.bc(opposite_boint, biint, not_taken_pc);
+      masm.b(dest);
+    }
+  }
+  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
+}
+
+// Emit a NOT mt-safe patchable 64 bit absolute call/jump.
+void MacroAssembler::bxx64_patchable(address dest, relocInfo::relocType rt, bool link) {
+  // get current pc
+  uint64_t start_pc = (uint64_t) pc();
+
+  const address pc_of_bl = (address) (start_pc + (6*BytesPerInstWord)); // bl is last
+  const address pc_of_b  = (address) (start_pc + (0*BytesPerInstWord)); // b is first
+
+  // relocate here
+  if (rt != relocInfo::none) {
+    relocate(rt);
+  }
+
+  if ( ReoptimizeCallSequences &&
+       (( link && is_within_range_of_b(dest, pc_of_bl)) ||
+        (!link && is_within_range_of_b(dest, pc_of_b)))) {
+    // variant 2:
+    // Emit an optimized, pc-relative call/jump.
+
+    if (link) {
+      // some padding
+      nop();
+      nop();
+      nop();
+      nop();
+      nop();
+      nop();
+
+      // do the call
+      assert(pc() == pc_of_bl, "just checking");
+      bl(dest, relocInfo::none);
+    } else {
+      // do the jump
+      assert(pc() == pc_of_b, "just checking");
+      b(dest, relocInfo::none);
+
+      // some padding
+      nop();
+      nop();
+      nop();
+      nop();
+      nop();
+      nop();
+    }
+
+    // Assert that we can identify the emitted call/jump.
+    assert(is_bxx64_patchable_variant2_at((address)start_pc, link),
+           "can't identify emitted call");
+  } else {
+    // variant 1:
+
+    mr(R0, R11);  // spill R11 -> R0.
+
+    // Load the destination address into CTR,
+    // calculate destination relative to global toc.
+    calculate_address_from_global_toc(R11, dest, true, true, false);
+
+    mtctr(R11);
+    mr(R11, R0);  // restore R11 <- R0.
+    nop();
+
+    // do the call/jump
+    if (link) {
+      bctrl();
+    } else {
+      bctr();
+    }
+    // Assert that we can identify the emitted call/jump.
+    assert(is_bxx64_patchable_variant1b_at((address)start_pc, link),
+           "can't identify emitted call");
+  }
+
+  // Assert that we can identify the emitted call/jump.
+  assert(is_bxx64_patchable_at((address)start_pc, link),
+         "can't identify emitted call");
+  assert(get_dest_of_bxx64_patchable_at((address)start_pc, link) == dest,
+         "wrong encoding of dest address");
+}
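+
+// Both sequences emitted by bxx64_patchable() above occupy the same number
+// of instruction words, so set_dest_of_bxx64_patchable_at() below can patch
+// one variant over the other in place. Rough layout (a sketch; the exact
+// encodings are what the is_bxx64_patchable_variant*_at predicates below
+// accept):
+//
+//   variant 1b (toc-relative):        variant 2 (pc-relative, link case):
+//     mr    R0, R11                     nop  (x6, padding)
+//     <load dest relative to toc>       bl   DEST
+//     mtctr R11
+//     mr    R11, R0
+//     nop
+//     bctr[l]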
+
+// Identify a bxx64_patchable instruction.
+bool MacroAssembler::is_bxx64_patchable_at(address instruction_addr, bool link) {
+  return is_bxx64_patchable_variant1b_at(instruction_addr, link)
+    //|| is_bxx64_patchable_variant1_at(instruction_addr, link)
+      || is_bxx64_patchable_variant2_at(instruction_addr, link);
+}
+
+// Does the bxx64_patchable instruction use a pc-relative encoding of
+// the call destination?
+bool MacroAssembler::is_bxx64_patchable_pcrelative_at(address instruction_addr, bool link) {
+  // variant 2 is pc-relative
+  return is_bxx64_patchable_variant2_at(instruction_addr, link);
+}
+
+// Identify variant 1.
+bool MacroAssembler::is_bxx64_patchable_variant1_at(address instruction_addr, bool link) {
+  unsigned int* instr = (unsigned int*) instruction_addr;
+  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
+    && is_mtctr(instr[5]) // mtctr
+    && is_load_const_at(instruction_addr);
+}
+
+// Identify variant 1b: load destination relative to global toc.
+bool MacroAssembler::is_bxx64_patchable_variant1b_at(address instruction_addr, bool link) {
+  unsigned int* instr = (unsigned int*) instruction_addr;
+  return (link ? is_bctrl(instr[6]) : is_bctr(instr[6])) // bctr[l]
+    && is_mtctr(instr[3]) // mtctr
+    && is_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord, instruction_addr);
+}
+
+// Identify variant 2.
+bool MacroAssembler::is_bxx64_patchable_variant2_at(address instruction_addr, bool link) {
+  unsigned int* instr = (unsigned int*) instruction_addr;
+  if (link) {
+    return is_bl (instr[6])  // bl dest is last
+      && is_nop(instr[0])  // nop
+      && is_nop(instr[1])  // nop
+      && is_nop(instr[2])  // nop
+      && is_nop(instr[3])  // nop
+      && is_nop(instr[4])  // nop
+      && is_nop(instr[5]); // nop
+  } else {
+    return is_b  (instr[0])  // b  dest is first
+      && is_nop(instr[1])  // nop
+      && is_nop(instr[2])  // nop
+      && is_nop(instr[3])  // nop
+      && is_nop(instr[4])  // nop
+      && is_nop(instr[5])  // nop
+      && is_nop(instr[6]); // nop
+  }
+}
+
+// Set dest address of a bxx64_patchable instruction.
+void MacroAssembler::set_dest_of_bxx64_patchable_at(address instruction_addr, address dest, bool link) {
+  ResourceMark rm;
+  int code_size = MacroAssembler::bxx64_patchable_size;
+  CodeBuffer buf(instruction_addr, code_size);
+  MacroAssembler masm(&buf);
+  masm.bxx64_patchable(dest, relocInfo::none, link);
+  ICache::ppc64_flush_icache_bytes(instruction_addr, code_size);
+}
+
+// Get dest address of a bxx64_patchable instruction.
+address MacroAssembler::get_dest_of_bxx64_patchable_at(address instruction_addr, bool link) {
+  if (is_bxx64_patchable_variant1_at(instruction_addr, link)) {
+    return (address) (unsigned long) get_const(instruction_addr);
+  } else if (is_bxx64_patchable_variant2_at(instruction_addr, link)) {
+    unsigned int* instr = (unsigned int*) instruction_addr;
+    if (link) {
+      const int instr_idx = 6; // bl is last
+      int branchoffset = branch_destination(instr[instr_idx], 0);
+      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
+    } else {
+      const int instr_idx = 0; // b is first
+      int branchoffset = branch_destination(instr[instr_idx], 0);
+      return instruction_addr + branchoffset + instr_idx*BytesPerInstWord;
+    }
+  // Load dest relative to global toc.
+  } else if (is_bxx64_patchable_variant1b_at(instruction_addr, link)) {
+    return get_address_of_calculate_address_from_global_toc_at(instruction_addr + 2*BytesPerInstWord,
+                                                               instruction_addr);
+  } else {
+    ShouldNotReachHere();
+    return NULL;
+  }
+}
+
+// Uses ordering which corresponds to ABI:
+//    _savegpr0_14:  std  r14,-144(r1)
+//    _savegpr0_15:  std  r15,-136(r1)
+//    _savegpr0_16:  std  r16,-128(r1)
+void MacroAssembler::save_nonvolatile_gprs(Register dst, int offset) {
+  std(R14, offset, dst);   offset += 8;
+  std(R15, offset, dst);   offset += 8;
+  std(R16, offset, dst);   offset += 8;
+  std(R17, offset, dst);   offset += 8;
+  std(R18, offset, dst);   offset += 8;
+  std(R19, offset, dst);   offset += 8;
+  std(R20, offset, dst);   offset += 8;
+  std(R21, offset, dst);   offset += 8;
+  std(R22, offset, dst);   offset += 8;
+  std(R23, offset, dst);   offset += 8;
+  std(R24, offset, dst);   offset += 8;
+  std(R25, offset, dst);   offset += 8;
+  std(R26, offset, dst);   offset += 8;
+  std(R27, offset, dst);   offset += 8;
+  std(R28, offset, dst);   offset += 8;
+  std(R29, offset, dst);   offset += 8;
+  std(R30, offset, dst);   offset += 8;
+  std(R31, offset, dst);   offset += 8;
+
+  stfd(F14, offset, dst);   offset += 8;
+  stfd(F15, offset, dst);   offset += 8;
+  stfd(F16, offset, dst);   offset += 8;
+  stfd(F17, offset, dst);   offset += 8;
+  stfd(F18, offset, dst);   offset += 8;
+  stfd(F19, offset, dst);   offset += 8;
+  stfd(F20, offset, dst);   offset += 8;
+  stfd(F21, offset, dst);   offset += 8;
+  stfd(F22, offset, dst);   offset += 8;
+  stfd(F23, offset, dst);   offset += 8;
+  stfd(F24, offset, dst);   offset += 8;
+  stfd(F25, offset, dst);   offset += 8;
+  stfd(F26, offset, dst);   offset += 8;
+  stfd(F27, offset, dst);   offset += 8;
+  stfd(F28, offset, dst);   offset += 8;
+  stfd(F29, offset, dst);   offset += 8;
+  stfd(F30, offset, dst);   offset += 8;
+  stfd(F31, offset, dst);
+}
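+
+// Note: save_nonvolatile_gprs() above and restore_nonvolatile_gprs() below
+// must agree on the layout: 18 GPRs (R14..R31) followed by 18 FPRs
+// (F14..F31), 8 bytes each, i.e. 36 * 8 = 288 bytes starting at `offset'.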
+
+// Uses ordering which corresponds to ABI:
+//    _restgpr0_14:  ld   r14,-144(r1)
+//    _restgpr0_15:  ld   r15,-136(r1)
+//    _restgpr0_16:  ld   r16,-128(r1)
+void MacroAssembler::restore_nonvolatile_gprs(Register src, int offset) {
+  ld(R14, offset, src);   offset += 8;
+  ld(R15, offset, src);   offset += 8;
+  ld(R16, offset, src);   offset += 8;
+  ld(R17, offset, src);   offset += 8;
+  ld(R18, offset, src);   offset += 8;
+  ld(R19, offset, src);   offset += 8;
+  ld(R20, offset, src);   offset += 8;
+  ld(R21, offset, src);   offset += 8;
+  ld(R22, offset, src);   offset += 8;
+  ld(R23, offset, src);   offset += 8;
+  ld(R24, offset, src);   offset += 8;
+  ld(R25, offset, src);   offset += 8;
+  ld(R26, offset, src);   offset += 8;
+  ld(R27, offset, src);   offset += 8;
+  ld(R28, offset, src);   offset += 8;
+  ld(R29, offset, src);   offset += 8;
+  ld(R30, offset, src);   offset += 8;
+  ld(R31, offset, src);   offset += 8;
+
+  // FP registers
+  lfd(F14, offset, src);   offset += 8;
+  lfd(F15, offset, src);   offset += 8;
+  lfd(F16, offset, src);   offset += 8;
+  lfd(F17, offset, src);   offset += 8;
+  lfd(F18, offset, src);   offset += 8;
+  lfd(F19, offset, src);   offset += 8;
+  lfd(F20, offset, src);   offset += 8;
+  lfd(F21, offset, src);   offset += 8;
+  lfd(F22, offset, src);   offset += 8;
+  lfd(F23, offset, src);   offset += 8;
+  lfd(F24, offset, src);   offset += 8;
+  lfd(F25, offset, src);   offset += 8;
+  lfd(F26, offset, src);   offset += 8;
+  lfd(F27, offset, src);   offset += 8;
+  lfd(F28, offset, src);   offset += 8;
+  lfd(F29, offset, src);   offset += 8;
+  lfd(F30, offset, src);   offset += 8;
+  lfd(F31, offset, src);
+}
+
+// For verify_oops.
+void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
+  std(R3,  offset, dst);   offset += 8;
+  std(R4,  offset, dst);   offset += 8;
+  std(R5,  offset, dst);   offset += 8;
+  std(R6,  offset, dst);   offset += 8;
+  std(R7,  offset, dst);   offset += 8;
+  std(R8,  offset, dst);   offset += 8;
+  std(R9,  offset, dst);   offset += 8;
+  std(R10, offset, dst);   offset += 8;
+  std(R11, offset, dst);   offset += 8;
+  std(R12, offset, dst);
+}
+
+// For verify_oops.
+void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
+  ld(R3,  offset, src);   offset += 8;
+  ld(R4,  offset, src);   offset += 8;
+  ld(R5,  offset, src);   offset += 8;
+  ld(R6,  offset, src);   offset += 8;
+  ld(R7,  offset, src);   offset += 8;
+  ld(R8,  offset, src);   offset += 8;
+  ld(R9,  offset, src);   offset += 8;
+  ld(R10, offset, src);   offset += 8;
+  ld(R11, offset, src);   offset += 8;
+  ld(R12, offset, src);
+}
+
+void MacroAssembler::save_LR_CR(Register tmp) {
+  mfcr(tmp);
+  std(tmp, _abi(cr), R1_SP);
+  mflr(tmp);
+  std(tmp, _abi(lr), R1_SP);
+  // Tmp must contain lr on exit! (see return_addr and prolog in ppc64.ad)
+}
+
+void MacroAssembler::restore_LR_CR(Register tmp) {
+  assert(tmp != R1_SP, "must be distinct");
+  ld(tmp, _abi(lr), R1_SP);
+  mtlr(tmp);
+  ld(tmp, _abi(cr), R1_SP);
+  mtcr(tmp);
+}
+
+address MacroAssembler::get_PC_trash_LR(Register result) {
+  Label L;
+  bl(L);
+  bind(L);
+  address lr_pc = pc();
+  mflr(result);
+  return lr_pc;
+}
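+
+// Note on get_PC_trash_LR() above: `bl' to the immediately following
+// instruction deposits that instruction's address in LR, so mflr(result)
+// materializes the current pc. LR is clobbered, as the name warns.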
+
+void MacroAssembler::resize_frame(Register offset, Register tmp) {
+#ifdef ASSERT
+  assert_different_registers(offset, tmp, R1_SP);
+  andi_(tmp, offset, frame::alignment_in_bytes-1);
+  asm_assert_eq("resize_frame: unaligned", 0x204);
+#endif
+
+  // tmp <- *(SP)
+  ld(tmp, _abi(callers_sp), R1_SP);
+  // addr <- SP + offset;
+  // *(addr) <- tmp;
+  // SP <- addr
+  stdux(tmp, R1_SP, offset);
+}
+
+void MacroAssembler::resize_frame(int offset, Register tmp) {
+  assert(is_simm(offset, 16), "too big an offset");
+  assert_different_registers(tmp, R1_SP);
+  assert((offset & (frame::alignment_in_bytes-1))==0, "resize_frame: unaligned");
+  // tmp <- *(SP)
+  ld(tmp, _abi(callers_sp), R1_SP);
+  // addr <- SP + offset;
+  // *(addr) <- tmp;
+  // SP <- addr
+  stdu(tmp, offset, R1_SP);
+}
+
+void MacroAssembler::resize_frame_absolute(Register addr, Register tmp1, Register tmp2) {
+  // (addr == tmp1) || (addr == tmp2) is allowed here!
+  assert(tmp1 != tmp2, "must be distinct");
+
+  // compute offset w.r.t. current stack pointer
+  // tmp_1 <- addr - SP (!)
+  subf(tmp1, R1_SP, addr);
+
+  // atomically update SP keeping back link.
+  resize_frame(tmp1/* offset */, tmp2/* tmp */);
+}
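+
+// Note for the resize_frame variants above: stdu/stdux store the caller's
+// SP (the back link just read from _abi(callers_sp)) to the new top of
+// stack and update R1_SP in one instruction, so no intermediate state with
+// a missing back link is ever visible.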
+
+void MacroAssembler::push_frame(Register bytes, Register tmp) {
+#ifdef ASSERT
+  assert(bytes != R0, "r0 not allowed here");
+  andi_(R0, bytes, frame::alignment_in_bytes-1);
+  asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
+#endif
+  neg(tmp, bytes);
+  stdux(R1_SP, R1_SP, tmp);
+}
+
+// Push a frame of size `bytes'.
+void MacroAssembler::push_frame(unsigned int bytes, Register tmp) {
+  long offset = align_addr(bytes, frame::alignment_in_bytes);
+  if (is_simm(-offset, 16)) {
+    stdu(R1_SP, -offset, R1_SP);
+  } else {
+    load_const(tmp, -offset);
+    stdux(R1_SP, R1_SP, tmp);
+  }
+}
+
+// Push a frame of size `bytes' plus abi112 on top.
+void MacroAssembler::push_frame_abi112(unsigned int bytes, Register tmp) {
+  push_frame(bytes + frame::abi_112_size, tmp);
+}
+
+// Set up a new C frame with a spill area for non-volatile GPRs and
+// additional space for local variables.
+void MacroAssembler::push_frame_abi112_nonvolatiles(unsigned int bytes,
+                                                    Register tmp) {
+  push_frame(bytes + frame::abi_112_size + frame::spill_nonvolatiles_size, tmp);
+}
+
+// Pop current C frame.
+void MacroAssembler::pop_frame() {
+  ld(R1_SP, _abi(callers_sp), R1_SP);
+}
+
+// Generic version of a call to C function via a function descriptor
+// with variable support for C calling conventions (TOC, ENV, etc.).
+// Updates and returns _last_calls_return_pc.
+address MacroAssembler::branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
+                                  bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee) {
+  // we emit standard ptrgl glue code here
+  assert((function_descriptor != R0), "function_descriptor cannot be R0");
+
+  // retrieve necessary entries from the function descriptor
+  ld(R0, in_bytes(FunctionDescriptor::entry_offset()), function_descriptor);
+  mtctr(R0);
+
+  if (load_toc_of_callee) {
+    ld(R2_TOC, in_bytes(FunctionDescriptor::toc_offset()), function_descriptor);
+  }
+  if (load_env_of_callee) {
+    ld(R11, in_bytes(FunctionDescriptor::env_offset()), function_descriptor);
+  } else if (load_toc_of_callee) {
+    li(R11, 0);
+  }
+
+  // do a call or a branch
+  if (and_link) {
+    bctrl();
+  } else {
+    bctr();
+  }
+  _last_calls_return_pc = pc();
+
+  return _last_calls_return_pc;
+}
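+
+// Reference for branch_to() above: a function descriptor bundles the three
+// pieces of an ABI-conformant C entry point (summarizing the
+// FunctionDescriptor accessors used here):
+//
+//   entry:  address of the first instruction  -> R0, then mtctr
+//   toc:    the callee's TOC pointer          -> R2_TOC, if requested
+//   env:    environment pointer               -> R11, if requested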
+
+// Call a C function via a function descriptor and use full C calling
+// conventions.
+// We don't use the TOC in generated code, so there is no need to save
+// and restore its value.
+address MacroAssembler::call_c(Register fd) {
+  return branch_to(fd, /*and_link=*/true,
+                       /*save toc=*/false,
+                       /*restore toc=*/false,
+                       /*load toc=*/true,
+                       /*load env=*/true);
+}
+
+address MacroAssembler::call_c_and_return_to_caller(Register fd) {
+  return branch_to(fd, /*and_link=*/false,
+                       /*save toc=*/false,
+                       /*restore toc=*/false,
+                       /*load toc=*/true,
+                       /*load env=*/true);
+}
+
+address MacroAssembler::call_c(const FunctionDescriptor* fd, relocInfo::relocType rt) {
+  if (rt != relocInfo::none) {
+    // this call needs to be relocatable
+    if (!ReoptimizeCallSequences
+        || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
+        || fd == NULL   // support code-size estimation
+        || !fd->is_friend_function()
+        || fd->entry() == NULL) {
+      // it's not a friend function as defined by class FunctionDescriptor,
+      // so do a full call-c here.
+      load_const(R11, (address)fd, R0);
+
+      bool has_env = (fd != NULL && fd->env() != NULL);
+      return branch_to(R11, /*and_link=*/true,
+                            /*save toc=*/false,
+                            /*restore toc=*/false,
+                            /*load toc=*/true,
+                            /*load env=*/has_env);
+    } else {
+      // It's a friend function. Load the entry point and don't care about
+      // toc and env. Use an optimizable call instruction, but ensure the
+      // same code-size as in the case of a non-friend function.
+      nop();
+      nop();
+      nop();
+      bl64_patchable(fd->entry(), rt);
+      _last_calls_return_pc = pc();
+      return _last_calls_return_pc;
+    }
+  } else {
+    // This call does not need to be relocatable, do more aggressive
+    // optimizations.
+    if (!ReoptimizeCallSequences
+      || !fd->is_friend_function()) {
+      // It's not a friend function as defined by class FunctionDescriptor,
+      // so do a full call-c here.
+      load_const(R11, (address)fd, R0);
+      return branch_to(R11, /*and_link=*/true,
+                            /*save toc=*/false,
+                            /*restore toc=*/false,
+                            /*load toc=*/true,
+                            /*load env=*/true);
+    } else {
+      // it's a friend function, load the entry point and don't care about
+      // toc and env.
+      address dest = fd->entry();
+      if (is_within_range_of_b(dest, pc())) {
+        bl(dest);
+      } else {
+        bl64_patchable(dest, rt);
+      }
+      _last_calls_return_pc = pc();
+      return _last_calls_return_pc;
+    }
+  }
+}
+
+// Call a C function.  All constants needed reside in TOC.
+//
+// Read the address to call from the TOC.
+// Read env from TOC, if fd specifies an env.
+// Read new TOC from TOC.
+address MacroAssembler::call_c_using_toc(const FunctionDescriptor* fd,
+                                         relocInfo::relocType rt, Register toc) {
+  if (!ReoptimizeCallSequences
+    || (rt != relocInfo::runtime_call_type && rt != relocInfo::none)
+    || !fd->is_friend_function()) {
+    // It's not a friend function as defined by class FunctionDescriptor,
+    // so do a full call-c here.
+    assert(fd->entry() != NULL, "function must be linked");
+
+    AddressLiteral fd_entry(fd->entry());
+    load_const_from_method_toc(R11, fd_entry, toc);
+    mtctr(R11);
+    if (fd->env() == NULL) {
+      li(R11, 0);
+      nop();
+    } else {
+      AddressLiteral fd_env(fd->env());
+      load_const_from_method_toc(R11, fd_env, toc);
+    }
+    AddressLiteral fd_toc(fd->toc());
+    load_toc_from_toc(R2_TOC, fd_toc, toc);
+    // R2_TOC is killed.
+    bctrl();
+    _last_calls_return_pc = pc();
+  } else {
+    // It's a friend function, load the entry point and don't care about
+    // toc and env. Use an optimizable call instruction, but ensure the
+    // same code-size as in the case of a non-friend function.
+    nop();
+    bl64_patchable(fd->entry(), rt);
+    _last_calls_return_pc = pc();
+  }
+  return _last_calls_return_pc;
+}
+
+void MacroAssembler::call_VM_base(Register oop_result,
+                                  Register last_java_sp,
+                                  address  entry_point,
+                                  bool     check_exceptions) {
+  BLOCK_COMMENT("call_VM {");
+  // Determine last_java_sp register.
+  if (!last_java_sp->is_valid()) {
+    last_java_sp = R1_SP;
+  }
+  set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, R11_scratch1);
+
+  // ARG1 must hold thread address.
+  mr(R3_ARG1, R16_thread);
+
+  address return_pc = call_c((FunctionDescriptor*)entry_point, relocInfo::none);
+
+  reset_last_Java_frame();
+
+  // Check for pending exceptions.
+  if (check_exceptions) {
+    // Exception checking is not supported here; callers must pass check_exceptions == false.
+    ShouldNotReachHere();
+  }
+
+  // Get oop result if there is one and reset the value in the thread.
+  if (oop_result->is_valid()) {
+    get_vm_result(oop_result);
+  }
+
+  _last_calls_return_pc = return_pc;
+  BLOCK_COMMENT("} call_VM");
+}
+
+void MacroAssembler::call_VM_leaf_base(address entry_point) {
+  BLOCK_COMMENT("call_VM_leaf {");
+  call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::none);
+  BLOCK_COMMENT("} call_VM_leaf");
+}
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
+  call_VM_base(oop_result, noreg, entry_point, check_exceptions);
+}
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1,
+                             bool check_exceptions) {
+  // R3_ARG1 is reserved for the thread.
+  mr_if_needed(R4_ARG2, arg_1);
+  call_VM(oop_result, entry_point, check_exceptions);
+}
+
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
+                             bool check_exceptions) {
+  // R3_ARG1 is reserved for the thread
+  mr_if_needed(R4_ARG2, arg_1);
+  assert(arg_2 != R4_ARG2, "smashed argument");
+  mr_if_needed(R5_ARG3, arg_2);
+  call_VM(oop_result, entry_point, check_exceptions);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point) {
+  call_VM_leaf_base(entry_point);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
+  mr_if_needed(R3_ARG1, arg_1);
+  call_VM_leaf(entry_point);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
+  mr_if_needed(R3_ARG1, arg_1);
+  assert(arg_2 != R3_ARG1, "smashed argument");
+  mr_if_needed(R4_ARG2, arg_2);
+  call_VM_leaf(entry_point);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
+  mr_if_needed(R3_ARG1, arg_1);
+  assert(arg_2 != R3_ARG1, "smashed argument");
+  mr_if_needed(R4_ARG2, arg_2);
+  assert(arg_3 != R3_ARG1 && arg_3 != R4_ARG2, "smashed argument");
+  mr_if_needed(R5_ARG3, arg_3);
+  call_VM_leaf(entry_point);
+}
+
+// Check whether instruction is a read access to the polling page
+// which was emitted by load_from_polling_page(..).
+bool MacroAssembler::is_load_from_polling_page(int instruction, void* ucontext,
+                                               address* polling_address_ptr) {
+  if (!is_ld(instruction))
+    return false; // It's not a ld. Fail.
+
+  int rt = inv_rt_field(instruction);
+  int ra = inv_ra_field(instruction);
+  int ds = inv_ds_field(instruction);
+  if (!(ds == 0 && ra != 0 && rt == 0)) {
+    return false; // It's not a ld(r0, 0, ra). Fail.
+  }
+
+  if (!ucontext) {
+    // Set polling address.
+    if (polling_address_ptr != NULL) {
+      *polling_address_ptr = NULL;
+    }
+    return true; // No ucontext given. Can't check value of ra. Assume true.
+  }
+
+#ifdef LINUX
+  // Ucontext given. Check that register ra contains the address of
+  // the safepoint polling page.
+  ucontext_t* uc = (ucontext_t*) ucontext;
+  // Set polling address.
+  address addr = (address)uc->uc_mcontext.regs->gpr[ra] + (ssize_t)ds;
+  if (polling_address_ptr != NULL) {
+    *polling_address_ptr = addr;
+  }
+  return os::is_poll_address(addr);
+#else
+  // Not on Linux, ucontext must be NULL.
+  ShouldNotReachHere();
+  return false;
+#endif
+}
+
+bool MacroAssembler::is_memory_serialization(int instruction, JavaThread* thread, void* ucontext) {
+#ifdef LINUX
+  ucontext_t* uc = (ucontext_t*) ucontext;
+
+  if (is_stwx(instruction) || is_stwux(instruction)) {
+    int ra = inv_ra_field(instruction);
+    int rb = inv_rb_field(instruction);
+
+    // look up content of ra and rb in ucontext
+    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
+    long rb_val=(long)uc->uc_mcontext.regs->gpr[rb];
+    return os::is_memory_serialize_page(thread, ra_val+rb_val);
+  } else if (is_stw(instruction) || is_stwu(instruction)) {
+    int ra = inv_ra_field(instruction);
+    int d1 = inv_d1_field(instruction);
+
+    // look up content of ra in ucontext
+    address ra_val=(address)uc->uc_mcontext.regs->gpr[ra];
+    return os::is_memory_serialize_page(thread, ra_val+d1);
+  } else {
+    return false;
+  }
+#else
+  // workaround not needed on !LINUX :-)
+  ShouldNotCallThis();
+  return false;
+#endif
+}
+
+void MacroAssembler::bang_stack_with_offset(int offset) {
+  // When increasing the stack, the old stack pointer will be written
+  // to the new top of stack according to the PPC64 abi.
+  // Therefore, stack banging is not necessary when increasing
+  // the stack by <= os::vm_page_size() bytes.
+  // When increasing the stack by a larger amount, this method is
+  // called repeatedly to bang the intermediate pages.
+
+  // Stack grows down, caller passes positive offset.
+  assert(offset > 0, "must bang with positive offset");
+
+  long stdoffset = -offset;
+
+  if (is_simm(stdoffset, 16)) {
+    // Signed 16 bit offset, a simple std is ok.
+    if (UseLoadInstructionsForStackBangingPPC64) {
+      ld(R0, (int)(signed short)stdoffset, R1_SP);
+    } else {
+      std(R0, (int)(signed short)stdoffset, R1_SP);
+    }
+  } else if (is_simm(stdoffset, 31)) {
+    const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
+    const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
+
+    Register tmp = R11;
+    addis(tmp, R1_SP, hi);
+    if (UseLoadInstructionsForStackBangingPPC64) {
+      ld(R0,  lo, tmp);
+    } else {
+      std(R0, lo, tmp);
+    }
+  } else {
+    ShouldNotReachHere();
+  }
+}
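+
+// Worked example for the large-offset path above (assuming the usual
+// hi/lo split where the low half is a sign-extended 16-bit displacement):
+// for stdoffset = -0x18000, lo = (short)0x8000 = -0x8000 and
+// hi = (-0x18000 - lo) >> 16 = -1, so the emitted sequence is
+//   addis R11, R1_SP, -1      // R11 = SP - 0x10000
+//   std   R0, -0x8000(R11)    // bangs SP - 0x18000
+// (or an ld instead of the std, with UseLoadInstructionsForStackBangingPPC64).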
+
+// If instruction is a stack bang of the form
+//    std    R0,    x(Ry),       (see bang_stack_with_offset())
+//    stdu   R1_SP, x(R1_SP),    (see push_frame(), resize_frame())
+// or stdux  R1_SP, Rx, R1_SP    (see push_frame(), resize_frame())
+// return the banged address. Otherwise, return 0.
+address MacroAssembler::get_stack_bang_address(int instruction, void *ucontext) {
+#ifdef LINUX
+  ucontext_t* uc = (ucontext_t*) ucontext;
+  int rs = inv_rs_field(instruction);
+  int ra = inv_ra_field(instruction);
+  if (   (is_ld(instruction)   && rs == 0 &&  UseLoadInstructionsForStackBangingPPC64)
+      || (is_std(instruction)  && rs == 0 && !UseLoadInstructionsForStackBangingPPC64)
+      || (is_stdu(instruction) && rs == 1)) {
+    int ds = inv_ds_field(instruction);
+    // return banged address
+    return ds+(address)uc->uc_mcontext.regs->gpr[ra];
+  } else if (is_stdux(instruction) && rs == 1) {
+    int rb = inv_rb_field(instruction);
+    address sp = (address)uc->uc_mcontext.regs->gpr[1];
+    long rb_val = (long)uc->uc_mcontext.regs->gpr[rb];
+    return ra != 1 || rb_val >= 0 ? NULL         // not a stack bang
+                                  : sp + rb_val; // banged address
+  }
+  return NULL; // not a stack bang
+#else
+  // workaround not needed on !LINUX :-)
+  ShouldNotCallThis();
+  return NULL;
+#endif
+}
+
+// CmpxchgX sets condition register to cmpX(current, compare).
+void MacroAssembler::cmpxchgw(ConditionRegister flag, Register dest_current_value,
+                              Register compare_value, Register exchange_value,
+                              Register addr_base, int semantics, bool cmpxchgx_hint,
+                              Register int_flag_success, bool contention_hint) {
+  Label retry;
+  Label failed;
+  Label done;
+
+  // Save one branch if result is returned via register and
+  // result register is different from the other ones.
+  bool use_result_reg    = (int_flag_success != noreg);
+  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
+                            int_flag_success != exchange_value && int_flag_success != addr_base);
+
+  // release/fence semantics
+  if (semantics & MemBarRel) {
+    release();
+  }
+
+  if (use_result_reg && preset_result_reg) {
+    li(int_flag_success, 0); // preset (assume cas failed)
+  }
+
+  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
+  if (contention_hint) { // Don't try to reserve if cmp fails.
+    lwz(dest_current_value, 0, addr_base);
+    cmpw(flag, dest_current_value, compare_value);
+    bne(flag, failed);
+  }
+
+  // atomic emulation loop
+  bind(retry);
+
+  lwarx(dest_current_value, addr_base, cmpxchgx_hint);
+  cmpw(flag, dest_current_value, compare_value);
+  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+    bne_predict_not_taken(flag, failed);
+  } else {
+    bne(                  flag, failed);
+  }
+  // branch to done  => (flag == ne), (dest_current_value != compare_value)
+  // fall through    => (flag == eq), (dest_current_value == compare_value)
+
+  stwcx_(exchange_value, addr_base);
+  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+    bne_predict_not_taken(CCR0, retry); // StXcx_ sets CCR0.
+  } else {
+    bne(                  CCR0, retry); // StXcx_ sets CCR0.
+  }
+  // fall through    => (flag == eq), (dest_current_value == compare_value), (swapped)
+
+  // Result in register (must do this at the end because int_flag_success can be the
+  // same register as one above).
+  if (use_result_reg) {
+    li(int_flag_success, 1);
+  }
+
+  if (semantics & MemBarFenceAfter) {
+    fence();
+  } else if (semantics & MemBarAcq) {
+    isync();
+  }
+
+  if (use_result_reg && !preset_result_reg) {
+    b(done);
+  }
+
+  bind(failed);
+  if (use_result_reg && !preset_result_reg) {
+    li(int_flag_success, 0);
+  }
+
+  bind(done);
+  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
+  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
+}
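+
+// cmpxchgw() above implements for 32-bit words the semantics spelled out
+// for cmpxchgd() below; in C-like pseudocode:
+//
+//   dest_current_value = *addr_base;
+//   if (dest_current_value == compare_value) *addr_base = exchange_value;
+//
+// performed atomically via the lwarx/stwcx_ reservation loop.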
+
+// Performs an atomic compare-exchange:
+//   if (compare_value == *addr_base)
+//     *addr_base = exchange_value
+//     int_flag_success = 1;
+//   else
+//     int_flag_success = 0;
+//
+// ConditionRegister flag       = cmp(compare_value, *addr_base)
+// Register dest_current_value  = *addr_base
+// Register compare_value       Used to compare with value in memory
+// Register exchange_value      Written to memory if compare_value == *addr_base
+// Register addr_base           The memory location to compareXChange
+// Register int_flag_success    Set to 1 if exchange_value was written to *addr_base
+//
+// To avoid the costly compare-exchange, the value can be tested beforehand
+// (see the contention_hint parameter). Several special cases exist to avoid
+// generating unnecessary code.
+//
+void MacroAssembler::cmpxchgd(ConditionRegister flag,
+                              Register dest_current_value, Register compare_value, Register exchange_value,
+                              Register addr_base, int semantics, bool cmpxchgx_hint,
+                              Register int_flag_success, Label* failed_ext, bool contention_hint) {
+  Label retry;
+  Label failed_int;
+  Label& failed = (failed_ext != NULL) ? *failed_ext : failed_int;
+  Label done;
+
+  // Save one branch if result is returned via register and result register is different from the other ones.
+  bool use_result_reg    = (int_flag_success != noreg);
+  bool preset_result_reg = (int_flag_success != dest_current_value && int_flag_success != compare_value &&
+                            int_flag_success != exchange_value && int_flag_success != addr_base);
+  assert(int_flag_success == noreg || failed_ext == NULL, "cannot have both");
+
+  // release/fence semantics
+  if (semantics & MemBarRel) {
+    release();
+  }
+
+  if (use_result_reg && preset_result_reg) {
+    li(int_flag_success, 0); // preset (assume cas failed)
+  }
+
+  // Add simple guard in order to reduce risk of starving under high contention (recommended by IBM).
+  if (contention_hint) { // Don't try to reserve if cmp fails.
+    ld(dest_current_value, 0, addr_base);
+    cmpd(flag, dest_current_value, compare_value);
+    bne(flag, failed);
+  }
+
+  // atomic emulation loop
+  bind(retry);
+
+  ldarx(dest_current_value, addr_base, cmpxchgx_hint);
+  cmpd(flag, dest_current_value, compare_value);
+  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+    bne_predict_not_taken(flag, failed);
+  } else {
+    bne(                  flag, failed);
+  }
+
+  stdcx_(exchange_value, addr_base);
+  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+    bne_predict_not_taken(CCR0, retry); // stXcx_ sets CCR0
+  } else {
+    bne(                  CCR0, retry); // stXcx_ sets CCR0
+  }
+
+  // result in register (must do this at the end because int_flag_success can be the same register as one above)
+  if (use_result_reg) {
+    li(int_flag_success, 1);
+  }
+
+  // POWER6 doesn't need isync in CAS.
+  // Always emit isync to be on the safe side.
+  if (semantics & MemBarFenceAfter) {
+    fence();
+  } else if (semantics & MemBarAcq) {
+    isync();
+  }
+
+  if (use_result_reg && !preset_result_reg) {
+    b(done);
+  }
+
+  bind(failed_int);
+  if (use_result_reg && !preset_result_reg) {
+    li(int_flag_success, 0);
+  }
+
+  bind(done);
+  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
+  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
+}
+
+// Look up the method for a megamorphic invokeinterface call.
+// The target method is determined by <intf_klass, itable_index>.
+// The receiver klass is in recv_klass.
+// On success, the result will be in method_result, and execution falls through.
+// On failure, execution transfers to the given label.
+void MacroAssembler::lookup_interface_method(Register recv_klass,
+                                             Register intf_klass,
+                                             RegisterOrConstant itable_index,
+                                             Register method_result,
+                                             Register scan_temp,
+                                             Register sethi_temp,
+                                             Label& L_no_such_interface) {
+  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
+  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
+         "caller must use same register for non-constant itable index as for method");
+
+  // Compute start of first itableOffsetEntry (which is at the end of the vtable).
+  int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
+  int itentry_off = itableMethodEntry::method_offset_in_bytes();
+  int logMEsize   = exact_log2(itableMethodEntry::size() * wordSize);
+  int scan_step   = itableOffsetEntry::size() * wordSize;
+  int log_vte_size= exact_log2(vtableEntry::size() * wordSize);
+
+  lwz(scan_temp, InstanceKlass::vtable_length_offset() * wordSize, recv_klass);
+  // %%% We should store the aligned, prescaled offset in the klassoop.
+  // Then the next several instructions would fold away.
+
+  sldi(scan_temp, scan_temp, log_vte_size);
+  addi(scan_temp, scan_temp, vtable_base);
+  add(scan_temp, recv_klass, scan_temp);
+
+  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
+  if (itable_index.is_register()) {
+    Register itable_offset = itable_index.as_register();
+    sldi(itable_offset, itable_offset, logMEsize);
+    if (itentry_off) addi(itable_offset, itable_offset, itentry_off);
+    add(recv_klass, itable_offset, recv_klass);
+  } else {
+    long itable_offset = (long)itable_index.as_constant();
+    load_const_optimized(sethi_temp, (itable_offset<<logMEsize)+itentry_off); // static address, no relocation
+    add(recv_klass, sethi_temp, recv_klass);
+  }
+
+  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
+  //   if (scan->interface() == intf) {
+  //     result = (klass + scan->offset() + itable_index);
+  //   }
+  // }
+  Label search, found_method;
+
+  for (int peel = 1; peel >= 0; peel--) {
+    // %%%% Could load both offset and interface in one ldx, if they were
+    // in the opposite order. This would save a load.
+    ld(method_result, itableOffsetEntry::interface_offset_in_bytes(), scan_temp);
+
+    // Check that this entry is non-null. A null entry means that
+    // the receiver class doesn't implement the interface, and wasn't the
+    // same as when the caller was compiled.
+    cmpd(CCR0, method_result, intf_klass);
+
+    if (peel) {
+      beq(CCR0, found_method);
+    } else {
+      bne(CCR0, search);
+      // (invert the test to fall through to found_method...)
+    }
+
+    if (!peel) break;
+
+    bind(search);
+
+    cmpdi(CCR0, method_result, 0);
+    beq(CCR0, L_no_such_interface);
+    addi(scan_temp, scan_temp, scan_step);
+  }
+
+  bind(found_method);
+
+  // Got a hit.
+  int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
+  lwz(scan_temp, ito_offset, scan_temp);
+  ldx(method_result, scan_temp, recv_klass);
+}
+
+// virtual method calling
+void MacroAssembler::lookup_virtual_method(Register recv_klass,
+                                           RegisterOrConstant vtable_index,
+                                           Register method_result) {
+
+  assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
+
+  const int base = InstanceKlass::vtable_start_offset() * wordSize;
+  assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
+
+  if (vtable_index.is_register()) {
+    sldi(vtable_index.as_register(), vtable_index.as_register(), LogBytesPerWord);
+    add(recv_klass, vtable_index.as_register(), recv_klass);
+  } else {
+    addi(recv_klass, recv_klass, vtable_index.as_constant() << LogBytesPerWord);
+  }
+  ld(R19_method, base + vtableEntry::method_offset_in_bytes(), recv_klass);
+}
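+
+// In pseudocode, lookup_virtual_method() above computes
+//   R19_method = *(recv_klass + vtable_start_offset * wordSize
+//                  + vtable_index * wordSize + method_offset_in_bytes);
+// note that recv_klass is destroyed in the process.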
+
+/////////////////////////////////////////// subtype checking ////////////////////////////////////////////
+
+void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
+                                                   Register super_klass,
+                                                   Register temp1_reg,
+                                                   Register temp2_reg,
+                                                   Label& L_success,
+                                                   Label& L_failure) {
+
+  const Register check_cache_offset = temp1_reg;
+  const Register cached_super       = temp2_reg;
+
+  assert_different_registers(sub_klass, super_klass, check_cache_offset, cached_super);
+
+  int sco_offset = in_bytes(Klass::super_check_offset_offset());
+  int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
+
+  // If the pointers are equal, we are done (e.g., String[] elements).
+  // This self-check enables sharing of secondary supertype arrays among
+  // non-primary types such as array-of-interface. Otherwise, each such
+  // type would need its own customized SSA.
+  // We move this check to the front of the fast path because many
+  // type checks are in fact trivially successful in this manner,
+  // so we get a nicely predicted branch right at the start of the check.
+  cmpd(CCR0, sub_klass, super_klass);
+  beq(CCR0, L_success);
+
+  // Check the supertype display:
+  lwz(check_cache_offset, sco_offset, super_klass);
+  // The loaded value is the offset from KlassOopDesc.
+
+  ldx(cached_super, check_cache_offset, sub_klass);
+  cmpd(CCR0, cached_super, super_klass);
+  beq(CCR0, L_success);
+
+  // This check has worked decisively for primary supers.
+  // Secondary supers are sought in the super_cache ('super_cache_addr').
+  // (Secondary supers are interfaces and very deeply nested subtypes.)
+  // This works in the same check above because of a tricky aliasing
+  // between the super_cache and the primary super display elements.
+  // (The 'super_check_addr' can address either, as the case requires.)
+  // Note that the cache is updated below if it does not help us find
+  // what we need immediately.
+  // So if it was a primary super, we can just fail immediately.
+  // Otherwise, it's the slow path for us (no success at this point).
+
+  cmpwi(CCR0, check_cache_offset, sc_offset);
+  bne(CCR0, L_failure);
+  // bind(slow_path); // fallthru
+}
+
+void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
+                                                   Register super_klass,
+                                                   Register temp1_reg,
+                                                   Register temp2_reg,
+                                                   Label* L_success,
+                                                   Register result_reg) {
+  const Register array_ptr = temp1_reg; // current value from cache array
+  const Register temp      = temp2_reg;
+
+  assert_different_registers(sub_klass, super_klass, array_ptr, temp);
+
+  int source_offset = in_bytes(Klass::secondary_supers_offset());
+  int target_offset = in_bytes(Klass::secondary_super_cache_offset());
+
+  int length_offset = Array<Klass*>::length_offset_in_bytes();
+  int base_offset   = Array<Klass*>::base_offset_in_bytes();
+
+  Label hit, loop, failure, fallthru;
+
+  ld(array_ptr, source_offset, sub_klass);
+
+  //assert(4 == arrayOopDesc::length_length_in_bytes(), "precondition violated.");
+  lwz(temp, length_offset, array_ptr);
+  cmpwi(CCR0, temp, 0);
+  beq(CCR0, result_reg != noreg ? failure : fallthru); // length 0
+
+  mtctr(temp); // load ctr
+
+  bind(loop);
+  // Entries in the table are no longer compressed (plain Klass*).
+  ld(temp, base_offset, array_ptr);
+  cmpd(CCR0, temp, super_klass);
+  beq(CCR0, hit);
+  addi(array_ptr, array_ptr, BytesPerWord);
+  bdnz(loop);
+
+  bind(failure);
+  if (result_reg != noreg) li(result_reg, 1); // load non-zero result (indicates a miss)
+  b(fallthru);
+
+  bind(hit);
+  std(super_klass, target_offset, sub_klass); // save result to cache
+  if (result_reg != noreg) li(result_reg, 0); // load zero result (indicates a hit)
+  if (L_success != NULL) b(*L_success);
+
+  bind(fallthru);
+}
+
+// Try fast path, then go to slow one if not successful
+void MacroAssembler::check_klass_subtype(Register sub_klass,
+                         Register super_klass,
+                         Register temp1_reg,
+                         Register temp2_reg,
+                         Label& L_success) {
+  Label L_failure;
+  check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg, temp2_reg, L_success, L_failure);
+  check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg, &L_success);
+  bind(L_failure); // Fallthru if not successful.
+}
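+
+// Control flow of check_klass_subtype() above: the fast path branches to
+// L_success or L_failure whenever it can decide; otherwise it falls
+// through into the slow path, which branches to L_success on a hit and
+// falls through (into the L_failure bind) on a miss.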
+
+void MacroAssembler::check_method_handle_type(Register mtype_reg, Register mh_reg,
+                                              Register temp_reg,
+                                              Label& wrong_method_type) {
+  assert_different_registers(mtype_reg, mh_reg, temp_reg);
+  // Compare method type against that of the receiver.
+  load_heap_oop_not_null(temp_reg, delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg), mh_reg);
+  cmpd(CCR0, temp_reg, mtype_reg);
+  bne(CCR0, wrong_method_type);
+}
+
+RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
+                                                   Register temp_reg,
+                                                   int extra_slot_offset) {
+  // cf. TemplateTable::prepare_invoke(), if (load_receiver).
+  int stackElementSize = Interpreter::stackElementSize;
+  int offset = extra_slot_offset * stackElementSize;
+  if (arg_slot.is_constant()) {
+    offset += arg_slot.as_constant() * stackElementSize;
+    return offset;
+  } else {
+    assert(temp_reg != noreg, "must specify");
+    sldi(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize));
+    if (offset != 0)
+      addi(temp_reg, temp_reg, offset);
+    return temp_reg;
+  }
+}
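+
+// Example for argument_offset() above: with a constant arg_slot of 2 and
+// extra_slot_offset of 1 the result is the constant
+// (2 + 1) * Interpreter::stackElementSize; with a register arg_slot the
+// same value is computed into temp_reg via sldi/addi.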
+
+void MacroAssembler::biased_locking_enter(ConditionRegister cr_reg, Register obj_reg,
+                                          Register mark_reg, Register temp_reg,
+                                          Register temp2_reg, Label& done, Label* slow_case) {
+  assert(UseBiasedLocking, "why call this otherwise?");
+
+#ifdef ASSERT
+  assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
+#endif
+
+  Label cas_label;
+
+  // Branch to done if fast path fails and no slow_case provided.
+  Label *slow_case_int = (slow_case != NULL) ? slow_case : &done;
+
+  // Biased locking
+  // See whether the lock is currently biased toward our thread and
+  // whether the epoch is still valid
+  // Note that the runtime guarantees sufficient alignment of JavaThread
+  // pointers to allow age to be placed into low bits
+  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits,
+         "biased locking makes assumptions about bit layout");
+
+  if (PrintBiasedLockingStatistics) {
+    load_const(temp_reg, (address) BiasedLocking::total_entry_count_addr(), temp2_reg);
+    lwz(temp2_reg, 0, temp_reg);
+    addi(temp2_reg, temp2_reg, 1);
+    stw(temp2_reg, 0, temp_reg);
+  }
+
+  andi(temp_reg, mark_reg, markOopDesc::biased_lock_mask_in_place);
+  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+  bne(cr_reg, cas_label);
+
+  load_klass_with_trap_null_check(temp_reg, obj_reg);
+
+  load_const_optimized(temp2_reg, ~((int) markOopDesc::age_mask_in_place));
+  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
+  orr(temp_reg, R16_thread, temp_reg);
+  xorr(temp_reg, mark_reg, temp_reg);
+  andr(temp_reg, temp_reg, temp2_reg);
+  cmpdi(cr_reg, temp_reg, 0);
+  if (PrintBiasedLockingStatistics) {
+    Label l;
+    bne(cr_reg, l);
+    load_const(mark_reg, (address) BiasedLocking::biased_lock_entry_count_addr());
+    lwz(temp2_reg, 0, mark_reg);
+    addi(temp2_reg, temp2_reg, 1);
+    stw(temp2_reg, 0, mark_reg);
+    // restore mark_reg
+    ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
+    bind(l);
+  }
+  beq(cr_reg, done);
+
+  Label try_revoke_bias;
+  Label try_rebias;
+
+  // At this point we know that the header has the bias pattern and
+  // that we are not the bias owner in the current epoch. We need to
+  // figure out more details about the state of the header in order to
+  // know what operations can be legally performed on the object's
+  // header.
+
+  // If the low three bits in the xor result aren't clear, that means
+  // the prototype header is no longer biased and we have to revoke
+  // the bias on this object.
+  andi(temp2_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+  cmpwi(cr_reg, temp2_reg, 0);
+  bne(cr_reg, try_revoke_bias);
+
+  // Biasing is still enabled for this data type. See whether the
+  // epoch of the current bias is still valid, meaning that the epoch
+  // bits of the mark word are equal to the epoch bits of the
+  // prototype header. (Note that the prototype header's epoch bits
+  // only change at a safepoint.) If not, attempt to rebias the object
+  // toward the current thread. Note that we must be absolutely sure
+  // that the current epoch is invalid in order to do this because
+  // otherwise the manipulations it performs on the mark word are
+  // illegal.
+
+  int shift_amount = 64 - markOopDesc::epoch_shift;
+  // rotate epoch bits to right (little) end and set other bits to 0
+  // [ big part | epoch | little part ] -> [ 0..0 | epoch ]
+  rldicl_(temp2_reg, temp_reg, shift_amount, 64 - markOopDesc::epoch_bits);
+  // branch if epoch bits are != 0, i.e. they differ, because the epoch has been incremented
+  bne(CCR0, try_rebias);
+
+  // The epoch of the current bias is still valid but we know nothing
+  // about the owner; it might be set or it might be clear. Try to
+  // acquire the bias of the object using an atomic operation. If this
+  // fails we will go in to the runtime to revoke the object's bias.
+  // Note that we first construct the presumed unbiased header so we
+  // don't accidentally blow away another thread's valid bias.
+  andi(mark_reg, mark_reg, (markOopDesc::biased_lock_mask_in_place |
+                                markOopDesc::age_mask_in_place |
+                                markOopDesc::epoch_mask_in_place));
+  orr(temp_reg, R16_thread, mark_reg);
+
+  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
+  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
+  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
+           /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
+           /*where=*/obj_reg,
+           MacroAssembler::MemBarAcq,
+           MacroAssembler::cmpxchgx_hint_acquire_lock(),
+           noreg, slow_case_int); // bail out if failed
+
+  // If the biasing toward our thread failed, this means that
+  // another thread succeeded in biasing it toward itself and we
+  // need to revoke that bias. The revocation will occur in the
+  // interpreter runtime in the slow case.
+  if (PrintBiasedLockingStatistics) {
+    load_const(temp_reg, (address) BiasedLocking::anonymously_biased_lock_entry_count_addr(), temp2_reg);
+    lwz(temp2_reg, 0, temp_reg);
+    addi(temp2_reg, temp2_reg, 1);
+    stw(temp2_reg, 0, temp_reg);
+  }
+  b(done);
+
+  bind(try_rebias);
+  // At this point we know the epoch has expired, meaning that the
+  // current "bias owner", if any, is actually invalid. Under these
+  // circumstances _only_, we are allowed to use the current header's
+  // value as the comparison value when doing the cas to acquire the
+  // bias in the current epoch. In other words, we allow transfer of
+  // the bias from one thread to another directly in this situation.
+  andi(temp_reg, mark_reg, markOopDesc::age_mask_in_place);
+  orr(temp_reg, R16_thread, temp_reg);
+  load_klass_with_trap_null_check(temp2_reg, obj_reg);
+  ld(temp2_reg, in_bytes(Klass::prototype_header_offset()), temp2_reg);
+  orr(temp_reg, temp_reg, temp2_reg);
+
+  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
+  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
+  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
+                 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
+                 /*where=*/obj_reg,
+                 MacroAssembler::MemBarAcq,
+                 MacroAssembler::cmpxchgx_hint_acquire_lock(),
+                 noreg, slow_case_int); // bail out if failed
+
+  // If the biasing toward our thread failed, this means that
+  // another thread succeeded in biasing it toward itself and we
+  // need to revoke that bias. The revocation will occur in the
+  // interpreter runtime in the slow case.
+  if (PrintBiasedLockingStatistics) {
+    load_const(temp_reg, (address) BiasedLocking::rebiased_lock_entry_count_addr(), temp2_reg);
+    lwz(temp2_reg, 0, temp_reg);
+    addi(temp2_reg, temp2_reg, 1);
+    stw(temp2_reg, 0, temp_reg);
+  }
+  b(done);
+
+  bind(try_revoke_bias);
+  // The prototype mark in the klass doesn't have the bias bit set any
+  // more, indicating that objects of this data type are not supposed
+  // to be biased any more. We are going to try to reset the mark of
+  // this object to the prototype value and fall through to the
+  // CAS-based locking scheme. Note that if our CAS fails, it means
+  // that another thread raced us for the privilege of revoking the
+  // bias of this particular object, so it's okay to continue in the
+  // normal locking code.
+  load_klass_with_trap_null_check(temp_reg, obj_reg);
+  ld(temp_reg, in_bytes(Klass::prototype_header_offset()), temp_reg);
+  andi(temp2_reg, mark_reg, markOopDesc::age_mask_in_place);
+  orr(temp_reg, temp_reg, temp2_reg);
+
+  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+  // CmpxchgX sets cr_reg to cmpX(temp2_reg, mark_reg).
+  fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
+  cmpxchgd(/*flag=*/cr_reg, /*current_value=*/temp2_reg,
+                 /*compare_value=*/mark_reg, /*exchange_value=*/temp_reg,
+                 /*where=*/obj_reg,
+                 MacroAssembler::MemBarAcq,
+                 MacroAssembler::cmpxchgx_hint_acquire_lock());
+
+  // reload markOop in mark_reg before continuing with lightweight locking
+  ld(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
+
+  // Fall through to the normal CAS-based lock, because no matter what
+  // the result of the above CAS, some thread must have succeeded in
+  // removing the bias bit from the object's header.
+  if (PrintBiasedLockingStatistics) {
+    Label l;
+    bne(cr_reg, l);
+    load_const(temp_reg, (address) BiasedLocking::revoked_lock_entry_count_addr(), temp2_reg);
+    lwz(temp2_reg, 0, temp_reg);
+    addi(temp2_reg, temp2_reg, 1);
+    stw(temp2_reg, 0, temp_reg);
+    bind(l);
+  }
+
+  bind(cas_label);
+}
+
+void MacroAssembler::biased_locking_exit (ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done) {
+  // Check for biased locking unlock case, which is a no-op
+  // Note: we do not have to check the thread ID for two reasons.
+  // First, the interpreter checks for IllegalMonitorStateException at
+  // a higher level. Second, if the bias was revoked while we held the
+  // lock, the object could not be rebiased toward another thread, so
+  // the bias bit would be clear.
+
+  ld(temp_reg, 0, mark_addr);
+  andi(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
+
+  cmpwi(cr_reg, temp_reg, markOopDesc::biased_lock_pattern);
+  beq(cr_reg, done);
+}
+
+// "The box" is the space on the stack where we copy the object mark.
+void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
+                                               Register temp, Register displaced_header, Register current_header) {
+  assert_different_registers(oop, box, temp, displaced_header, current_header);
+  assert(flag != CCR0, "bad condition register");
+  Label cont;
+  Label object_has_monitor;
+  Label cas_failed;
+
+  // Load markOop from object into displaced_header.
+  ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
+
+  // Always do locking in runtime.
+  if (EmitSync & 0x01) {
+    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
+    return;
+  }
+
+  if (UseBiasedLocking) {
+    biased_locking_enter(flag, oop, displaced_header, temp, current_header, cont);
+  }
+
+  // Handle existing monitor.
+  if ((EmitSync & 0x02) == 0) {
+    // The object has an existing monitor iff (mark & monitor_value) != 0.
+    andi_(temp, displaced_header, markOopDesc::monitor_value);
+    bne(CCR0, object_has_monitor);
+  }
+
+  // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
+  ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
+
+  // Load Compare Value application register.
+
+  // Initialize the box. (Must happen before we update the object mark!)
+  std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
+
+  // Must fence, otherwise, preceding store(s) may float below cmpxchg.
+  // Compare object markOop with mark and if equal exchange scratch1 with object markOop.
+  // CmpxchgX sets cr_reg to cmpX(current, displaced).
+  membar(Assembler::StoreStore);
+  cmpxchgd(/*flag=*/flag,
+           /*current_value=*/current_header,
+           /*compare_value=*/displaced_header,
+           /*exchange_value=*/box,
+           /*where=*/oop,
+           MacroAssembler::MemBarAcq,
+           MacroAssembler::cmpxchgx_hint_acquire_lock(),
+           noreg,
+           &cas_failed);
+  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+  // If the compare-and-exchange succeeded, then we found an unlocked
+  // object and we have now locked it.
+  b(cont);
+
+  bind(cas_failed);
+  // We did not see an unlocked object so try the fast recursive case.
+
+  // Check if the owner is self by comparing the value in the markOop of object
+  // (current_header) with the stack pointer.
+  sub(current_header, current_header, R1_SP);
+  load_const_optimized(temp, (address) (~(os::vm_page_size()-1) |
+                                        markOopDesc::lock_mask_in_place));
+
+  and_(R0/*==0?*/, current_header, temp);
+  // If the condition is true we are done (fall through to cont), and hence
+  // we can store 0 as the displaced header in the box, which indicates
+  // that it is a recursive lock.
+  mcrf(flag, CCR0);
+  std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
+
+  // Handle existing monitor.
+  if ((EmitSync & 0x02) == 0) {
+    b(cont);
+
+    bind(object_has_monitor);
+    // The object's monitor m is unlocked iff m->owner == NULL,
+    // otherwise m->owner may contain a thread or a stack address.
+    //
+    // Try to CAS m->owner from NULL to current thread.
+    addi(temp, displaced_header, ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value);
+    li(displaced_header, 0);
+    // CmpxchgX sets flag to cmpX(current, displaced).
+    cmpxchgd(/*flag=*/flag,
+             /*current_value=*/current_header,
+             /*compare_value=*/displaced_header,
+             /*exchange_value=*/R16_thread,
+             /*where=*/temp,
+             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
+             MacroAssembler::cmpxchgx_hint_acquire_lock());
+
+    // Store a non-null value into the box.
+    std(box, BasicLock::displaced_header_offset_in_bytes(), box);
+
+#   ifdef ASSERT
+    bne(flag, cont);
+    // We have acquired the monitor, check some invariants.
+    addi(/*monitor=*/temp, temp, -ObjectMonitor::owner_offset_in_bytes());
+    // Invariant 1: _recursions should be 0.
+    //assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
+    asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
+                            "monitor->_recursions should be 0", -1);
+    // Invariant 2: OwnerIsThread shouldn't be 0.
+    //assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size");
+    //asm_assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), temp,
+    //                           "monitor->OwnerIsThread shouldn't be 0", -1);
+#   endif
+  }
+
+  bind(cont);
+  // flag == EQ indicates success
+  // flag == NE indicates failure
+}
+
+void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
+                                                 Register temp, Register displaced_header, Register current_header) {
+  assert_different_registers(oop, box, temp, displaced_header, current_header);
+  assert(flag != CCR0, "bad condition register");
+  Label cont;
+  Label object_has_monitor;
+
+  // Always do locking in runtime.
+  if (EmitSync & 0x01) {
+    cmpdi(flag, oop, 0); // Oop can't be 0 here => always false.
+    return;
+  }
+
+  if (UseBiasedLocking) {
+    biased_locking_exit(flag, oop, current_header, cont);
+  }
+
+  // Find the lock address and load the displaced header from the stack.
+  ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
+
+  // If the displaced header is 0, we have a recursive unlock.
+  cmpdi(flag, displaced_header, 0);
+  beq(flag, cont);
+
+  // Handle existing monitor.
+  if ((EmitSync & 0x02) == 0) {
+    // The object has an existing monitor iff (mark & monitor_value) != 0.
+    ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
+    andi(temp, current_header, markOopDesc::monitor_value);
+    cmpdi(flag, temp, 0);
+    bne(flag, object_has_monitor);
+  }
+
+  // Check if it is still a lightweight lock; this is true if we see
+  // the stack address of the basicLock in the markOop of the object.
+  // Cmpxchg sets flag to cmpd(current_header, box).
+  cmpxchgd(/*flag=*/flag,
+           /*current_value=*/current_header,
+           /*compare_value=*/box,
+           /*exchange_value=*/displaced_header,
+           /*where=*/oop,
+           MacroAssembler::MemBarRel,
+           MacroAssembler::cmpxchgx_hint_release_lock(),
+           noreg,
+           &cont);
+
+  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
+
+  // Handle existing monitor.
+  if ((EmitSync & 0x02) == 0) {
+    b(cont);
+
+    bind(object_has_monitor);
+    addi(current_header, current_header, -markOopDesc::monitor_value); // monitor
+    ld(temp,             ObjectMonitor::owner_offset_in_bytes(), current_header);
+    ld(displaced_header, ObjectMonitor::recursions_offset_in_bytes(), current_header);
+    xorr(temp, R16_thread, temp);      // Will be 0 if we are the owner.
+    orr(temp, temp, displaced_header); // Will be 0 if there are 0 recursions.
+    cmpdi(flag, temp, 0);
+    bne(flag, cont);
+
+    ld(temp,             ObjectMonitor::EntryList_offset_in_bytes(), current_header);
+    ld(displaced_header, ObjectMonitor::cxq_offset_in_bytes(), current_header);
+    orr(temp, temp, displaced_header); // Will be 0 if both are 0.
+    cmpdi(flag, temp, 0);
+    bne(flag, cont);
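+    // EntryList and cxq are both empty: release the monitor by storing 0
+    // (temp is known to be 0 here) into the owner field.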
+    release();
+    std(temp, ObjectMonitor::owner_offset_in_bytes(), current_header);
+  }
+
+  bind(cont);
+  // flag == EQ indicates success
+  // flag == NE indicates failure
+}
+
+// Write the serialization page so the VM thread can do a pseudo-remote membar.
+// We use the current thread pointer to calculate a thread-specific
+// offset to write to within the page. This minimizes bus traffic
+// due to cache line collisions.
+void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
+  srdi(tmp2, thread, os::get_serialize_page_shift_count());
+
+  int mask = os::vm_page_size() - sizeof(int);
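+  // The mask keeps the offset within the page and int-aligned, so threads
+  // mostly hit distinct words of the serialization page.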
+  if (Assembler::is_simm(mask, 16)) {
+    andi(tmp2, tmp2, mask);
+  } else {
+    lis(tmp1, (int)((signed short) (mask >> 16)));
+    ori(tmp1, tmp1, mask & 0x0000ffff);
+    andr(tmp2, tmp2, tmp1);
+  }
+
+  load_const(tmp1, (long) os::get_memory_serialize_page());
+  release();
+  stwx(R0, tmp1, tmp2);
+}
+
+
+// GC barrier helper macros
+
+// Write the card table byte if needed.
+void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
+  CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
+  assert(bs->kind() == BarrierSet::CardTableModRef ||
+         bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
+#ifdef ASSERT
+  cmpdi(CCR0, Rnew_val, 0);
+  asm_assert_ne("null oop not allowed", 0x321);
+#endif
+  card_table_write(bs->byte_map_base, Rtmp, Rstore_addr);
+}
+
+// Write the card table byte.
+void MacroAssembler::card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj) {
+  assert_different_registers(Robj, Rtmp, R0);
+  load_const_optimized(Rtmp, (address)byte_map_base, R0);
+  srdi(Robj, Robj, CardTableModRefBS::card_shift);
+  li(R0, 0); // dirty
+  if (UseConcMarkSweepGC) membar(Assembler::StoreStore);
+  stbx(R0, Rtmp, Robj);
+}
+
+#ifndef SERIALGC
+
+// General G1 pre-barrier generator.
+// Goal: record the previous value if it is not null.
+void MacroAssembler::g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
+                                          Register Rtmp1, Register Rtmp2, bool needs_frame) {
+  Label runtime, filtered;
+
+  // Is marking active?
+  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+    lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
+  } else {
+    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
+    lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
+  }
+  cmpdi(CCR0, Rtmp1, 0);
+  beq(CCR0, filtered);
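+  // Marking is active: SATB requires recording the value about to be
+  // overwritten so that concurrent marking still sees the old object graph.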
+
+  // Do we need to load the previous value?
+  if (Robj != noreg) {
+    // Load the previous value...
+    if (UseCompressedOops) {
+      lwz(Rpre_val, offset, Robj);
+    } else {
+      ld(Rpre_val, offset, Robj);
+    }
+    // Previous value has been loaded into Rpre_val.
+  }
+  assert(Rpre_val != noreg, "must have a real register");
+
+  // Is the previous value null?
+  cmpdi(CCR0, Rpre_val, 0);
+  beq(CCR0, filtered);
+
+  if (Robj != noreg && UseCompressedOops) {
+    decode_heap_oop_not_null(Rpre_val);
+  }
+
+  // Not filtered, so we need to call the enqueue runtime. If Rpre_val was
+  // preloaded into a volatile register it must be saved across the C call
+  // (see below).
+
+  // Can we store original value in the thread's buffer?
+  // Is index == 0?
+  // (The index field is typed as size_t.)
+  const Register Rbuffer = Rtmp1, Rindex = Rtmp2;
+
+  ld(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
+  cmpdi(CCR0, Rindex, 0);
+  beq(CCR0, runtime); // If index == 0, goto runtime.
+  ld(Rbuffer, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
+
+  addi(Rindex, Rindex, -wordSize); // Decrement index.
+  std(Rindex, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
+
+  // Record the previous value.
+  stdx(Rpre_val, Rbuffer, Rindex);
+  b(filtered);
+
+  bind(runtime);
+
+  // The runtime call requires a proper C ABI frame.
+  if (needs_frame) {
+    save_LR_CR(Rtmp1);
+    push_frame_abi112(0, Rtmp2);
+  }
+
+  if (Rpre_val->is_volatile() && Robj == noreg) mr(R31, Rpre_val); // Save pre_val across C call if it was preloaded.
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_val, R16_thread);
+  if (Rpre_val->is_volatile() && Robj == noreg) mr(Rpre_val, R31); // restore
+
+  if (needs_frame) {
+    pop_frame();
+    restore_LR_CR(Rtmp1);
+  }
+
+  bind(filtered);
+}
+
+// General G1 post-barrier generator
+// Store cross-region card.
+void MacroAssembler::g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1, Register Rtmp2, Register Rtmp3, Label *filtered_ext) {
+  Label runtime, filtered_int;
+  Label& filtered = (filtered_ext != NULL) ? *filtered_ext : filtered_int;
+  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2);
+
+  G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
+  assert(bs->kind() == BarrierSet::G1SATBCT ||
+         bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
+
+  // Does store cross heap regions?
+  if (G1RSBarrierRegionFilter) {
+    xorr(Rtmp1, Rstore_addr, Rnew_val);
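+    // The xor is zero in all bits >= LogOfHRGrainBytes iff both addresses lie
+    // in the same heap region, so the shifted result is 0 exactly then.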
+    srdi_(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
+    beq(CCR0, filtered);
+  }
+
+  // Crosses regions, storing NULL?
+#ifdef ASSERT
+  cmpdi(CCR0, Rnew_val, 0);
+  asm_assert_ne("null oop not allowed (G1)", 0x322); // Checked by caller on PPC64, so following branch is obsolete:
+  //beq(CCR0, filtered);
+#endif
+
+  // Storing region crossing non-NULL, is card already dirty?
+  assert(sizeof(*bs->byte_map_base) == sizeof(jbyte), "adjust this code");
+  const Register Rcard_addr = Rtmp1;
+  Register Rbase = Rtmp2;
+  load_const_optimized(Rbase, (address)bs->byte_map_base, /*temp*/ Rtmp3);
+
+  srdi(Rcard_addr, Rstore_addr, CardTableModRefBS::card_shift);
+
+  // Get the address of the card.
+  lbzx(/*card value*/ Rtmp3, Rbase, Rcard_addr);
+
+  assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
+  cmpwi(CCR0, Rtmp3 /* card value */, 0);
+  beq(CCR0, filtered);
+
+  // Storing a region crossing, non-NULL oop, card is clean.
+  // Dirty card and log.
+  li(Rtmp3, 0); // dirty
+  //release(); // G1: oops are allowed to get visible after dirty marking.
+  stbx(Rtmp3, Rbase, Rcard_addr);
+
+  add(Rcard_addr, Rbase, Rcard_addr); // This is the address which needs to get enqueued.
+  Rbase = noreg; // end of lifetime
+
+  const Register Rqueue_index = Rtmp2,
+                 Rqueue_buf   = Rtmp3;
+  ld(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
+  cmpdi(CCR0, Rqueue_index, 0);
+  beq(CCR0, runtime); // If index == 0, goto runtime.
+  ld(Rqueue_buf, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_buf()), R16_thread);
+
+  addi(Rqueue_index, Rqueue_index, -wordSize); // decrement index
+  std(Rqueue_index, in_bytes(JavaThread::dirty_card_queue_offset() + PtrQueue::byte_offset_of_index()), R16_thread);
+
+  stdx(Rcard_addr, Rqueue_buf, Rqueue_index); // store card
+  b(filtered);
+
+  bind(runtime);
+
+  // Pass the live inputs (card address, thread) to the runtime for enqueueing.
+  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
+
+  bind(filtered_int);
+}
+#endif // SERIALGC
+
+// Values for last_Java_pc and last_Java_sp must comply with the rules
+// in frame_ppc64.hpp.
+void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc) {
+  // Always set last_Java_pc and flags first because once last_Java_sp
+  // is visible, has_last_Java_frame is true and users will look at the
+  // rest of the fields. (Note: flags should always be zero before we
+  // get here, so they don't need to be set.)
+
+  // Verify that last_Java_pc was zeroed on return to Java
+  asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
+                          "last_Java_pc not zeroed before leaving Java", 0x200);
+
+  // When returning from calling out from Java mode the frame anchor's
+  // last_Java_pc will always be set to NULL. It is set here so that
+  // if we are doing a call to native (not VM) that we capture the
+  // known pc and don't have to rely on the native call having a
+  // standard frame linkage where we can find the pc.
+  if (last_Java_pc != noreg)
+    std(last_Java_pc, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
+
+  // Set last_Java_sp last.
+  std(last_Java_sp, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
+}
+
+void MacroAssembler::reset_last_Java_frame(void) {
+  asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
+                             R16_thread, "SP was not set, still zero", 0x202);
+
+  BLOCK_COMMENT("reset_last_Java_frame {");
+  li(R0, 0);
+
+  // _last_Java_sp = 0
+  std(R0, in_bytes(JavaThread::last_Java_sp_offset()), R16_thread);
+
+  // _last_Java_pc = 0
+  std(R0, in_bytes(JavaThread::last_Java_pc_offset()), R16_thread);
+  BLOCK_COMMENT("} reset_last_Java_frame");
+}
+
+void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1) {
+  assert_different_registers(sp, tmp1);
+
+  // sp points to a TOP_IJAVA_FRAME, retrieve frame's PC via
+  // TOP_IJAVA_FRAME_ABI.
+  // FIXME: assert that we really have a TOP_IJAVA_FRAME here!
+#ifdef CC_INTERP
+  ld(tmp1/*pc*/, _top_ijava_frame_abi(frame_manager_lr), sp);
+#else
+  Unimplemented();
+#endif
+
+  set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1);
+}
+
+void MacroAssembler::get_vm_result(Register oop_result) {
+  // Read:
+  //   R16_thread
+  //   R16_thread->in_bytes(JavaThread::vm_result_offset())
+  //
+  // Updated:
+  //   oop_result
+  //   R16_thread->in_bytes(JavaThread::vm_result_offset())
+
+  ld(oop_result, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+  li(R0, 0);
+  std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+
+  verify_oop(oop_result);
+}
+
+void MacroAssembler::get_vm_result_2(Register metadata_result) {
+  // Read:
+  //   R16_thread
+  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
+  //
+  // Updated:
+  //   metadata_result
+  //   R16_thread->in_bytes(JavaThread::vm_result_2_offset())
+
+  ld(metadata_result, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
+  li(R0, 0);
+  std(R0, in_bytes(JavaThread::vm_result_2_offset()), R16_thread);
+}
+
+
+void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
+  Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided.
+  if (Universe::narrow_klass_base() != 0) {
+    load_const(R0, Universe::narrow_klass_base(), (dst != current) ? dst : noreg); // Use dst as temp if it is free.
+    sub(dst, current, R0);
+    current = dst;
+  }
+  if (Universe::narrow_klass_shift() != 0) {
+    srdi(dst, current, Universe::narrow_klass_shift());
+    current = dst;
+  }
+  mr_if_needed(dst, current); // Move may be required.
+}
+
+void MacroAssembler::store_klass(Register dst_oop, Register klass, Register ck) {
+  if (UseCompressedClassPointers) {
+    encode_klass_not_null(ck, klass);
+    stw(ck, oopDesc::klass_offset_in_bytes(), dst_oop);
+  } else {
+    std(klass, oopDesc::klass_offset_in_bytes(), dst_oop);
+  }
+}
+
+int MacroAssembler::instr_size_for_decode_klass_not_null() {
+  if (!UseCompressedClassPointers) return 0;
+  int num_instrs = 1;  // shift or move
+  if (Universe::narrow_klass_base() != 0) num_instrs = 7;  // shift + load_const (5 instructions) + add
+  return num_instrs * BytesPerInstWord;
+}
+
+void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+  if (src == noreg) src = dst;
+  Register shifted_src = src;
+  if (Universe::narrow_klass_shift() != 0 ||
+      (Universe::narrow_klass_base() == 0 && src != dst)) {  // Move required.
+    shifted_src = dst;
+    sldi(shifted_src, src, Universe::narrow_klass_shift());
+  }
+  if (Universe::narrow_klass_base() != 0) {
+    load_const(R0, Universe::narrow_klass_base());
+    add(dst, shifted_src, R0);
+  }
+}
+
+void MacroAssembler::load_klass(Register dst, Register src) {
+  if (UseCompressedClassPointers) {
+    lwz(dst, oopDesc::klass_offset_in_bytes(), src);
+    // Attention: no null check here!
+    decode_klass_not_null(dst, dst);
+  } else {
+    ld(dst, oopDesc::klass_offset_in_bytes(), src);
+  }
+}
+
+void MacroAssembler::load_klass_with_trap_null_check(Register dst, Register src) {
+  if (!os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      trap_null_check(src);
+    }
+  }
+  load_klass(dst, src);
+}
+
+void MacroAssembler::reinit_heapbase(Register d, Register tmp) {
+  if (Universe::heap() != NULL) {
+    if (Universe::narrow_oop_base() == NULL) {
+      Assembler::xorr(R30, R30, R30);
+    } else {
+      load_const(R30, Universe::narrow_ptrs_base(), tmp);
+    }
+  } else {
+    load_const(R30, Universe::narrow_ptrs_base_addr(), tmp);
+    ld(R30, 0, R30);
+  }
+}
+
+// Clear Array
+// Kills both input registers. tmp == R0 is allowed.
+void MacroAssembler::clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp) {
+  // Procedure for large arrays (uses data cache block zero instruction).
+    Label startloop, fast, fastloop, small_rest, restloop, done;
+    const int cl_size         = VM_Version::get_cache_line_size(),
+              cl_dwords       = cl_size>>3,
+              cl_dw_addr_bits = exact_log2(cl_dwords),
+              dcbz_min        = 1;                     // Min count of dcbz executions, needs to be >0.
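+    // Three phases: clear doublewords up to the next cache line boundary
+    // (startloop), dcbz whole cache lines (fastloop), then clear the
+    // remaining doublewords (restloop).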
+
+//2:
+    cmpdi(CCR1, cnt_dwords, ((dcbz_min+1)<<cl_dw_addr_bits)-1); // Big enough? (ensure >=dcbz_min lines included).
+    blt(CCR1, small_rest);                                      // Too small.
+    rldicl_(tmp, base_ptr, 64-3, 64-cl_dw_addr_bits);           // Extract dword offset within first cache line.
+    beq(CCR0, fast);                                            // Already 128byte aligned.
+
+    subfic(tmp, tmp, cl_dwords);
+    mtctr(tmp);                        // Set ctr to hit 128byte boundary (0<ctr<cl_dwords).
+    subf(cnt_dwords, tmp, cnt_dwords); // rest.
+    li(tmp, 0);
+//10:
+  bind(startloop);                     // Clear at the beginning to reach 128byte boundary.
+    std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
+    addi(base_ptr, base_ptr, 8);
+    bdnz(startloop);
+//13:
+  bind(fast);                                  // Clear 128byte blocks.
+    srdi(tmp, cnt_dwords, cl_dw_addr_bits);    // Loop count for 128byte loop (>0).
+    andi(cnt_dwords, cnt_dwords, cl_dwords-1); // Rest in dwords.
+    mtctr(tmp);                                // Load counter.
+//16:
+  bind(fastloop);
+    dcbz(base_ptr);                    // Clear 128byte aligned block.
+    addi(base_ptr, base_ptr, cl_size);
+    bdnz(fastloop);
+    if (InsertEndGroupPPC64) { endgroup(); } else { nop(); }
+//20:
+  bind(small_rest);
+    cmpdi(CCR0, cnt_dwords, 0);        // size 0?
+    beq(CCR0, done);                   // rest == 0
+    li(tmp, 0);
+    mtctr(cnt_dwords);                 // Load counter.
+//24:
+  bind(restloop);                      // Clear rest.
+    std(tmp, 0, base_ptr);             // Clear 8byte aligned block.
+    addi(base_ptr, base_ptr, 8);
+    bdnz(restloop);
+//27:
+  bind(done);
+}
+
+/////////////////////////////////////////// String intrinsics ////////////////////////////////////////////
+
+// Search for a single jchar in a jchar[].
+//
+// Assumes that result differs from all other registers.
+//
+// Haystack, needle are the addresses of jchar-arrays.
+// NeedleChar is needle[0] if it is known at compile time.
+// Haycnt is the length of the haystack. We assume haycnt >=1.
+//
+// Preserves haystack, haycnt, kills all other registers.
+//
+// If needle == R0, we search for the constant needleChar.
+void MacroAssembler::string_indexof_1(Register result, Register haystack, Register haycnt,
+                                      Register needle, jchar needleChar,
+                                      Register tmp1, Register tmp2) {
+
+  assert_different_registers(result, haystack, haycnt, needle, tmp1, tmp2);
+
+  Label L_InnerLoop, L_FinalCheck, L_Found1, L_Found2, L_Found3, L_NotFound, L_End;
+  Register needle0 = needle, // Contains needle[0].
+           addr = tmp1,
+           ch1 = tmp2,
+           ch2 = R0;
+
+//2 (variable) or 3 (const):
+   if (needle != R0) lhz(needle0, 0, needle); // Preload needle character, needle has len==1.
+   dcbtct(haystack, 0x00);                        // Indicate R/O access to haystack.
+
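+   // The 2x unrolled loop below checks two characters per iteration
+   // (ctr = haycnt/2); an odd trailing character is handled in L_FinalCheck.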
+   srwi_(tmp2, haycnt, 1);   // Shift right by exact_log2(UNROLL_FACTOR).
+   mr(addr, haystack);
+   beq(CCR0, L_FinalCheck);
+   mtctr(tmp2);              // Move to count register.
+//8:
+  bind(L_InnerLoop);             // Main workhorse (2x unrolled search loop).
+   lhz(ch1, 0, addr);        // Load characters from haystack.
+   lhz(ch2, 2, addr);
+   (needle != R0) ? cmpw(CCR0, ch1, needle0) : cmplwi(CCR0, ch1, needleChar);
+   (needle != R0) ? cmpw(CCR1, ch2, needle0) : cmplwi(CCR1, ch2, needleChar);
+   beq(CCR0, L_Found1);   // Did we find the needle?
+   beq(CCR1, L_Found2);
+   addi(addr, addr, 4);
+   bdnz(L_InnerLoop);
+//16:
+  bind(L_FinalCheck);
+   andi_(R0, haycnt, 1);
+   beq(CCR0, L_NotFound);
+   lhz(ch1, 0, addr);        // One position left at which we have to compare.
+   (needle != R0) ? cmpw(CCR1, ch1, needle0) : cmplwi(CCR1, ch1, needleChar);
+   beq(CCR1, L_Found3);
+//21:
+  bind(L_NotFound);
+   li(result, -1);           // Not found.
+   b(L_End);
+
+  bind(L_Found2);
+   addi(addr, addr, 2);
+//24:
+  bind(L_Found1);
+  bind(L_Found3);                  // Return index ...
+   subf(addr, haystack, addr); // relative to haystack,
+   srdi(result, addr, 1);      // in characters.
+  bind(L_End);
+}
+
+
+// Implementation of IndexOf for jchar arrays.
+//
+// The lengths of haystack and needle are not constant, i.e. passed in registers.
+//
+// Preserves registers haystack, needle.
+// Kills registers haycnt, needlecnt.
+// Assumes that result differs from all other registers.
+// Haystack, needle are the addresses of jchar-arrays.
+// Haycnt, needlecnt are the lengths of them, respectively.
+//
+// Needlecntval must be zero or a 15-bit unsigned immediate > 1.
+void MacroAssembler::string_indexof(Register result, Register haystack, Register haycnt,
+                                    Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
+                                    Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+
+  // Ensure 0<needlecnt<=haycnt in ideal graph as prerequisite!
+  Label L_TooShort, L_Found, L_NotFound, L_End;
+  Register last_addr = haycnt, // Kill haycnt at the beginning.
+           addr      = tmp1,
+           n_start   = tmp2,
+           ch1       = tmp3,
+           ch2       = R0;
+
+  // **************************************************************************************************
+  // Prepare for main loop: optimized for needle count >=2, bail out otherwise.
+  // **************************************************************************************************
+
+//1 (variable) or 3 (const):
+   dcbtct(needle, 0x00);    // Indicate R/O access to str1.
+   dcbtct(haystack, 0x00);  // Indicate R/O access to str2.
+
+  // Compute last haystack addr to use if no match gets found.
+  if (needlecntval == 0) { // variable needlecnt
+//3:
+   subf(ch1, needlecnt, haycnt);      // Last character index to compare is haycnt-needlecnt.
+   addi(addr, haystack, -2);          // Accesses use pre-increment.
+   cmpwi(CCR6, needlecnt, 2);
+   blt(CCR6, L_TooShort);          // Variable needlecnt: handle short needle separately.
+   slwi(ch1, ch1, 1);                 // Scale to number of bytes.
+   lwz(n_start, 0, needle);           // Load first 2 characters of needle.
+   add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
+   addi(needlecnt, needlecnt, -2);    // Rest of needle.
+  } else { // constant needlecnt
+   guarantee(needlecntval != 1, "IndexOf with single-character needle must be handled separately");
+   assert((needlecntval & 0x7fff) == needlecntval, "wrong immediate");
+//5:
+   addi(ch1, haycnt, -needlecntval);  // Last character index to compare is haycnt-needlecnt.
+   lwz(n_start, 0, needle);           // Load first 2 characters of needle.
+   addi(addr, haystack, -2);          // Accesses use pre-increment.
+   slwi(ch1, ch1, 1);                 // Scale to number of bytes.
+   add(last_addr, haystack, ch1);     // Point to last address to compare (haystack+2*(haycnt-needlecnt)).
+   li(needlecnt, needlecntval-2);     // Rest of needle.
+  }
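+  // last_addr now points at the last haystack position at which the needle
+  // could still start; addr is biased by -2 because the loop advances it
+  // before each comparison.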
+
+  // Main Loop (now we have at least 3 characters).
+//11:
+  Label L_OuterLoop, L_InnerLoop, L_FinalCheck, L_Comp1, L_Comp2, L_Comp3;
+  bind(L_OuterLoop); // Search for 1st 2 characters.
+  Register addr_diff = tmp4;
+   subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
+   addi(addr, addr, 2);              // This is the new address we want to use for comparing.
+   srdi_(ch2, addr_diff, 2);
+   beq(CCR0, L_FinalCheck);       // 2 characters left?
+   mtctr(ch2);                       // addr_diff/4
+//16:
+  bind(L_InnerLoop);                // Main workhorse (2x unrolled search loop)
+   lwz(ch1, 0, addr);           // Load 2 characters of haystack (ignore alignment).
+   lwz(ch2, 2, addr);
+   cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
+   cmpw(CCR1, ch2, n_start);
+   beq(CCR0, L_Comp1);       // Did we find the needle start?
+   beq(CCR1, L_Comp2);
+   addi(addr, addr, 4);
+   bdnz(L_InnerLoop);
+//24:
+  bind(L_FinalCheck);
+   rldicl_(addr_diff, addr_diff, 64-1, 63); // Remaining characters not covered by InnerLoop: (addr_diff>>1)&1.
+   beq(CCR0, L_NotFound);
+   lwz(ch1, 0, addr);                       // One position left at which we have to compare.
+   cmpw(CCR1, ch1, n_start);
+   beq(CCR1, L_Comp3);
+//29:
+  bind(L_NotFound);
+   li(result, -1); // not found
+   b(L_End);
+
+
+   // **************************************************************************************************
+   // Special Case: unfortunately, the variable needle case can be called with needlecnt<2
+   // **************************************************************************************************
+//31:
+ if ((needlecntval >> 1) != 1) { // Const needlecnt is 2 or 3? Reduce code size.
+  int nopcnt = 5;
+  if (needlecntval != 0) ++nopcnt; // Balance alignment (other case: see below).
+  if (needlecntval == 0) {         // We have to handle these cases separately.
+  Label L_OneCharLoop;
+  bind(L_TooShort);
+   mtctr(haycnt);
+   lhz(n_start, 0, needle);    // First character of needle
+  bind(L_OneCharLoop);
+   lhzu(ch1, 2, addr);
+   cmpw(CCR1, ch1, n_start);
+   beq(CCR1, L_Found);      // Did we find the one character needle?
+   bdnz(L_OneCharLoop);
+   li(result, -1);             // Not found.
+   b(L_End);
+  } // 8 instructions, so no impact on alignment.
+  for (int x = 0; x < nopcnt; ++x) nop();
+ }
+
+  // **************************************************************************************************
+  // Regular Case Part II: compare rest of needle (first 2 characters have been compared already)
+  // **************************************************************************************************
+
+  // Compare the rest
+//36 if needlecntval==0, else 37:
+  bind(L_Comp2);
+   addi(addr, addr, 2); // First comparison has failed, 2nd one hit.
+  bind(L_Comp1);            // Addr points to possible needle start.
+  bind(L_Comp3);            // Could have created a copy and used a different return address, but saving code size here.
+  if (needlecntval != 2) {  // Const needlecnt==2?
+   if (needlecntval != 3) {
+    if (needlecntval == 0) beq(CCR6, L_Found); // Variable needlecnt==2?
+    Register ind_reg = tmp4;
+    li(ind_reg, 2*2);   // First 2 characters are already compared, use index 2.
+    mtctr(needlecnt);   // Decremented by 2, still > 0.
+//40:
+   Label L_CompLoop;
+   bind(L_CompLoop);
+    lhzx(ch2, needle, ind_reg);
+    lhzx(ch1, addr, ind_reg);
+    cmpw(CCR1, ch1, ch2);
+    bne(CCR1, L_OuterLoop);
+    addi(ind_reg, ind_reg, 2);
+    bdnz(L_CompLoop);
+   } else { // No loop required if there's only one needle character left.
+    lhz(ch2, 2*2, needle);
+    lhz(ch1, 2*2, addr);
+    cmpw(CCR1, ch1, ch2);
+    bne(CCR1, L_OuterLoop);
+   }
+  }
+  // Return index ...
+//46:
+  bind(L_Found);
+   subf(addr, haystack, addr); // relative to haystack, ...
+   srdi(result, addr, 1);      // in characters.
+//48:
+  bind(L_End);
+}
+
+// Implementation of Compare for jchar arrays.
+//
+// Kills the registers str1, str2, cnt1, cnt2.
+// Kills cr0, ctr.
+// Assumes that result differs from the input registers.
+void MacroAssembler::string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
+                                    Register result_reg, Register tmp_reg) {
+   assert_different_registers(result_reg, str1_reg, str2_reg, cnt1_reg, cnt2_reg, tmp_reg);
+
+   Label Ldone, Lslow_case, Lslow_loop, Lfast_loop;
+   Register cnt_diff = R0,
+            limit_reg = cnt1_reg,
+            chr1_reg = result_reg,
+            chr2_reg = cnt2_reg,
+            addr_diff = str2_reg;
+
+   // Offset 0 should be 32 byte aligned.
+//-4:
+    dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
+    dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
+//-2:
+   // Compute min(cnt1, cnt2) and check if 0 (bail out if we don't need to compare characters).
+    subf(result_reg, cnt2_reg, cnt1_reg);  // difference between cnt1/2
+    subf_(addr_diff, str1_reg, str2_reg);  // alias?
+    beq(CCR0, Ldone);                   // return cnt difference if the strings are identical (same address)
+    srawi(limit_reg, result_reg, 31);      // generate signmask (cnt1/2 must be non-negative so cnt_diff can't overflow)
+    mr(cnt_diff, result_reg);
+    andr(limit_reg, result_reg, limit_reg); // difference or zero (negative): cnt1<cnt2 ? cnt1-cnt2 : 0
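+    // Branch-free minimum: limit_reg = cnt2 + min(cnt1-cnt2, 0) = min(cnt1, cnt2).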
+    add_(limit_reg, cnt2_reg, limit_reg);  // min(cnt1, cnt2)==0?
+    beq(CCR0, Ldone);                   // return cnt difference if one has 0 length
+
+    lhz(chr1_reg, 0, str1_reg);            // optional: early out if first characters mismatch
+    lhzx(chr2_reg, str1_reg, addr_diff);   // optional: early out if first characters mismatch
+    addi(tmp_reg, limit_reg, -1);          // min(cnt1, cnt2)-1
+    subf_(result_reg, chr2_reg, chr1_reg); // optional: early out if first characters mismatch
+    bne(CCR0, Ldone);                   // optional: early out if first characters mismatch
+
+   // Set loop counter by scaling down tmp_reg
+    srawi_(chr2_reg, tmp_reg, exact_log2(4)); // (min(cnt1, cnt2)-1)/4
+    ble(CCR0, Lslow_case);                 // need >4 characters for fast loop
+    andi(limit_reg, tmp_reg, 4-1);            // remaining characters
+
+   // Adapt str1_reg str2_reg for the first loop iteration
+    mtctr(chr2_reg);                 // (min(cnt1, cnt2)-1)/4
+    addi(limit_reg, limit_reg, 4+1); // compare last 5-8 characters in slow_case if mismatch found in fast_loop
+//16:
+   // Compare the rest of the characters
+   bind(Lfast_loop);
+    ld(chr1_reg, 0, str1_reg);
+    ldx(chr2_reg, str1_reg, addr_diff);
+    cmpd(CCR0, chr2_reg, chr1_reg);
+    bne(CCR0, Lslow_case); // return chr1_reg
+    addi(str1_reg, str1_reg, 4*2);
+    bdnz(Lfast_loop);
+    addi(limit_reg, limit_reg, -4); // no mismatch found in fast_loop, only 1-4 characters missing
+//23:
+   bind(Lslow_case);
+    mtctr(limit_reg);
+//24:
+   bind(Lslow_loop);
+    lhz(chr1_reg, 0, str1_reg);
+    lhzx(chr2_reg, str1_reg, addr_diff);
+    subf_(result_reg, chr2_reg, chr1_reg);
+    bne(CCR0, Ldone); // return chr1_reg
+    addi(str1_reg, str1_reg, 1*2);
+    bdnz(Lslow_loop);
+//30:
+   // If strings are equal up to min length, return the length difference.
+    mr(result_reg, cnt_diff);
+    nop(); // alignment
+//32:
+   // Otherwise, return the difference between the first mismatched chars.
+   bind(Ldone);
+}
+
+
+// Compare char[] arrays.
+//
+// str1_reg   USE only
+// str2_reg   USE only
+// cnt_reg    USE_DEF, due to tmp reg shortage
+// result_reg DEF only, may clobber USE-only registers
+void MacroAssembler::char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
+                                        Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
+                                        Register tmp5_reg) {
+
+  // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
+  assert_different_registers(result_reg, str1_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
+  assert_different_registers(result_reg, str2_reg, cnt_reg, tmp1_reg, tmp2_reg, tmp3_reg, tmp4_reg, tmp5_reg);
+
+  // Offset 0 should be 32 byte aligned.
+  Label Linit_cbc, Lcbc, Lloop, Ldone_true, Ldone_false;
+  Register index_reg = tmp5_reg;
+  Register cbc_iter  = tmp4_reg;
+
+//-1:
+  dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
+  dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
+//1:
+  andi(cbc_iter, cnt_reg, 4-1);            // Remaining iterations after 4 java characters per iteration loop.
+  li(index_reg, 0); // init
+  li(result_reg, 0); // assume false
+  srwi_(tmp2_reg, cnt_reg, exact_log2(4)); // Div: 4 java characters per iteration (main loop).
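+  // The main loop compares 8 bytes (4 jchars) per ldx; the 0-3 trailing
+  // characters (cbc_iter) are compared one jchar at a time in Lcbc.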
+
+  cmpwi(CCR1, cbc_iter, 0);             // CCR1 = (cbc_iter==0)
+  beq(CCR0, Linit_cbc);                 // too short
+    mtctr(tmp2_reg);
+//8:
+    bind(Lloop);
+      ldx(tmp1_reg, str1_reg, index_reg);
+      ldx(tmp2_reg, str2_reg, index_reg);
+      cmpd(CCR0, tmp1_reg, tmp2_reg);
+      bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
+      addi(index_reg, index_reg, 4*sizeof(jchar));
+      bdnz(Lloop);
+//14:
+  bind(Linit_cbc);
+  beq(CCR1, Ldone_true);
+    mtctr(cbc_iter);
+//16:
+    bind(Lcbc);
+      lhzx(tmp1_reg, str1_reg, index_reg);
+      lhzx(tmp2_reg, str2_reg, index_reg);
+      cmpw(CCR0, tmp1_reg, tmp2_reg);
+      bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
+      addi(index_reg, index_reg, 1*sizeof(jchar));
+      bdnz(Lcbc);
+    nop();
+  bind(Ldone_true);
+  li(result_reg, 1);
+//24:
+  bind(Ldone_false);
+}
+
+
+void MacroAssembler::char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
+                                           Register tmp1_reg, Register tmp2_reg) {
+  // Str1 may be the same register as str2 which can occur e.g. after scalar replacement.
+  assert_different_registers(result_reg, str1_reg, tmp1_reg, tmp2_reg);
+  assert_different_registers(result_reg, str2_reg, tmp1_reg, tmp2_reg);
+  assert(sizeof(jchar) == 2, "must be");
+  assert(cntval >= 0 && ((cntval & 0x7fff) == cntval), "wrong immediate");
+
+  Label Ldone_false;
+
+  if (cntval < 16) { // short case
+    if (cntval != 0) li(result_reg, 0); // assume false
+
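+    // Compare 8-byte chunks while they fit, then a 4-byte chunk if cntval
+    // has bit 1 set, and a final jchar if bit 0 is set.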
+    const int num_bytes = cntval*sizeof(jchar);
+    int index = 0;
+    for (int next_index; (next_index = index + 8) <= num_bytes; index = next_index) {
+      ld(tmp1_reg, index, str1_reg);
+      ld(tmp2_reg, index, str2_reg);
+      cmpd(CCR0, tmp1_reg, tmp2_reg);
+      bne(CCR0, Ldone_false);
+    }
+    if (cntval & 2) {
+      lwz(tmp1_reg, index, str1_reg);
+      lwz(tmp2_reg, index, str2_reg);
+      cmpw(CCR0, tmp1_reg, tmp2_reg);
+      bne(CCR0, Ldone_false);
+      index += 4;
+    }
+    if (cntval & 1) {
+      lhz(tmp1_reg, index, str1_reg);
+      lhz(tmp2_reg, index, str2_reg);
+      cmpw(CCR0, tmp1_reg, tmp2_reg);
+      bne(CCR0, Ldone_false);
+    }
+    // fallthrough: true
+  } else {
+    Label Lloop;
+    Register index_reg = tmp1_reg;
+    const int loopcnt = cntval/4;
+    assert(loopcnt > 0, "must be");
+    // Offset 0 should be 32 byte aligned.
+    //2:
+    dcbtct(str1_reg, 0x00);  // Indicate R/O access to str1.
+    dcbtct(str2_reg, 0x00);  // Indicate R/O access to str2.
+    li(tmp2_reg, loopcnt);
+    li(index_reg, 0); // init
+    li(result_reg, 0); // assume false
+    mtctr(tmp2_reg);
+    //8:
+    bind(Lloop);
+    ldx(R0, str1_reg, index_reg);
+    ldx(tmp2_reg, str2_reg, index_reg);
+    cmpd(CCR0, R0, tmp2_reg);
+    bne(CCR0, Ldone_false);  // Unequal char pair found -> done.
+    addi(index_reg, index_reg, 4*sizeof(jchar));
+    bdnz(Lloop);
+    //14:
+    if (cntval & 2) {
+      lwzx(R0, str1_reg, index_reg);
+      lwzx(tmp2_reg, str2_reg, index_reg);
+      cmpw(CCR0, R0, tmp2_reg);
+      bne(CCR0, Ldone_false);
+      if (cntval & 1) addi(index_reg, index_reg, 2*sizeof(jchar));
+    }
+    if (cntval & 1) {
+      lhzx(R0, str1_reg, index_reg);
+      lhzx(tmp2_reg, str2_reg, index_reg);
+      cmpw(CCR0, R0, tmp2_reg);
+      bne(CCR0, Ldone_false);
+    }
+    // fallthrough: true
+  }
+  li(result_reg, 1);
+  bind(Ldone_false);
+}
+
+
+void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
+#ifdef ASSERT
+  Label ok;
+  if (check_equal) {
+    beq(CCR0, ok);
+  } else {
+    bne(CCR0, ok);
+  }
+  stop(msg, id);
+  bind(ok);
+#endif
+}
+
+void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
+                                          Register mem_base, const char* msg, int id) {
+#ifdef ASSERT
+  switch (size) {
+    case 4:
+      lwz(R0, mem_offset, mem_base);
+      cmpwi(CCR0, R0, 0);
+      break;
+    case 8:
+      ld(R0, mem_offset, mem_base);
+      cmpdi(CCR0, R0, 0);
+      break;
+    default:
+      ShouldNotReachHere();
+  }
+  asm_assert(check_equal, msg, id);
+#endif // ASSERT
+}
+
+void MacroAssembler::verify_thread() {
+  if (VerifyThread) {
+    unimplemented("'VerifyThread' currently not implemented on PPC");
+  }
+}
+
+// READ: oop. KILL: R0. Possibly kills volatile float registers.
+void MacroAssembler::verify_oop(Register oop, const char* msg) {
+  if (!VerifyOops) {
+    return;
+  }
+  // Will be preserved.
+  Register tmp = R11;
+  assert(oop != tmp, "precondition");
+  unsigned int nbytes_save = 10*8; // 10 volatile gprs
+  address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
+  // save tmp
+  mr(R0, tmp);
+  // kill tmp
+  save_LR_CR(tmp);
+  push_frame_abi112(nbytes_save, tmp);
+  // restore tmp
+  mr(tmp, R0);
+  save_volatile_gprs(R1_SP, 112); // except R0
+  // load FunctionDescriptor**
+  load_const(tmp, fd);
+  // load FunctionDescriptor*
+  ld(tmp, 0, tmp);
+  mr(R4_ARG2, oop);
+  load_const(R3_ARG1, (address)msg);
+  // call destination for its side effect
+  call_c(tmp);
+  restore_volatile_gprs(R1_SP, 112); // except R0
+  pop_frame();
+  // save tmp
+  mr(R0, tmp);
+  // kill tmp
+  restore_LR_CR(tmp);
+  // restore tmp
+  mr(tmp, R0);
+}
+
+const char* stop_types[] = {
+  "stop",
+  "untested",
+  "unimplemented",
+  "shouldnotreachhere"
+};
+
+static void stop_on_request(int tp, const char* msg) {
+  tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
+  guarantee(false, err_msg("PPC assembly code requires stop: %s", msg));
+}
+
+// Call a C-function that prints output.
+void MacroAssembler::stop(int type, const char* msg, int id) {
+#ifndef PRODUCT
+  block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
+#else
+  block_comment("stop {");
+#endif
+
+  // setup arguments
+  load_const_optimized(R3_ARG1, type);
+  load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
+  illtrap();
+  emit_int32(id);
+  block_comment("} stop;");
+}
+
+#ifndef PRODUCT
+// Write pattern 0x0101010101010101 in memory region [low-before, high+after].
+// Val, addr are temp registers.
+// If low == addr, addr is killed.
+// High is preserved.
+void MacroAssembler::zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) {
+  if (!ZapMemory) return;
+
+  assert_different_registers(low, val);
+
+  BLOCK_COMMENT("zap memory region {");
+  load_const_optimized(val, 0x0101010101010101);
+  int size = before + after;
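+  // Tiny regions (low == high, 1..4 words) are unrolled; larger regions are
+  // cleared in a loop from low-before to high+after.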
+  if (low == high && size < 5 && size > 0) {
+    int offset = -before*BytesPerWord;
+    for (int i = 0; i < size; ++i) {
+      std(val, offset, low);
+      offset += (1*BytesPerWord);
+    }
+  } else {
+    addi(addr, low, -before*BytesPerWord);
+    assert_different_registers(high, val);
+    if (after) addi(high, high, after * BytesPerWord);
+    Label loop;
+    bind(loop);
+    std(val, 0, addr);
+    addi(addr, addr, 8);
+    cmpd(CCR6, addr, high);
+    ble(CCR6, loop);
+    if (after) addi(high, high, -after * BytesPerWord);  // Correct back to old value.
+  }
+  BLOCK_COMMENT("} zap memory region");
+}
+
+#endif // !PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
+#define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
+
+#include "asm/assembler.hpp"
+
+// MacroAssembler extends Assembler by a few frequently used macros.
+
+class ciTypeArray;
+
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
+
+  //
+  // Optimized instruction emitters
+  //
+
+  inline static int largeoffset_si16_si16_hi(int si31) { return (si31 + (1<<15)) >> 16; }
+  inline static int largeoffset_si16_si16_lo(int si31) { return si31 - (((si31 + (1<<15)) >> 16) << 16); }
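+  // The pair splits si31 so that (hi16 << 16) + lo16 == si31, with lo16
+  // interpreted as a signed 16-bit immediate (hence the +0x8000 rounding).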
+
+  // load d = *[a+si31]
+  // Emits several instructions if the offset is not encodable in one instruction.
+  void ld_largeoffset_unchecked(Register d, int si31, Register a, int emit_filler_nop);
+  void ld_largeoffset          (Register d, int si31, Register a, int emit_filler_nop);
+  inline static bool is_ld_largeoffset(address a);
+  inline static int get_ld_largeoffset_offset(address a);
+
+  inline void round_to(Register r, int modulus);
+
+  // Load/store with type given by parameter.
+  void load_sized_value( Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes, bool is_signed);
+  void store_sized_value(Register dst, RegisterOrConstant offs, Register base, size_t size_in_bytes);
+
+  // Move register if destination register and target register are different
+  inline void mr_if_needed(Register rd, Register rs);
+  inline void fmr_if_needed(FloatRegister rd, FloatRegister rs);
+  // This is dedicated for emitting scheduled mach nodes. For better
+  // readability of the ad file I put it here.
+  // Endgroups are not needed if
+  //  - the scheduler is off
+  //  - the scheduler found that there is a natural group end, in that
+  //    case it reduced the size of the instruction used in the test
+  //    yielding 'needed'.
+  inline void endgroup_if_needed(bool needed);
+
+  // Memory barriers.
+  inline void membar(int bits);
+  inline void release();
+  inline void acquire();
+  inline void fence();
+
+  // nop padding
+  void align(int modulus, int max = 252, int rem = 0);
+
+  //
+  // Constants, loading constants, TOC support
+  //
+
+  // Address of the global TOC.
+  inline static address global_toc();
+  // Offset of given address to the global TOC.
+  inline static int offset_to_global_toc(const address addr);
+
+  // Address of TOC of the current method.
+  inline address method_toc();
+  // Offset of given address to TOC of the current method.
+  inline int offset_to_method_toc(const address addr);
+
+  // Global TOC.
+  void calculate_address_from_global_toc(Register dst, address addr,
+                                         bool hi16 = true, bool lo16 = true,
+                                         bool add_relocation = true, bool emit_dummy_addr = false);
+  inline void calculate_address_from_global_toc_hi16only(Register dst, address addr) {
+    calculate_address_from_global_toc(dst, addr, true, false);
+  };
+  inline void calculate_address_from_global_toc_lo16only(Register dst, address addr) {
+    calculate_address_from_global_toc(dst, addr, false, true);
+  };
+
+  inline static bool is_calculate_address_from_global_toc_at(address a, address bound);
+  static int patch_calculate_address_from_global_toc_at(address a, address addr, address bound);
+  static address get_address_of_calculate_address_from_global_toc_at(address a, address addr);
+
+#ifdef _LP64
+  // Patch narrow oop constant.
+  inline static bool is_set_narrow_oop(address a, address bound);
+  static int patch_set_narrow_oop(address a, address bound, narrowOop data);
+  static narrowOop get_narrow_oop(address a, address bound);
+#endif
+
+  inline static bool is_load_const_at(address a);
+
+  // Emits an oop const to the constant pool, loads the constant, and
+  // sets a relocation info with address current_pc.
+  void load_const_from_method_toc(Register dst, AddressLiteral& a, Register toc);
+  void load_toc_from_toc(Register dst, AddressLiteral& a, Register toc) {
+    assert(dst == R2_TOC, "base register must be TOC");
+    load_const_from_method_toc(dst, a, toc);
+  }
+
+  static bool is_load_const_from_method_toc_at(address a);
+  static int get_offset_of_load_const_from_method_toc_at(address a);
+
+  // Get the 64 bit constant from a `load_const' sequence.
+  static long get_const(address load_const);
+
+  // Patch the 64 bit constant of a `load_const' sequence. This is a
+  // low level procedure. It neither flushes the instruction cache nor
+  // is it atomic.
+  static void patch_const(address load_const, long x);
+
+  // Metadata in code that we have to keep track of.
+  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
+  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
+  // Oops used directly in compiled code are stored in the constant pool,
+  // and loaded from there.
+  // Allocate new entry for oop in constant pool. Generate relocation.
+  AddressLiteral allocate_oop_address(jobject obj);
+  // Find oop obj in constant pool. Return relocation with its index.
+  AddressLiteral constant_oop_address(jobject obj);
+
+  // Find oop in constant pool and emit instructions to load it.
+  // Uses constant_oop_address.
+  inline void set_oop_constant(jobject obj, Register d);
+  // Same as load_address.
+  inline void set_oop         (AddressLiteral obj_addr, Register d);
+
+  // Read runtime constant:  Issue load if constant not yet established,
+  // else use real constant.
+  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
+                                                Register tmp,
+                                                int offset);
+
+  //
+  // branch, jump
+  //
+
+  inline void pd_patch_instruction(address branch, address target);
+  NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
+
+  // Conditional far branch for destinations encodable in 24+2 bits.
+  // Same interface as bc, e.g. no inverse boint-field.
+  enum {
+    bc_far_optimize_not         = 0,
+    bc_far_optimize_on_relocate = 1
+  };
+  // optimize: flag for telling the conditional far branch to optimize
+  //           itself when relocated.
+  void bc_far(int boint, int biint, Label& dest, int optimize);
+  // Relocation of conditional far branches.
+  static bool    is_bc_far_at(address instruction_addr);
+  static address get_dest_of_bc_far_at(address instruction_addr);
+  static void    set_dest_of_bc_far_at(address instruction_addr, address dest);
+ private:
+  static bool inline is_bc_far_variant1_at(address instruction_addr);
+  static bool inline is_bc_far_variant2_at(address instruction_addr);
+  static bool inline is_bc_far_variant3_at(address instruction_addr);
+ public:
+
+  // Convenience bc_far versions.
+  inline void blt_far(ConditionRegister crx, Label& L, int optimize);
+  inline void bgt_far(ConditionRegister crx, Label& L, int optimize);
+  inline void beq_far(ConditionRegister crx, Label& L, int optimize);
+  inline void bso_far(ConditionRegister crx, Label& L, int optimize);
+  inline void bge_far(ConditionRegister crx, Label& L, int optimize);
+  inline void ble_far(ConditionRegister crx, Label& L, int optimize);
+  inline void bne_far(ConditionRegister crx, Label& L, int optimize);
+  inline void bns_far(ConditionRegister crx, Label& L, int optimize);
+
+  // Emit, identify and patch a NOT mt-safe patchable 64 bit absolute call/jump.
+ private:
+  enum {
+    bxx64_patchable_instruction_count = (2/*load_codecache_const*/ + 3/*load_const*/ + 1/*mtctr*/ + 1/*bctrl*/),
+    bxx64_patchable_size              = bxx64_patchable_instruction_count * BytesPerInstWord,
+    bxx64_patchable_ret_addr_offset   = bxx64_patchable_size
+  };
+  void bxx64_patchable(address target, relocInfo::relocType rt, bool link);
+  static bool is_bxx64_patchable_at(            address instruction_addr, bool link);
+  // Does the instruction use a pc-relative encoding of the destination?
+  static bool is_bxx64_patchable_pcrelative_at( address instruction_addr, bool link);
+  static bool is_bxx64_patchable_variant1_at(   address instruction_addr, bool link);
+  // Load destination relative to global toc.
+  static bool is_bxx64_patchable_variant1b_at(  address instruction_addr, bool link);
+  static bool is_bxx64_patchable_variant2_at(   address instruction_addr, bool link);
+  static void set_dest_of_bxx64_patchable_at(   address instruction_addr, address target, bool link);
+  static address get_dest_of_bxx64_patchable_at(address instruction_addr, bool link);
+
+ public:
+  // call
+  enum {
+    bl64_patchable_instruction_count = bxx64_patchable_instruction_count,
+    bl64_patchable_size              = bxx64_patchable_size,
+    bl64_patchable_ret_addr_offset   = bxx64_patchable_ret_addr_offset
+  };
+  inline void bl64_patchable(address target, relocInfo::relocType rt) {
+    bxx64_patchable(target, rt, /*link=*/true);
+  }
+  inline static bool is_bl64_patchable_at(address instruction_addr) {
+    return is_bxx64_patchable_at(instruction_addr, /*link=*/true);
+  }
+  inline static bool is_bl64_patchable_pcrelative_at(address instruction_addr) {
+    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/true);
+  }
+  inline static void set_dest_of_bl64_patchable_at(address instruction_addr, address target) {
+    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/true);
+  }
+  inline static address get_dest_of_bl64_patchable_at(address instruction_addr) {
+    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/true);
+  }
+  // jump
+  enum {
+    b64_patchable_instruction_count = bxx64_patchable_instruction_count,
+    b64_patchable_size              = bxx64_patchable_size,
+  };
+  inline void b64_patchable(address target, relocInfo::relocType rt) {
+    bxx64_patchable(target, rt, /*link=*/false);
+  }
+  inline static bool is_b64_patchable_at(address instruction_addr) {
+    return is_bxx64_patchable_at(instruction_addr, /*link=*/false);
+  }
+  inline static bool is_b64_patchable_pcrelative_at(address instruction_addr) {
+    return is_bxx64_patchable_pcrelative_at(instruction_addr, /*link=*/false);
+  }
+  inline static void set_dest_of_b64_patchable_at(address instruction_addr, address target) {
+    set_dest_of_bxx64_patchable_at(instruction_addr, target, /*link=*/false);
+  }
+  inline static address get_dest_of_b64_patchable_at(address instruction_addr) {
+    return get_dest_of_bxx64_patchable_at(instruction_addr, /*link=*/false);
+  }
+
+  //
+  // Support for frame handling
+  //
+
+  // some ABI-related functions
+  void save_nonvolatile_gprs(   Register dst_base, int offset);
+  void restore_nonvolatile_gprs(Register src_base, int offset);
+  void save_volatile_gprs(   Register dst_base, int offset);
+  void restore_volatile_gprs(Register src_base, int offset);
+  void save_LR_CR(   Register tmp);     // tmp contains LR on return.
+  void restore_LR_CR(Register tmp);
+
+  // Get current PC using bl-next-instruction trick.
+  address get_PC_trash_LR(Register result);
+
+  // Resize current frame either relative to the current SP or to an absolute address.
+  void resize_frame(Register offset, Register tmp);
+  void resize_frame(int      offset, Register tmp);
+  void resize_frame_absolute(Register addr, Register tmp1, Register tmp2);
+
+  // Push a frame of size bytes.
+  void push_frame(Register bytes, Register tmp);
+
+  // Push a frame of size `bytes'. No abi space provided.
+  void push_frame(unsigned int bytes, Register tmp);
+
+  // Push a frame of size `bytes' plus abi112 on top.
+  void push_frame_abi112(unsigned int bytes, Register tmp);
+
+  // Set up a new C frame with a spill area for non-volatile GPRs and additional
+  // space for local variables.
+  void push_frame_abi112_nonvolatiles(unsigned int bytes, Register tmp);
+
+  // pop current C frame
+  void pop_frame();
+
+  //
+  // Calls
+  //
+
+ private:
+  address _last_calls_return_pc;
+
+  // Generic version of a call to C function via a function descriptor
+  // with variable support for C calling conventions (TOC, ENV, etc.).
+  // updates and returns _last_calls_return_pc.
+  address branch_to(Register function_descriptor, bool and_link, bool save_toc_before_call,
+                    bool restore_toc_after_call, bool load_toc_of_callee, bool load_env_of_callee);
+
+ public:
+
+  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
+  inline address last_calls_return_pc();
+
+  // Call a C function via a function descriptor and use full C
+  // calling conventions. Updates and returns _last_calls_return_pc.
+  address call_c(Register function_descriptor);
+  // For tail calls: only branch, don't link, so callee returns to caller of this function.
+  address call_c_and_return_to_caller(Register function_descriptor);
+  address call_c(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt);
+  address call_c_using_toc(const FunctionDescriptor* function_descriptor, relocInfo::relocType rt,
+                           Register toc);
+
+ protected:
+
+  // It is imperative that all calls into the VM are handled via the
+  // call_VM macros. They make sure that the stack linkage is setup
+  // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
+  // while call_VM_leaf's correspond to LEAF entry points.
+  //
+  // This is the base routine called by the different versions of
+  // call_VM. The interpreter may customize this version by overriding
+  // it for its purposes (e.g., to save/restore additional registers
+  // when doing a VM call).
+  //
+  // If no last_java_sp is specified (noreg) then SP will be used instead.
+  virtual void call_VM_base(
+     // where an oop-result ends up if any; use noreg otherwise
+    Register        oop_result,
+    // to set up last_Java_frame in stubs; use noreg otherwise
+    Register        last_java_sp,
+    // the entry point
+    address         entry_point,
+    // flag which indicates if exception should be checked
+    bool            check_exception = true
+  );
+
+  // Support for VM calls. This is the base routine called by the
+  // different versions of call_VM_leaf. The interpreter may customize
+  // this version by overriding it for its purposes (e.g., to
+  // save/restore additional registers when doing a VM call).
+  void call_VM_leaf_base(address entry_point);
+
+ public:
+  // Call into the VM.
+  // Passes the thread pointer (in R3_ARG1) as a prepended argument.
+  // Makes sure oop return values are visible to the GC.
+  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void call_VM_leaf(address entry_point);
+  void call_VM_leaf(address entry_point, Register arg_1);
+  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
+  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
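+
+  // A hedged usage sketch (illustrative only; `some_vm_entry' is a
+  // hypothetical runtime entry point):
+  //
+  //   masm->call_VM(R3_RET /*oop_result*/,
+  //                 CAST_FROM_FN_PTR(address, some_vm_entry),
+  //                 R4_ARG2 /*arg_1*/);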
+
+  // Call a stub function via a function descriptor, but don't save
+  // TOC before call, don't set up TOC and ENV for call, and don't
+  // restore TOC after call. Updates and returns _last_calls_return_pc.
+  inline address call_stub(Register function_entry);
+  inline void call_stub_and_return_to(Register function_entry, Register return_pc);
+
+  //
+  // Java utilities
+  //
+
+  // Read from the polling page; its address is already in a register.
+  inline void load_from_polling_page(Register polling_page_address, int offset = 0);
+  // Check whether instruction is a read access to the polling page
+  // which was emitted by load_from_polling_page(..).
+  static bool is_load_from_polling_page(int instruction, void* ucontext/*may be NULL*/,
+                                        address* polling_address_ptr = NULL);
+
+  // Check whether instruction is a write access to the memory
+  // serialization page realized by one of the instructions stw, stwu,
+  // stwx, or stwux.
+  static bool is_memory_serialization(int instruction, JavaThread* thread, void* ucontext);
+
+  // Support for NULL-checks
+  //
+  // Generates code that causes a NULL OS exception if the content of reg is NULL.
+  // If the accessed location is M[reg + offset] and the offset is known, provide the
+  // offset. No explicit code generation is needed if the offset is within a certain
+  // range (0 <= offset <= page_size).
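+  //
+  // A hedged usage sketch (illustrative only; register, offset and entry
+  // are hypothetical) using null_check_throw() declared below:
+  //
+  //   masm->null_check_throw(R5 /*obj*/, field_offset, R11_scratch1,
+  //                          exception_entry);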
+
+  // Stack overflow checking
+  void bang_stack_with_offset(int offset);
+
+  // If instruction is a stack bang of the form ld, stdu, or
+  // stdux, return the banged address. Otherwise, return 0.
+  static address get_stack_bang_address(int instruction, void* ucontext);
+
+  // Atomics
+  // CmpxchgX sets condition register to cmpX(current, compare).
+  // (flag == ne) => (dest_current_value != compare_value), (!swapped)
+  // (flag == eq) => (dest_current_value == compare_value), ( swapped)
+  static inline bool cmpxchgx_hint_acquire_lock()  { return true; }
+  // The stxcx will probably not be succeeded by a releasing store.
+  static inline bool cmpxchgx_hint_release_lock()  { return false; }
+  static inline bool cmpxchgx_hint_atomic_update() { return false; }
+
+  // Cmpxchg semantics
+  enum {
+    MemBarNone = 0,
+    MemBarRel  = 1,
+    MemBarAcq  = 2,
+    MemBarFenceAfter = 4 // use powers of 2
+  };
+  void cmpxchgw(ConditionRegister flag,
+                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
+                int semantics, bool cmpxchgx_hint = false,
+                Register int_flag_success = noreg, bool contention_hint = false);
+  void cmpxchgd(ConditionRegister flag,
+                Register dest_current_value, Register compare_value, Register exchange_value, Register addr_base,
+                int semantics, bool cmpxchgx_hint = false,
+                Register int_flag_success = noreg, Label* failed = NULL, bool contention_hint = false);
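+
+  // A hedged usage sketch (illustrative only; register choices are
+  // hypothetical): CAS a 64-bit field with acquire semantics and
+  // branch to `failed' if the compare did not match:
+  //
+  //   Label failed;
+  //   masm->cmpxchgd(CCR0, R5 /*current*/, R6 /*compare*/, R7 /*exchange*/,
+  //                  R8 /*addr_base*/, MacroAssembler::MemBarAcq,
+  //                  MacroAssembler::cmpxchgx_hint_atomic_update(),
+  //                  noreg, &failed);
+  //   // Falls through here on success; CCR0 holds the compare result.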
+
+  // interface method calling
+  void lookup_interface_method(Register recv_klass,
+                               Register intf_klass,
+                               RegisterOrConstant itable_index,
+                               Register method_result,
+                               Register temp_reg, Register temp2_reg,
+                               Label& no_such_interface);
+
+  // virtual method calling
+  void lookup_virtual_method(Register recv_klass,
+                             RegisterOrConstant vtable_index,
+                             Register method_result);
+
+  // Test sub_klass against super_klass, with fast and slow paths.
+
+  // The fast path produces a tri-state answer: yes / no / maybe-slow.
+  // One of the two labels can be NULL, meaning take the fall-through.
+  // No registers are killed, except temp1_reg and temp2_reg.
+  void check_klass_subtype_fast_path(Register sub_klass,
+                                     Register super_klass,
+                                     Register temp1_reg,
+                                     Register temp2_reg,
+                                     Label& L_success,
+                                     Label& L_failure);
+
+  // The rest of the type check; must be wired to a corresponding fast path.
+  // It does not repeat the fast path logic, so don't use it standalone.
+  // The temp registers can be noreg, if no temps are available.
+  // They can also be sub_klass or super_klass, meaning it's OK to kill either one.
+  // Updates the sub's secondary super cache as necessary.
+  void check_klass_subtype_slow_path(Register sub_klass,
+                                     Register super_klass,
+                                     Register temp1_reg,
+                                     Register temp2_reg,
+                                     Label* L_success = NULL,
+                                     Register result_reg = noreg);
+
+  // Simplified, combined version, good for typical uses.
+  // Falls through on failure.
+  void check_klass_subtype(Register sub_klass,
+                           Register super_klass,
+                           Register temp1_reg,
+                           Register temp2_reg,
+                           Label& L_success);
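+
+  // A hedged usage sketch (illustrative only; registers are hypothetical):
+  //
+  //   Label L_ok;
+  //   masm->check_klass_subtype(R5 /*sub*/, R6 /*super*/, R7, R8, L_ok);
+  //   masm->stop("type check failed");   // reached only on failure
+  //   masm->bind(L_ok);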
+
+  // Method handle support (JSR 292).
+  void check_method_handle_type(Register mtype_reg, Register mh_reg, Register temp_reg, Label& wrong_method_type);
+
+  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot, Register temp_reg, int extra_slot_offset = 0);
+
+  // Biased locking support
+  // Upon entry, obj_reg must contain the target object, and mark_reg
+  // must contain the target object's header.
+  // Destroys mark_reg if an attempt is made to bias an anonymously
+  // biased lock. In this case a failure will go either to the slow
+  // case or fall through with the notEqual condition code set with
+  // the expectation that the slow case in the runtime will be called.
+  // In the fall-through case where the CAS-based lock is done,
+  // mark_reg is not destroyed.
+  void biased_locking_enter(ConditionRegister cr_reg, Register obj_reg, Register mark_reg, Register temp_reg,
+                            Register temp2_reg, Label& done, Label* slow_case = NULL);
+  // Upon entry, the base register of mark_addr must contain the oop.
+  // Destroys temp_reg.
+  void biased_locking_exit(ConditionRegister cr_reg, Register mark_addr, Register temp_reg, Label& done);
+
+  void compiler_fast_lock_object(  ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
+  void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box, Register tmp1, Register tmp2, Register tmp3);
+
+  // Support for serializing memory accesses between threads
+  void serialize_memory(Register thread, Register tmp1, Register tmp2);
+
+  // GC barrier support.
+  void card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp);
+  void card_table_write(jbyte* byte_map_base, Register Rtmp, Register Robj);
+
+#ifndef SERIALGC
+  // General G1 pre-barrier generator.
+  void g1_write_barrier_pre(Register Robj, RegisterOrConstant offset, Register Rpre_val,
+                            Register Rtmp1, Register Rtmp2, bool needs_frame = false);
+  // General G1 post-barrier generator
+  void g1_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp1,
+                             Register Rtmp2, Register Rtmp3, Label *filtered_ext = NULL);
+#endif // SERIALGC
+
+  // Support for managing the JavaThread pointer (i.e., the reference to
+  // thread-local information).
+
+  // Support for last Java frame (but use call_VM instead where possible):
+  // access R16_thread->last_Java_sp.
+  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
+  void reset_last_Java_frame(void);
+  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
+
+  // Read vm result from thread: oop_result = R16_thread->result;
+  void get_vm_result  (Register oop_result);
+  void get_vm_result_2(Register metadata_result);
+
+  static bool needs_explicit_null_check(intptr_t offset);
+
+  // Trap-instruction-based checks.
+  // Range checks can be distinguished from zero checks: range checks
+  // compare 32 bits (tw, twi), while zero checks compare all 64 bits (td, tdi).
+  inline void trap_null_check(Register a, trap_to_bits cmp = traptoEqual);
+  static bool is_trap_null_check(int x) {
+    return is_tdi(x, traptoEqual,               -1/*any reg*/, 0) ||
+           is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
+  }
+
+  inline void trap_zombie_not_entrant();
+  static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
+
+  inline void trap_should_not_reach_here();
+  static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
+
+  inline void trap_ic_miss_check(Register a, Register b);
+  static bool is_trap_ic_miss_check(int x) {
+    return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
+  }
+
+  // Implicit or explicit null check, jumps to static address exception_entry.
+  inline void null_check_throw(Register a, int offset, Register temp_reg, address exception_entry);
+
+  // Check accessed object for null. Use SIGTRAP-based null checks on AIX.
+  inline void load_with_trap_null_check(Register d, int si16, Register s1);
+
+  // Load heap oop and decompress. Loaded oop may not be null.
+  inline void load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1 = noreg);
+
+  // Null allowed.
+  inline void load_heap_oop(Register d, RegisterOrConstant offs, Register s1 = noreg);
+
+  // Encode/decode heap oop. Oop may not be null, else en/decoding goes wrong.
+  inline void encode_heap_oop_not_null(Register d);
+  inline void decode_heap_oop_not_null(Register d);
+
+  // Null allowed.
+  inline void decode_heap_oop(Register d);
+
+  // Load/Store klass oop from klass field. Compress.
+  void load_klass(Register dst, Register src);
+  void load_klass_with_trap_null_check(Register dst, Register src);
+  void store_klass(Register dst_oop, Register klass, Register tmp = R0);
+  static int instr_size_for_decode_klass_not_null();
+  void decode_klass_not_null(Register dst, Register src = noreg);
+  void encode_klass_not_null(Register dst, Register src = noreg);
+
+  // Load common heap base into register.
+  void reinit_heapbase(Register d, Register tmp = noreg);
+
+  // SIGTRAP-based range checks for arrays.
+  inline void trap_range_check_l(Register a, Register b);
+  inline void trap_range_check_l(Register a, int si16);
+  static bool is_trap_range_check_l(int x) {
+    return (is_tw (x, traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
+            is_twi(x, traptoLessThanUnsigned, -1/*any reg*/)                  );
+  }
+  inline void trap_range_check_le(Register a, int si16);
+  static bool is_trap_range_check_le(int x) {
+    return is_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
+  }
+  inline void trap_range_check_g(Register a, int si16);
+  static bool is_trap_range_check_g(int x) {
+    return is_twi(x, traptoGreaterThanUnsigned, -1/*any reg*/);
+  }
+  inline void trap_range_check_ge(Register a, Register b);
+  inline void trap_range_check_ge(Register a, int si16);
+  static bool is_trap_range_check_ge(int x) {
+    return (is_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
+            is_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
+  }
+  static bool is_trap_range_check(int x) {
+    return is_trap_range_check_l(x) || is_trap_range_check_le(x) ||
+           is_trap_range_check_g(x) || is_trap_range_check_ge(x);
+  }
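+
+  // A hedged usage sketch (illustrative only; registers are hypothetical):
+  // emit a trap that fires when index >= length (unsigned), i.e. on an
+  // out-of-bounds array access:
+  //
+  //   masm->trap_range_check_ge(R5 /*index*/, R6 /*length*/);
+  //
+  // The signal handler can later recognize the faulting instruction via
+  // is_trap_range_check(..) and dispatch to the range check stub.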
+
+  void clear_memory_doubleword(Register base_ptr, Register cnt_dwords, Register tmp = R0);
+
+  // Needle of length 1.
+  void string_indexof_1(Register result, Register haystack, Register haycnt,
+                        Register needle, jchar needleChar,
+                        Register tmp1, Register tmp2);
+  // General indexof, optionally with a constant needle length.
+  void string_indexof(Register result, Register haystack, Register haycnt,
+                      Register needle, ciTypeArray* needle_values, Register needlecnt, int needlecntval,
+                      Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+  void string_compare(Register str1_reg, Register str2_reg, Register cnt1_reg, Register cnt2_reg,
+                      Register result_reg, Register tmp_reg);
+  void char_arrays_equals(Register str1_reg, Register str2_reg, Register cnt_reg, Register result_reg,
+                          Register tmp1_reg, Register tmp2_reg, Register tmp3_reg, Register tmp4_reg,
+                          Register tmp5_reg);
+  void char_arrays_equalsImm(Register str1_reg, Register str2_reg, int cntval, Register result_reg,
+                             Register tmp1_reg, Register tmp2_reg);
+
+  //
+  // Debugging
+  //
+
+  // assert on cr0
+  void asm_assert(bool check_equal, const char* msg, int id);
+  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
+  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
+
+ private:
+  void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
+                            const char* msg, int id);
+
+ public:
+
+  void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
+    asm_assert_mems_zero(true,  8, mem_offset, mem_base, msg, id);
+  }
+  void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
+    asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
+  }
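+
+  // A hedged usage sketch (illustrative only; the offset and id are
+  // hypothetical): assert that the thread's vm_result slot is zero:
+  //
+  //   masm->asm_assert_mem8_is_zero(in_bytes(JavaThread::vm_result_offset()),
+  //                                 R16_thread, "vm_result not cleared", 0x123);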
+
+  // Verify R16_thread contents.
+  void verify_thread();
+
+  // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
+  void verify_oop(Register reg, const char* s = "broken oop");
+
+  // TODO: verify method and klass metadata (compare against vptr?)
+  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
+  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}
+
+#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
+#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
+
+ private:
+
+  enum {
+    stop_stop                = 0,
+    stop_untested            = 1,
+    stop_unimplemented       = 2,
+    stop_shouldnotreachhere  = 3,
+    stop_end                 = 4
+  };
+  void stop(int type, const char* msg, int id);
+
+ public:
+  // Prints msg, dumps registers and stops execution.
+  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,               msg, id); }
+  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,           msg, id); }
+  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented,      msg, id); }
+  void should_not_reach_here()                         { stop(stop_shouldnotreachhere,  "", -1); }
+
+  void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
+};
+
+#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
+#define CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
+
+#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/codeBuffer.hpp"
+#include "code/codeCache.hpp"
+
+inline bool MacroAssembler::is_ld_largeoffset(address a) {
+  const int inst1 = *(int *)a;
+  const int inst2 = *(int *)(a+4);
+  return (is_ld(inst1)) ||
+         (is_addis(inst1) && is_ld(inst2) && inv_ra_field(inst2) == inv_rt_field(inst1));
+}
+
+inline int MacroAssembler::get_ld_largeoffset_offset(address a) {
+  assert(MacroAssembler::is_ld_largeoffset(a), "must be ld with large offset");
+
+  const int inst1 = *(int *)a;
+  if (is_ld(inst1)) {
+    return inv_d1_field(inst1);
+  } else {
+    const int inst2 = *(int *)(a+4);
+    return (inv_d1_field(inst1) << 16) + inv_d1_field(inst2);
+  }
+}
+
+inline void MacroAssembler::round_to(Register r, int modulus) {
+  assert(is_power_of_2_long((jlong)modulus), "must be power of 2");
+  addi(r, r, modulus-1);
+  clrrdi(r, r, log2_long((jlong)modulus));
+}
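+// For example, round_to(r, 16) computes r = (r + 15) & ~15: a value of
+// 17 is rounded up to 32 while 16 is left unchanged.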
+
+// Move register if destination register and target register are different.
+inline void MacroAssembler::mr_if_needed(Register rd, Register rs) {
+  if (rs != rd) mr(rd, rs);
+}
+inline void MacroAssembler::fmr_if_needed(FloatRegister rd, FloatRegister rs) {
+  if (rs != rd) fmr(rd, rs);
+}
+inline void MacroAssembler::endgroup_if_needed(bool needed) {
+  if (needed) {
+    endgroup();
+  }
+}
+
+inline void MacroAssembler::membar(int bits) {
+  // TODO: use elemental_membar(bits) for Power 8 and disable optimization of acquire-release
+  // (Matcher::post_membar_release where we use PPC64_ONLY(xop == Op_MemBarRelease ||))
+  if (bits & StoreLoad) sync(); else lwsync();
+}
+inline void MacroAssembler::release() { membar(LoadStore | StoreStore); }
+inline void MacroAssembler::acquire() { membar(LoadLoad | LoadStore); }
+inline void MacroAssembler::fence()   { membar(LoadLoad | LoadStore | StoreLoad | StoreStore); }
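+
+// Under this mapping only barriers that include StoreLoad pay for a full
+// sync; acquire and release both lower to the cheaper lwsync.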
+
+// Address of the global TOC.
+inline address MacroAssembler::global_toc() {
+  return CodeCache::low_bound();
+}
+
+// Offset of given address to the global TOC.
+inline int MacroAssembler::offset_to_global_toc(const address addr) {
+  intptr_t offset = (intptr_t)addr - (intptr_t)MacroAssembler::global_toc();
+  assert(Assembler::is_simm((long)offset, 31) && offset >= 0, "must be in range");
+  return (int)offset;
+}
+
+// Address of current method's TOC.
+inline address MacroAssembler::method_toc() {
+  return code()->consts()->start();
+}
+
+// Offset of given address to current method's TOC.
+inline int MacroAssembler::offset_to_method_toc(address addr) {
+  intptr_t offset = (intptr_t)addr - (intptr_t)method_toc();
+  assert(is_simm((long)offset, 31) && offset >= 0, "must be in range");
+  return (int)offset;
+}
+
+inline bool MacroAssembler::is_calculate_address_from_global_toc_at(address a, address bound) {
+  const address inst2_addr = a;
+  const int inst2 = *(int *) a;
+
+  // The relocation points to the second instruction, the addi.
+  if (!is_addi(inst2)) return false;
+
+  // The addi reads and writes the same register dst.
+  const int dst = inv_rt_field(inst2);
+  if (inv_ra_field(inst2) != dst) return false;
+
+  // Now, find the preceding addis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  while (inst1_addr >= bound) {
+    inst1 = *(int *) inst1_addr;
+    if (is_addis(inst1) && inv_rt_field(inst1) == dst) {
+      // stop, found the addis which writes dst
+      break;
+    }
+    inst1_addr -= BytesPerInstWord;
+  }
+
+  if (!(inst1 == 0 || inv_ra_field(inst1) == 29 /* R29 */)) return false;
+  return is_addis(inst1);
+}
+
+#ifdef _LP64
+// Detect narrow oop constants.
+inline bool MacroAssembler::is_set_narrow_oop(address a, address bound) {
+  const address inst2_addr = a;
+  const int inst2 = *(int *)a;
+  // The relocation points to the second instruction, the ori.
+  if (!is_ori(inst2)) return false;
+
+  // The ori reads and writes the same register dst.
+  const int dst = inv_rta_field(inst2);
+  if (inv_rs_field(inst2) != dst) return false;
+
+  // Now, find the preceding addis which writes to dst.
+  int inst1 = 0;
+  address inst1_addr = inst2_addr - BytesPerInstWord;
+  while (inst1_addr >= bound) {
+    inst1 = *(int *) inst1_addr;
+    if (is_lis(inst1) && inv_rs_field(inst1) == dst) return true;
+    inst1_addr -= BytesPerInstWord;
+  }
+  return false;
+}
+#endif
+
+
+inline bool MacroAssembler::is_load_const_at(address a) {
+  const int* p_inst = (int *) a;
+  bool b = is_lis(*p_inst++);
+  if (is_ori(*p_inst)) {
+    p_inst++;
+    b = b && is_rldicr(*p_inst++); // TODO: could be made more precise: `sldi'!
+    b = b && is_oris(*p_inst++);
+    b = b && is_ori(*p_inst);
+  } else if (is_lis(*p_inst)) {
+    p_inst++;
+    b = b && is_ori(*p_inst++);
+    b = b && is_ori(*p_inst);
+    // TODO: could enhance reliability by adding is_insrdi
+  } else return false;
+  return b;
+}
+
+inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
+  set_oop(constant_oop_address(obj), d);
+}
+
+inline void MacroAssembler::set_oop(AddressLiteral obj_addr, Register d) {
+  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
+  load_const(d, obj_addr);
+}
+
+inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+  jint& stub_inst = *(jint*) branch;
+  stub_inst = patched_branch(target - branch, stub_inst, 0);
+}
+
+// Relocation of conditional far branches.
+inline bool MacroAssembler::is_bc_far_variant1_at(address instruction_addr) {
+  // Variant 1, the 1st instruction contains the destination address:
+  //
+  //    bcxx  DEST
+  //    endgroup
+  //
+  const int instruction_1 = *(int*)(instruction_addr);
+  const int instruction_2 = *(int*)(instruction_addr + 4);
+  return is_bcxx(instruction_1) &&
+         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) != (intptr_t)(instruction_addr + 2*4)) &&
+         is_endgroup(instruction_2);
+}
+
+// Relocation of conditional far branches.
+inline bool MacroAssembler::is_bc_far_variant2_at(address instruction_addr) {
+  // Variant 2, the 2nd instruction contains the destination address:
+  //
+  //    b!cxx SKIP
+  //    bxx   DEST
+  //  SKIP:
+  //
+  const int instruction_1 = *(int*)(instruction_addr);
+  const int instruction_2 = *(int*)(instruction_addr + 4);
+  return is_bcxx(instruction_1) &&
+         (inv_bd_field(instruction_1, (intptr_t)instruction_addr) == (intptr_t)(instruction_addr + 2*4)) &&
+         is_bxx(instruction_2);
+}
+
+// Relocation for conditional branches
+inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
+  // Variant 3, far cond branch to the next instruction, already patched to nops:
+  //
+  //    nop
+  //    endgroup
+  //  SKIP/DEST:
+  //
+  const int instruction_1 = *(int*)(instruction_addr);
+  const int instruction_2 = *(int*)(instruction_addr + 4);
+  return is_nop(instruction_1) &&
+         is_endgroup(instruction_2);
+}
+
+
+// Convenience bc_far versions
+inline void MacroAssembler::blt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, less), L, optimize); }
+inline void MacroAssembler::bgt_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, greater), L, optimize); }
+inline void MacroAssembler::beq_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, equal), L, optimize); }
+inline void MacroAssembler::bso_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs1, bi0(crx, summary_overflow), L, optimize); }
+inline void MacroAssembler::bge_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, less), L, optimize); }
+inline void MacroAssembler::ble_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, greater), L, optimize); }
+inline void MacroAssembler::bne_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, equal), L, optimize); }
+inline void MacroAssembler::bns_far(ConditionRegister crx, Label& L, int optimize) { MacroAssembler::bc_far(bcondCRbiIs0, bi0(crx, summary_overflow), L, optimize); }
+
+inline address MacroAssembler::call_stub(Register function_entry) {
+  mtctr(function_entry);
+  bctrl();
+  return pc();
+}
+
+inline void MacroAssembler::call_stub_and_return_to(Register function_entry, Register return_pc) {
+  assert_different_registers(function_entry, return_pc);
+  mtlr(return_pc);
+  mtctr(function_entry);
+  bctr();
+}
+
+// Get the pc where the last emitted call will return to.
+inline address MacroAssembler::last_calls_return_pc() {
+  return _last_calls_return_pc;
+}
+
+// Read from the polling page, its address is already in a register.
+inline void MacroAssembler::load_from_polling_page(Register polling_page_address, int offset) {
+  ld(R0, offset, polling_page_address);
+}
+
+// Trap-instruction-based checks.
+
+inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
+  assert(TrapBasedNullChecks, "sanity");
+  tdi(cmp, a/*reg a*/, 0);
+}
+inline void MacroAssembler::trap_zombie_not_entrant() {
+  tdi(traptoUnconditional, 0/*reg 0*/, 1);
+}
+inline void MacroAssembler::trap_should_not_reach_here() {
+  tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
+}
+
+inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
+  td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
+}
+
+// Do an explicit null check if access to a+offset will not raise a SIGSEGV.
+// Either issue a trap instruction that raises SIGTRAP, or do a compare that
+// branches to exception_entry.
+// No support for compressed oops (base page of heap). Does not distinguish
+// loads and stores.
+inline void MacroAssembler::null_check_throw(Register a, int offset, Register temp_reg,
+                                             address exception_entry) {
+  if (!ImplicitNullChecks || needs_explicit_null_check(offset) || !os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      assert(UseSIGTRAP, "sanity");
+      trap_null_check(a);
+    } else {
+      Label ok;
+      cmpdi(CCR0, a, 0);
+      bne(CCR0, ok);
+      load_const_optimized(temp_reg, exception_entry);
+      mtctr(temp_reg);
+      bctr();
+      bind(ok);
+    }
+  }
+}
+
+inline void MacroAssembler::load_with_trap_null_check(Register d, int si16, Register s1) {
+  if (!os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      trap_null_check(s1);
+    }
+  }
+  ld(d, si16, s1);
+}
+
+inline void MacroAssembler::load_heap_oop_not_null(Register d, RegisterOrConstant offs, Register s1) {
+  if (UseCompressedOops) {
+    lwz(d, offs, s1);
+    // Attention: no null check here!
+    decode_heap_oop_not_null(d);
+  } else {
+    ld(d, offs, s1);
+  }
+}
+
+inline void MacroAssembler::load_heap_oop(Register d, RegisterOrConstant offs, Register s1) {
+  if (UseCompressedOops) {
+    lwz(d, offs, s1);
+    decode_heap_oop(d);
+  } else {
+    ld(d, offs, s1);
+  }
+}
+
+inline void MacroAssembler::encode_heap_oop_not_null(Register d) {
+  if (Universe::narrow_oop_base() != NULL) {
+    sub(d, d, R30);
+  }
+  if (Universe::narrow_oop_shift() != 0) {
+    srdi(d, d, LogMinObjAlignmentInBytes);
+  }
+}
+
+inline void MacroAssembler::decode_heap_oop_not_null(Register d) {
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    sldi(d, d, LogMinObjAlignmentInBytes);
+  }
+  if (Universe::narrow_oop_base() != NULL) {
+    add(d, d, R30);
+  }
+}
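+
+// In effect (assuming R30 holds the narrow oop base, cf. reinit_heapbase):
+//   oop = narrow_oop_base + ((uintptr_t)narrow_value << LogMinObjAlignmentInBytes)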
+
+inline void MacroAssembler::decode_heap_oop(Register d) {
+  Label isNull;
+  if (Universe::narrow_oop_base() != NULL) {
+    cmpwi(CCR0, d, 0);
+    beq(CCR0, isNull);
+  }
+  if (Universe::narrow_oop_shift() != 0) {
+    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
+    sldi(d, d, LogMinObjAlignmentInBytes);
+  }
+  if (Universe::narrow_oop_base() != NULL) {
+    add(d, d, R30);
+  }
+  bind(isNull);
+}
+
+// SIGTRAP-based range checks for arrays.
+inline void MacroAssembler::trap_range_check_l(Register a, Register b) {
+  tw (traptoLessThanUnsigned,                  a/*reg a*/, b/*reg b*/);
+}
+inline void MacroAssembler::trap_range_check_l(Register a, int si16) {
+  twi(traptoLessThanUnsigned,                  a/*reg a*/, si16);
+}
+inline void MacroAssembler::trap_range_check_le(Register a, int si16) {
+  twi(traptoEqual | traptoLessThanUnsigned,    a/*reg a*/, si16);
+}
+inline void MacroAssembler::trap_range_check_g(Register a, int si16) {
+  twi(traptoGreaterThanUnsigned,               a/*reg a*/, si16);
+}
+inline void MacroAssembler::trap_range_check_ge(Register a, Register b) {
+  tw (traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, b/*reg b*/);
+}
+inline void MacroAssembler::trap_range_check_ge(Register a, int si16) {
+  twi(traptoEqual | traptoGreaterThanUnsigned, a/*reg a*/, si16);
+}
+
+#endif // CPU_PPC_VM_MACROASSEMBLER_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/metaspaceShared_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "memory/metaspaceShared.hpp"
+
+// Generate the self-patching vtable method:
+//
+// This method will be called (as any other Klass virtual method) with
+// the Klass itself as the first argument.  Example:
+//
+//   oop obj;
+//   int size = obj->klass()->klass_part()->oop_size(this);
+//
+// for which the virtual method call is Klass::oop_size();
+//
+// The dummy method is called with the Klass object as the first
+// operand, and an object as the second argument.
+//
+
+//=====================================================================
+
+// All of the dummy methods in the vtable are essentially identical,
+// differing only by an ordinal constant, and they bear no relationship
+// to the original method which the caller intended. Also, there need
+// to be 'vtbl_list_size' instances of the vtable in order to
+// differentiate between the 'vtbl_list_size' original Klass objects.
+
+void MetaspaceShared::generate_vtable_methods(void** vtbl_list,
+                                              void** vtable,
+                                              char** md_top,
+                                              char* md_end,
+                                              char** mc_top,
+                                              char* mc_end) {
+  Unimplemented();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/methodHandles_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "memory/allocation.inline.hpp"
+#include "prims/methodHandles.hpp"
+
+#define __ _masm->
+
+#ifdef CC_INTERP
+#define EXCEPTION_ENTRY StubRoutines::throw_NullPointerException_at_call_entry()
+#else
+#define EXCEPTION_ENTRY Interpreter::throw_NullPointerException_entry()
+#endif
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
+inline static RegisterOrConstant constant(int value) {
+  return RegisterOrConstant(value);
+}
+
+void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
+  if (VerifyMethodHandles)
+    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
+                 "MH argument is a Class");
+  __ ld(klass_reg, java_lang_Class::klass_offset_in_bytes(), klass_reg);
+}
+
+#ifdef ASSERT
+static int check_nonzero(const char* xname, int x) {
+  assert(x != 0, err_msg("%s should be nonzero", xname));
+  return x;
+}
+#define NONZERO(x) check_nonzero(#x, x)
+#else //ASSERT
+#define NONZERO(x) (x)
+#endif //ASSERT
+
+#ifdef ASSERT
+void MethodHandles::verify_klass(MacroAssembler* _masm,
+                                 Register obj_reg, SystemDictionary::WKID klass_id,
+                                 Register temp_reg, Register temp2_reg,
+                                 const char* error_message) {
+  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
+  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
+  Label L_ok, L_bad;
+  BLOCK_COMMENT("verify_klass {");
+  __ verify_oop(obj_reg);
+  __ cmpdi(CCR0, obj_reg, 0);
+  __ beq(CCR0, L_bad);
+  __ load_klass(temp_reg, obj_reg);
+  __ load_const_optimized(temp2_reg, (address) klass_addr);
+  __ ld(temp2_reg, 0, temp2_reg);
+  __ cmpd(CCR0, temp_reg, temp2_reg);
+  __ beq(CCR0, L_ok);
+  __ ld(temp_reg, klass->super_check_offset(), temp_reg);
+  __ cmpd(CCR0, temp_reg, temp2_reg);
+  __ beq(CCR0, L_ok);
+  __ BIND(L_bad);
+  __ stop(error_message);
+  __ BIND(L_ok);
+  BLOCK_COMMENT("} verify_klass");
+}
+
+void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
+  Label L;
+  BLOCK_COMMENT("verify_ref_kind {");
+  __ load_sized_value(temp, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes()), member_reg,
+                      sizeof(u4), /*is_signed*/ false);
+  // assert(sizeof(u4) == sizeof(java.lang.invoke.MemberName.flags), "");
+  __ srwi( temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
+  __ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
+  __ cmpwi(CCR1, temp, ref_kind);
+  __ beq(CCR1, L);
+  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
+    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
+    if (ref_kind == JVM_REF_invokeVirtual ||
+        ref_kind == JVM_REF_invokeSpecial)
+      // could do this for all ref_kinds, but would explode assembly code size
+      trace_method_handle(_masm, buf);
+    __ stop(buf);
+  }
+  BLOCK_COMMENT("} verify_ref_kind");
+  __ BIND(L);
+}
+
+#endif // ASSERT
+
+void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
+                                            bool for_compiler_entry) {
+  assert(method == R19_method, "interpreter calling convention");
+  assert_different_registers(method, target, temp);
+
+  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
+    Label run_compiled_code;
+    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
+    // compiled code in threads for which the event is enabled.  Check here for
+    // interp_only_mode if these events CAN be enabled.
+    __ verify_thread();
+    __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
+    __ cmplwi(CCR0, temp, 0);
+    __ beq(CCR0, run_compiled_code);
+    __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
+    __ mtctr(target);
+    __ bctr();
+    __ BIND(run_compiled_code);
+  }
+
+  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
+                                                     Method::from_interpreted_offset();
+  __ ld(target, in_bytes(entry_offset), R19_method);
+  __ mtctr(target);
+  __ bctr();
+}
+
+
+void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
+                                        Register recv, Register method_temp,
+                                        Register temp2, Register temp3,
+                                        bool for_compiler_entry) {
+  BLOCK_COMMENT("jump_to_lambda_form {");
+  // This is the initial entry point of a lazy method handle.
+  // After type checking, it picks up the invoker from the LambdaForm.
+  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
+  assert(method_temp == R19_method, "required register for loading method");
+
+  // Load the invoker, as MH -> MH.form -> LF.vmentry
+  __ verify_oop(recv);
+  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv);
+  __ verify_oop(method_temp);
+  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp);
+  __ verify_oop(method_temp);
+  // the following assumes that a Method* is normally compressed in the vmtarget field:
+  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);
+
+  if (VerifyMethodHandles && !for_compiler_entry) {
+    // make sure recv is already on stack
+    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
+    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
+                        sizeof(u2), /*is_signed*/ false);
+    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
+    Label L;
+    __ ld(temp2, __ argument_offset(temp2, temp2, 0), CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
+    __ cmpd(CCR1, temp2, recv);
+    __ beq(CCR1, L);
+    __ stop("receiver not on stack");
+    __ BIND(L);
+  }
+
+  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
+  BLOCK_COMMENT("} jump_to_lambda_form");
+}
+
+
+
+// Code generation
+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
+                                                                vmIntrinsics::ID iid) {
+  const bool not_for_compiler_entry = false;  // this is the interpreter entry
+  assert(is_signature_polymorphic(iid), "expected invoke iid");
+  if (iid == vmIntrinsics::_invokeGeneric ||
+      iid == vmIntrinsics::_compiledLambdaForm) {
+    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
+    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
+    // They all allow an appendix argument.
+    __ stop("Should not reach here");           // empty stubs make SG sick
+    return NULL;
+  }
+
+  Register argbase    = CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp); // parameter (preserved)
+  Register argslot    = R3;
+  Register temp1      = R6;
+  Register param_size = R7;
+
+  // here's where control starts out:
+  __ align(CodeEntryAlignment);
+  address entry_point = __ pc();
+
+  if (VerifyMethodHandles) {
+    Label L;
+    BLOCK_COMMENT("verify_intrinsic_id {");
+    __ load_sized_value(temp1, Method::intrinsic_id_offset_in_bytes(), R19_method,
+                        sizeof(u1), /*is_signed*/ false);
+    // assert(sizeof(u1) == sizeof(Method::_intrinsic_id), "");
+    __ cmpwi(CCR1, temp1, (int) iid);
+    __ beq(CCR1, L);
+    if (iid == vmIntrinsics::_linkToVirtual ||
+        iid == vmIntrinsics::_linkToSpecial) {
+      // could do this for all kinds, but would explode assembly code size
+      trace_method_handle(_masm, "bad Method*:intrinsic_id");
+    }
+    __ stop("bad Method*::intrinsic_id");
+    __ BIND(L);
+    BLOCK_COMMENT("} verify_intrinsic_id");
+  }
+
+  // First task:  Find out how big the argument list is.
+  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
+  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
+  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
+    __ ld(param_size, in_bytes(Method::const_offset()), R19_method);
+    __ load_sized_value(param_size, in_bytes(ConstMethod::size_of_parameters_offset()), param_size,
+                        sizeof(u2), /*is_signed*/ false);
+    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
+  } else {
+    DEBUG_ONLY(param_size = noreg);
+  }
+
+  Register tmp_mh = noreg;
+  if (!is_signature_polymorphic_static(iid)) {
+    __ ld(tmp_mh = temp1, __ argument_offset(param_size, param_size, 0), argbase);
+    DEBUG_ONLY(param_size = noreg);
+  }
+
+  if (TraceMethodHandles) {
+    if (tmp_mh != noreg)
+      __ mr(R23_method_handle, tmp_mh);  // make stub happy
+    trace_method_handle_interpreter_entry(_masm, iid);
+  }
+
+  if (iid == vmIntrinsics::_invokeBasic) {
+    generate_method_handle_dispatch(_masm, iid, tmp_mh, noreg, not_for_compiler_entry);
+
+  } else {
+    // Adjust argument list by popping the trailing MemberName argument.
+    Register tmp_recv = noreg;
+    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
+      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
+      __ ld(tmp_recv = temp1, __ argument_offset(param_size, param_size, 0), argbase);
+      DEBUG_ONLY(param_size = noreg);
+    }
+    Register R19_member = R19_method;  // MemberName ptr; incoming method ptr is dead now
+    __ ld(R19_member, RegisterOrConstant((intptr_t)8), argbase);
+    __ add(argbase, Interpreter::stackElementSize, argbase);
+    generate_method_handle_dispatch(_masm, iid, tmp_recv, R19_member, not_for_compiler_entry);
+  }
+
+  return entry_point;
+}
+
+void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
+                                                    vmIntrinsics::ID iid,
+                                                    Register receiver_reg,
+                                                    Register member_reg,
+                                                    bool for_compiler_entry) {
+  assert(is_signature_polymorphic(iid), "expected invoke iid");
+  Register temp1 = (for_compiler_entry ? R25_tmp5 : R7);
+  Register temp2 = (for_compiler_entry ? R22_tmp2 : R8);
+  Register temp3 = (for_compiler_entry ? R23_tmp3 : R9);
+  Register temp4 = (for_compiler_entry ? R24_tmp4 : R10);
+  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
+  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
+
+  if (iid == vmIntrinsics::_invokeBasic) {
+    // indirect through MH.form.vmentry.vmtarget
+    jump_to_lambda_form(_masm, receiver_reg, R19_method, temp1, temp2, for_compiler_entry);
+  } else {
+    // The method is a member invoker used by direct method handles.
+    if (VerifyMethodHandles) {
+      // make sure the trailing argument really is a MemberName (caller responsibility)
+      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
+                   temp1, temp2,
+                   "MemberName required for invokeVirtual etc.");
+    }
+
+    Register temp1_recv_klass = temp1;
+    if (iid != vmIntrinsics::_linkToStatic) {
+      __ verify_oop(receiver_reg);
+      if (iid == vmIntrinsics::_linkToSpecial) {
+        // Don't actually load the klass; just null-check the receiver.
+        __ null_check_throw(receiver_reg, -1, temp1, EXCEPTION_ENTRY);
+      } else {
+        // load receiver klass itself
+        __ null_check_throw(receiver_reg, oopDesc::klass_offset_in_bytes(), temp1, EXCEPTION_ENTRY);
+        __ load_klass(temp1_recv_klass, receiver_reg);
+        __ verify_klass_ptr(temp1_recv_klass);
+      }
+      BLOCK_COMMENT("check_receiver {");
+      // The receiver for the MemberName must be in receiver_reg.
+      // Check the receiver against the MemberName.clazz
+      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
+        // Did not load it above...
+        __ load_klass(temp1_recv_klass, receiver_reg);
+        __ verify_klass_ptr(temp1_recv_klass);
+      }
+      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
+        Label L_ok;
+        Register temp2_defc = temp2;
+        __ load_heap_oop_not_null(temp2_defc, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
+        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
+        __ verify_klass_ptr(temp2_defc);
+        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
+        // If we get here, the type check failed!
+        __ stop("receiver class disagrees with MemberName.clazz");
+        __ BIND(L_ok);
+      }
+      BLOCK_COMMENT("} check_receiver");
+    }
+    if (iid == vmIntrinsics::_linkToSpecial ||
+        iid == vmIntrinsics::_linkToStatic) {
+      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
+    }
+
+    // Live registers at this point:
+    //  member_reg - MemberName that was the trailing argument
+    //  temp1_recv_klass - klass of stacked receiver, if needed
+    //  saved sender SP - interpreter linkage (if interpreted)
+    //  argument registers - compiler arguments (if compiled)
+
+    Label L_incompatible_class_change_error;
+    switch (iid) {
+    case vmIntrinsics::_linkToSpecial:
+      if (VerifyMethodHandles) {
+        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
+      }
+      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+      break;
+
+    case vmIntrinsics::_linkToStatic:
+      if (VerifyMethodHandles) {
+        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
+      }
+      __ ld(R19_method, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), member_reg);
+      break;
+
+    case vmIntrinsics::_linkToVirtual:
+    {
+      // same as TemplateTable::invokevirtual,
+      // minus the CP setup and profiling:
+
+      if (VerifyMethodHandles) {
+        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
+      }
+
+      // pick out the vtable index from the MemberName, and then we can discard it:
+      Register temp2_index = temp2;
+      __ ld(temp2_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
+
+      if (VerifyMethodHandles) {
+        Label L_index_ok;
+        __ cmpdi(CCR1, temp2_index, 0);
+        __ bge(CCR1, L_index_ok);
+        __ stop("no virtual index");
+        __ BIND(L_index_ok);
+      }
+
+      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
+      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.
+
+      // get target Method* & entry point
+      __ lookup_virtual_method(temp1_recv_klass, temp2_index, R19_method);
+      break;
+    }
+
+    case vmIntrinsics::_linkToInterface:
+    {
+      // same as TemplateTable::invokeinterface
+      // (minus the CP setup and profiling, with different argument motion)
+      if (VerifyMethodHandles) {
+        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
+      }
+
+      Register temp2_intf = temp2;
+      __ load_heap_oop_not_null(temp2_intf, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()), member_reg);
+      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
+      __ verify_klass_ptr(temp2_intf);
+
+      Register vtable_index = R19_method;
+      __ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()), member_reg);
+      if (VerifyMethodHandles) {
+        Label L_index_ok;
+        __ cmpdi(CCR1, vtable_index, 0);
+        __ bge(CCR1, L_index_ok);
+        __ stop("invalid vtable index for MH.invokeInterface");
+        __ BIND(L_index_ok);
+      }
+
+      // given intf, index, and recv klass, dispatch to the implementation method
+      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
+                                 // note: next two args must be the same:
+                                 vtable_index, R19_method,
+                                 temp3, temp4,
+                                 L_incompatible_class_change_error);
+      break;
+    }
+
+    default:
+      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
+      break;
+    }
+
+    // Live at this point:
+    //   R19_method
+    //   saved sender SP (if interpreted)
+
+    // After figuring out which concrete method to call, jump into it.
+    // Note that this works in the interpreter with no data motion.
+    // But the compiled version will require that the receiver be shifted out.
+    __ verify_method_ptr(R19_method);
+    jump_from_method_handle(_masm, R19_method, temp1, temp2, for_compiler_entry);
+
+    if (iid == vmIntrinsics::_linkToInterface) {
+      __ BIND(L_incompatible_class_change_error);
+      __ load_const_optimized(temp1, StubRoutines::throw_IncompatibleClassChangeError_entry());
+      __ mtctr(temp1);
+      __ bctr();
+    }
+  }
+}
+
+#ifndef PRODUCT
+void trace_method_handle_stub(const char* adaptername,
+                              oopDesc* mh,
+                              intptr_t* entry_sp,
+                              intptr_t* saved_regs) {
+
+  bool has_mh = (strstr(adaptername, "/static") == NULL &&
+                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
+  const char* mh_reg_name = has_mh ? "R23_method_handle" : "R23";
+  tty->print_cr("MH %s %s="INTPTR_FORMAT " sp=" INTPTR_FORMAT,
+                adaptername, mh_reg_name, (intptr_t) mh, entry_sp);
+
+  if (Verbose) {
+    tty->print_cr("Registers:");
+    const int abi_offset = frame::abi_112_size / 8;
+    for (int i = R3->encoding(); i <= R12->encoding(); i++) {
+      Register r = as_Register(i);
+      int count = i - R3->encoding();
+      // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_112_size)).
+      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
+      if ((count + 1) % 4 == 0) {
+        tty->cr();
+      } else {
+        tty->print(", ");
+      }
+    }
+    tty->cr();
+
+    {
+      // dumping last frame with frame::describe
+
+      JavaThread* p = JavaThread::active();
+
+      ResourceMark rm;
+      PRESERVE_EXCEPTION_MARK; // may not be needed but is safer and inexpensive here
+      FrameValues values;
+
+      // Note: We want to allow trace_method_handle from any call site.
+      // While trace_method_handle creates a frame, it may be entered
+      // without a PC on the stack top (e.g. not just after a call).
+      // Walking that frame could lead to failures due to that invalid PC.
+      // => carefully detect that frame when doing the stack walking
+
+      // Current C frame
+      frame cur_frame = os::current_frame();
+
+      // Robust search of trace_calling_frame (independent of inlining).
+      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
+      assert(cur_frame.sp() < saved_regs, "registers not saved on stack?");
+      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
+      while (trace_calling_frame.fp() < saved_regs) {
+        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
+      }
+
+      // Safely create a frame and call frame::describe.
+      intptr_t *dump_sp = trace_calling_frame.sender_sp();
+
+      frame dump_frame = frame(dump_sp);
+      dump_frame.describe(values, 1);
+
+      values.describe(-1, saved_regs, "raw top of stack");
+
+      tty->print_cr("Stack layout:");
+      values.print(p);
+    }
+
+    if (has_mh && mh->is_oop()) {
+      mh->print();
+      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
+        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
+          java_lang_invoke_MethodHandle::form(mh)->print();
+      }
+    }
+  }
+}
+
+void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
+  if (!TraceMethodHandles) return;
+
+  BLOCK_COMMENT("trace_method_handle {");
+
+  int nbytes_save = 10 * 8;             // 10 volatile gprs
+  __ save_LR_CR(R0);
+  __ mr(R0, R1_SP);                     // saved_sp
+  assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
+  // push_frame_abi112 only uses R0 if nbytes_save is wider than 16 bits.
+  __ push_frame_abi112(nbytes_save, R0);
+  __ save_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
+
+  __ load_const(R3_ARG1, (address)adaptername);
+  __ mr(R4_ARG2, R23_method_handle);
+  __ mr(R5_ARG3, R0);        // saved_sp
+  __ mr(R6_ARG4, R1_SP);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));
+
+  __ restore_volatile_gprs(R1_SP, frame::abi_112_size); // Except R0.
+  __ pop_frame();
+  __ restore_LR_CR(R0);
+
+  BLOCK_COMMENT("} trace_method_handle");
+}
+#endif // PRODUCT
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/methodHandles_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Platform-specific definitions for method handles.
+// These definitions are inlined into class MethodHandles.
+
+// Adapters
+//static unsigned int adapter_code_size() {
+//  return 32*K DEBUG_ONLY(+ 16*K) + (TraceMethodHandles ? 16*K : 0) + (VerifyMethodHandles ? 32*K : 0);
+//}
+enum /* platform_dependent_constants */ {
+  adapter_code_size = NOT_LP64(16000 DEBUG_ONLY(+ 25000)) LP64_ONLY(32000 DEBUG_ONLY(+ 150000))
+};
+
+// Additional helper methods for MethodHandles code generation:
+public:
+  static void load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg);
+
+  static void verify_klass(MacroAssembler* _masm,
+                           Register obj_reg, SystemDictionary::WKID klass_id,
+                           Register temp_reg, Register temp2_reg,
+                           const char* error_message = "wrong klass") NOT_DEBUG_RETURN;
+
+  static void verify_method_handle(MacroAssembler* _masm, Register mh_reg,
+                                   Register temp_reg, Register temp2_reg) {
+    Unimplemented();
+  }
+
+  static void verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) NOT_DEBUG_RETURN;
+
+  // Similar to InterpreterMacroAssembler::jump_from_interpreted.
+  // Takes care of special dispatch from single stepping too.
+  static void jump_from_method_handle(MacroAssembler* _masm, Register method,
+                                      Register temp, Register temp2,
+                                      bool for_compiler_entry);
+
+  static void jump_to_lambda_form(MacroAssembler* _masm,
+                                  Register recv, Register method_temp,
+                                  Register temp2, Register temp3,
+                                  bool for_compiler_entry);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/nativeInst_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,391 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "nativeInst_ppc.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/ostream.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+
+// We use an illtrap for marking a method as not_entrant or zombie iff !UseSIGTRAP.
+// Work around a C++ compiler bug which changes 'this'.
+bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) {
+  assert(!UseSIGTRAP, "precondition");
+  if (*(int*)addr != 0 /*illtrap*/) return false;
+  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
+  if (cb == NULL || !cb->is_nmethod()) return false;
+  nmethod *nm = (nmethod *)cb;
+  // This method is not_entrant or zombie iff the illtrap instruction is
+  // located at the verified entry point.
+  return nm->verified_entry_point() == addr;
+}
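+
+// Illustrative (assumed signal-handler flow, for exposition only): on SIGILL
+// the platform signal handler asks this predicate whether the faulting PC is
+// such a not_entrant/zombie marker and, if so, continues execution in the
+// handle_wrong_method stub instead of treating the trap as a crash.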
+
+#ifdef ASSERT
+void NativeInstruction::verify() {
+  // Make sure code pattern is actually an instruction address.
+  address addr = addr_at(0);
+  if (addr == 0 || ((intptr_t)addr & 3) != 0) {
+    fatal("not an instruction address");
+  }
+}
+#endif // ASSERT
+
+// Extract call destination from a NativeCall. The call might use a trampoline stub.
+address NativeCall::destination() const {
+  address addr = (address)this;
+  address destination = Assembler::bxx_destination(addr);
+
+  // Do we use a trampoline stub for this call?
+  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // Else we get assertion if nmethod is zombie.
+  assert(cb && cb->is_nmethod(), "sanity");
+  nmethod *nm = (nmethod *)cb;
+  if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
+    // Yes we do, so get the destination from the trampoline stub.
+    const address trampoline_stub_addr = destination;
+    destination = NativeCallTrampolineStub_at(trampoline_stub_addr)->destination(nm);
+  }
+
+  return destination;
+}
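+
+// Illustrative usage ('call_site_pc' is a hypothetical call-site address):
+// clients resolve the final callee through this accessor instead of decoding
+// the bl instruction themselves, so the trampoline indirection stays
+// transparent:
+//
+//   NativeCall* call = nativeCall_at(call_site_pc);  // verifies the bl
+//   address callee = call->destination();            // follows any trampoline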
+
+// Similar to replace_mt_safe, but just changes the destination. The
+// important thing is that free-running threads are able to execute this
+// call instruction at all times. Thus, the displacement field must be
+// instruction-word-aligned.
+//
+// Used in the runtime linkage of calls; see class CompiledIC.
+//
+// Add parameter assert_lock to switch off assertion
+// during code generation, where no patching lock is needed.
+void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
+  assert(!assert_lock ||
+         (Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()),
+         "concurrent code patching");
+
+  ResourceMark rm;
+  int code_size = 1 * BytesPerInstWord;
+  address addr_call = addr_at(0);
+  assert(MacroAssembler::is_bl(*(int*)addr_call), "unexpected code at call-site");
+
+  CodeBuffer cb(addr_call, code_size + 1);
+  MacroAssembler* a = new MacroAssembler(&cb);
+
+  // Patch the call.
+  if (ReoptimizeCallSequences &&
+      a->is_within_range_of_b(dest, addr_call)) {
+    a->bl(dest);
+  } else {
+    address trampoline_stub_addr = get_trampoline();
+
+    // We did not find a trampoline stub because the current codeblob
+    // does not provide this information. The branch will be patched
+    // later during a final fixup, when all necessary information is
+    // available.
+    if (trampoline_stub_addr == 0)
+      return;
+
+    // Patch the constant in the call's trampoline stub.
+    NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
+
+    a->bl(trampoline_stub_addr);
+  }
+  ICache::ppc64_flush_icache_bytes(addr_call, code_size);
+}
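+
+// Illustrative sketch (assumed caller context, cf. class CompiledIC): a
+// patching thread either holds the Patching_lock or runs at a safepoint:
+//
+//   MutexLockerEx ml(Patching_lock, Mutex::_no_safepoint_check_flag);
+//   nativeCall_at(call_site_pc)->set_destination_mt_safe(new_dest);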
+
+address NativeCall::get_trampoline() {
+  address call_addr = addr_at(0);
+
+  CodeBlob *code = CodeCache::find_blob(call_addr);
+  assert(code != NULL, "Could not find the containing code blob");
+
+  // There are no relocations available when the code gets relocated
+  // because of CodeBuffer expansion.
+  if (code->relocation_size() == 0)
+    return NULL;
+
+  address bl_destination = Assembler::bxx_destination(call_addr);
+  if (code->content_contains(bl_destination) &&
+      is_NativeCallTrampolineStub_at(bl_destination))
+    return bl_destination;
+
+  // If the CodeBlob is not an nmethod, this is because we got here from the
+  // CodeBlob constructor, which is called from within the nmethod constructor.
+  return trampoline_stub_Relocation::get_trampoline_for(call_addr, (nmethod*)code);
+}
+
+#ifdef ASSERT
+void NativeCall::verify() {
+  address addr = addr_at(0);
+
+  if (!NativeCall::is_call_at(addr)) {
+    tty->print_cr("not a NativeCall at " PTR_FORMAT, addr);
+    // TODO: PPC port: Disassembler::decode(addr - 20, addr + 20, tty);
+    fatal(err_msg("not a NativeCall at " PTR_FORMAT, addr));
+  }
+}
+#endif // ASSERT
+
+#ifdef ASSERT
+void NativeFarCall::verify() {
+  address addr = addr_at(0);
+
+  NativeInstruction::verify();
+  if (!NativeFarCall::is_far_call_at(addr)) {
+    tty->print_cr("not a NativeFarCall at " PTR_FORMAT, addr);
+    // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
+    fatal(err_msg("not a NativeFarCall at " PTR_FORMAT, addr));
+  }
+}
+#endif // ASSERT
+
+address NativeMovConstReg::next_instruction_address() const {
+#ifdef ASSERT
+  CodeBlob* nm = CodeCache::find_blob(instruction_address());
+  assert(!MacroAssembler::is_set_narrow_oop(addr_at(0), nm->content_begin()), "Should not patch narrow oop here");
+#endif
+
+  if (MacroAssembler::is_load_const_from_method_toc_at(addr_at(0))) {
+    return addr_at(load_const_from_method_toc_instruction_size);
+  } else {
+    return addr_at(load_const_instruction_size);
+  }
+}
+
+intptr_t NativeMovConstReg::data() const {
+  address   addr = addr_at(0);
+
+  if (MacroAssembler::is_load_const_at(addr)) {
+    return MacroAssembler::get_const(addr);
+  }
+
+  CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
+  if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
+    narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
+    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+  } else {
+    assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
+
+    address ctable = cb->content_begin();
+    int offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
+    return *(intptr_t *)(ctable + offset);
+  }
+}
+
+address NativeMovConstReg::set_data_plain(intptr_t data, CodeBlob *cb) {
+  address addr         = instruction_address();
+  address next_address = NULL;
+  if (!cb) cb = CodeCache::find_blob(addr);
+
+  if (cb != NULL && MacroAssembler::is_load_const_from_method_toc_at(addr)) {
+    // A load from the method's TOC (ctable).
+    assert(cb->is_nmethod(), "must be nmethod");
+    const address ctable = cb->content_begin();
+    const int toc_offset = MacroAssembler::get_offset_of_load_const_from_method_toc_at(addr);
+    *(intptr_t *)(ctable + toc_offset) = data;
+    next_address = addr + BytesPerInstWord;
+  } else if (cb != NULL &&
+             MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) {
+    // A calculation relative to the global TOC.
+    if (MacroAssembler::get_address_of_calculate_address_from_global_toc_at(addr, cb->content_begin()) !=
+        (address)data) {
+      const int invalidated_range =
+        MacroAssembler::patch_calculate_address_from_global_toc_at(addr, cb->content_begin(),
+                                                                   (address)data);
+      const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
+      // FIXME:
+      const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
+      ICache::ppc64_flush_icache_bytes(start, range);
+    }
+    next_address = addr + 1 * BytesPerInstWord;
+  } else if (MacroAssembler::is_load_const_at(addr)) {
+    // A normal 5 instruction load_const code sequence.
+    if (MacroAssembler::get_const(addr) != (long)data) {
+      // This is not mt safe, ok in methods like CodeBuffer::copy_code().
+      MacroAssembler::patch_const(addr, (long)data);
+      ICache::ppc64_flush_icache_bytes(addr, load_const_instruction_size);
+    }
+    next_address = addr + 5 * BytesPerInstWord;
+  } else if (MacroAssembler::is_bl(* (int*) addr)) {
+    // A single branch-and-link instruction.
+    ResourceMark rm;
+    const int code_size = 1 * BytesPerInstWord;
+    CodeBuffer cb(addr, code_size + 1);
+    MacroAssembler* a = new MacroAssembler(&cb);
+    a->bl((address) data);
+    ICache::ppc64_flush_icache_bytes(addr, code_size);
+    next_address = addr + code_size;
+  } else {
+    ShouldNotReachHere();
+  }
+
+  return next_address;
+}
+
+void NativeMovConstReg::set_data(intptr_t data) {
+  // Store the value into the instruction stream.
+  CodeBlob *cb = CodeCache::find_blob(instruction_address());
+  address next_address = set_data_plain(data, cb);
+
+  // Also store the value into an oop_Relocation cell, if any.
+  if (cb && cb->is_nmethod()) {
+    RelocIterator iter((nmethod *) cb, instruction_address(), next_address);
+    oop* oop_addr = NULL;
+    Metadata** metadata_addr = NULL;
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        oop_Relocation *r = iter.oop_reloc();
+        if (oop_addr == NULL) {
+          oop_addr = r->oop_addr();
+          *oop_addr = cast_to_oop(data);
+        } else {
+          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
+        }
+      }
+      if (iter.type() == relocInfo::metadata_type) {
+        metadata_Relocation *r = iter.metadata_reloc();
+        if (metadata_addr == NULL) {
+          metadata_addr = r->metadata_addr();
+          *metadata_addr = (Metadata*)data;
+        } else {
+          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
+        }
+      }
+    }
+  }
+}
+
+void NativeMovConstReg::set_narrow_oop(narrowOop data, CodeBlob *code /* = NULL */) {
+  address   addr = addr_at(0);
+  CodeBlob* cb = (code) ? code : CodeCache::find_blob(instruction_address());
+  if (MacroAssembler::get_narrow_oop(addr, cb->content_begin()) == (long)data) return;
+  const int invalidated_range =
+    MacroAssembler::patch_set_narrow_oop(addr, cb->content_begin(), (long)data);
+  const address start = invalidated_range < 0 ? addr + invalidated_range : addr;
+  // FIXME:
+  const int range = invalidated_range < 0 ? 4 - invalidated_range : 8;
+  ICache::ppc64_flush_icache_bytes(start, range);
+}
+
+// Do not use an assertion here. Let clients decide whether they only
+// want this when assertions are enabled.
+#ifdef ASSERT
+void NativeMovConstReg::verify() {
+  address   addr = addr_at(0);
+  if (! MacroAssembler::is_load_const_at(addr) &&
+      ! MacroAssembler::is_load_const_from_method_toc_at(addr)) {
+    CodeBlob* cb = CodeCache::find_blob_unsafe(addr);   // find_nmethod() asserts if nmethod is zombie.
+    if (! (cb != NULL && MacroAssembler::is_calculate_address_from_global_toc_at(addr, cb->content_begin())) &&
+        ! (cb != NULL && MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) &&
+        ! MacroAssembler::is_bl(*((int*) addr))) {
+      tty->print_cr("not a NativeMovConstReg at " PTR_FORMAT, addr);
+      // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
+      fatal(err_msg("not a NativeMovConstReg at " PTR_FORMAT, addr));
+    }
+  }
+}
+#endif // ASSERT
+
+void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
+  ResourceMark rm;
+  int code_size = 1 * BytesPerInstWord;
+  CodeBuffer cb(verified_entry, code_size + 1);
+  MacroAssembler* a = new MacroAssembler(&cb);
+#ifdef COMPILER2
+  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
+#endif
+  // Patch this nmethod atomically. Always use illtrap/trap in debug build.
+  if (DEBUG_ONLY(false &&) a->is_within_range_of_b(dest, a->pc())) {
+    a->b(dest);
+  } else {
+    // The signal handler will continue at dest=SharedRuntime::get_handle_wrong_method_stub().
+    if (TrapBasedNotEntrantChecks) {
+      // We use a special trap for marking a method as not_entrant or zombie.
+      a->trap_zombie_not_entrant();
+    } else {
+      // We use an illtrap for marking a method as not_entrant or zombie.
+      a->illtrap();
+    }
+  }
+  ICache::ppc64_flush_icache_bytes(verified_entry, code_size);
+}
+
+#ifdef ASSERT
+void NativeJump::verify() {
+  address addr = addr_at(0);
+
+  NativeInstruction::verify();
+  if (!NativeJump::is_jump_at(addr)) {
+    tty->print_cr("not a NativeJump at " PTR_FORMAT, addr);
+    // TODO: PPC port: Disassembler::decode(addr, 20, 20, tty);
+    fatal(err_msg("not a NativeJump at " PTR_FORMAT, addr));
+  }
+}
+#endif // ASSERT
+
+//-------------------------------------------------------------------
+
+// Call trampoline stubs.
+//
+// Layout and instructions of a call trampoline stub:
+//    0:  load the TOC (part 1)
+//    4:  load the TOC (part 2)
+//    8:  load the call target from the constant pool (part 1)
+//  [12:  load the call target from the constant pool (part 2, optional)]
+//   ..:  branch via CTR
+//
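+// A sketch of one matching instruction sequence (the use of R12_scratch2
+// follows is_NativeCallTrampolineStub_at(); exact opcodes and operands are
+// assumptions for exposition only):
+//
+//   addis R12, R29, hi(toc)       // load the TOC (part 1)
+//   addi  R12, R12, lo(toc)       // load the TOC (part 2)
+//   ld    R12, toc_offset(R12)    // load the call target from the constant pool
+//   mtctr R12
+//   bctr                          // branch via CTR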
+
+address NativeCallTrampolineStub::encoded_destination_addr() const {
+  address instruction_addr = addr_at(2 * BytesPerInstWord);
+  assert(MacroAssembler::is_ld_largeoffset(instruction_addr),
+         "must be a ld with large offset (from the constant pool)");
+
+  return instruction_addr;
+}
+
+address NativeCallTrampolineStub::destination(nmethod *nm) const {
+  CodeBlob* cb = nm ? nm : CodeCache::find_blob_unsafe(addr_at(0));
+  address ctable = cb->content_begin();
+
+  return *(address*)(ctable + destination_toc_offset());
+}
+
+int NativeCallTrampolineStub::destination_toc_offset() const {
+  return MacroAssembler::get_ld_largeoffset_offset(encoded_destination_addr());
+}
+
+void NativeCallTrampolineStub::set_destination(address new_destination) {
+  CodeBlob* cb = CodeCache::find_blob(addr_at(0));
+  address ctable = cb->content_begin();
+
+  *(address*)(ctable + destination_toc_offset()) = new_destination;
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/nativeInst_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,398 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_NATIVEINST_PPC_HPP
+#define CPU_PPC_VM_NATIVEINST_PPC_HPP
+
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/icache.hpp"
+#include "runtime/os.hpp"
+#include "utilities/top.hpp"
+
+// We have interfaces for the following instructions:
+//
+// - NativeInstruction
+//   - NativeCall
+//   - NativeFarCall
+//   - NativeMovConstReg
+//   - NativeJump
+//   - NativeIllegalInstruction
+//   - NativeConditionalFarBranch
+//   - NativeCallTrampolineStub
+
+// The base class for different kinds of native instruction abstractions.
+// It provides the primitive operations to manipulate code relative to this.
+class NativeInstruction VALUE_OBJ_CLASS_SPEC {
+  friend class Relocation;
+
+ public:
+  bool is_sigtrap_ic_miss_check() {
+    assert(UseSIGTRAP, "precondition");
+    return MacroAssembler::is_trap_ic_miss_check(long_at(0));
+  }
+
+  bool is_sigtrap_null_check() {
+    assert(UseSIGTRAP && TrapBasedNullChecks, "precondition");
+    return MacroAssembler::is_trap_null_check(long_at(0));
+  }
+
+  // We use a special trap for marking a method as not_entrant or zombie
+  // iff UseSIGTRAP.
+  bool is_sigtrap_zombie_not_entrant() {
+    assert(UseSIGTRAP, "precondition");
+    return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
+  }
+
+  // We use an illtrap for marking a method as not_entrant or zombie
+  // iff !UseSIGTRAP.
+  bool is_sigill_zombie_not_entrant() {
+    assert(!UseSIGTRAP, "precondition");
+    // Work around a C++ compiler bug which changes 'this'.
+    return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
+  }
+  static bool is_sigill_zombie_not_entrant_at(address addr);
+
+#ifdef COMPILER2
+  // SIGTRAP-based implicit range checks
+  bool is_sigtrap_range_check() {
+    assert(UseSIGTRAP && TrapBasedRangeChecks, "precondition");
+    return MacroAssembler::is_trap_range_check(long_at(0));
+  }
+#endif
+
+  // 'should not reach here'.
+  bool is_sigtrap_should_not_reach_here() {
+    return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
+  }
+
+  bool is_safepoint_poll() {
+    // Is the current instruction a POTENTIAL read access to the polling page?
+    // The current arguments of the instruction are not checked!
+    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
+  }
+
+  bool is_memory_serialization(JavaThread *thread, void *ucontext) {
+    // Is the current instruction a write access of thread to the
+    // memory serialization page?
+    return MacroAssembler::is_memory_serialization(long_at(0), thread, ucontext);
+  }
+
+  address get_stack_bang_address(void *ucontext) {
+    // If long_at(0) is not a stack bang, return 0. Otherwise, return
+    // banged address.
+    return MacroAssembler::get_stack_bang_address(long_at(0), ucontext);
+  }
+
+ protected:
+  address  addr_at(int offset) const    { return address(this) + offset; }
+  int      long_at(int offset) const    { return *(int*)addr_at(offset); }
+
+ public:
+  void verify() NOT_DEBUG_RETURN;
+};
+
+inline NativeInstruction* nativeInstruction_at(address address) {
+  NativeInstruction* inst = (NativeInstruction*)address;
+  inst->verify();
+  return inst;
+}
+
+// The NativeCall is an abstraction for accessing/manipulating call
+// instructions. It is used to manipulate inline caches, primitive &
+// dll calls, etc.
+//
+// SPARC distinguishes `NativeCall' and `NativeFarCall'. On PPC64,
+// at present, we provide a single class `NativeCall' representing the
+// sequence `load_const, mtctr, bctrl' or the sequence 'ld_from_toc,
+// mtctr, bctrl'.
+class NativeCall: public NativeInstruction {
+ public:
+
+  enum ppc_specific_constants {
+    load_const_instruction_size                 = 28,
+    load_const_from_method_toc_instruction_size = 16,
+    instruction_size                            = 16 // Used in shared code for calls with reloc_info.
+  };
+
+  static bool is_call_at(address a) {
+    return Assembler::is_bl(*(int*)(a));
+  }
+
+  static bool is_call_before(address return_address) {
+    return NativeCall::is_call_at(return_address - 4);
+  }
+
+  address instruction_address() const {
+    return addr_at(0);
+  }
+
+  address next_instruction_address() const {
+    // We have only bl.
+    assert(MacroAssembler::is_bl(*(int*)instruction_address()), "Should be bl instruction!");
+    return addr_at(4);
+  }
+
+  address return_address() const {
+    return next_instruction_address();
+  }
+
+  address destination() const;
+
+  // The parameter assert_lock disables the assertion during code generation.
+  void set_destination_mt_safe(address dest, bool assert_lock = true);
+
+  address get_trampoline();
+
+  void verify_alignment() {} // do nothing on ppc
+  void verify() NOT_DEBUG_RETURN;
+};
+
+inline NativeCall* nativeCall_at(address instr) {
+  NativeCall* call = (NativeCall*)instr;
+  call->verify();
+  return call;
+}
+
+inline NativeCall* nativeCall_before(address return_address) {
+  NativeCall* call = NULL;
+  if (MacroAssembler::is_bl(*(int*)(return_address - 4)))
+    call = (NativeCall*)(return_address - 4);
+  assert(call != NULL, "no bl instruction found before return_address");
+  call->verify();
+  return call;
+}
+
+// The NativeFarCall is an abstraction for accessing/manipulating native
+// call-anywhere instructions.
+// Used to call native methods which may be loaded anywhere in the address
+// space, possibly out of reach of a call instruction.
+class NativeFarCall: public NativeInstruction {
+ public:
+  // We use MacroAssembler::bl64_patchable() for implementing a
+  // call-anywhere instruction.
+
+  // Checks whether instr points at a NativeFarCall instruction.
+  static bool is_far_call_at(address instr) {
+    return MacroAssembler::is_bl64_patchable_at(instr);
+  }
+
+  // Does the NativeFarCall implementation use a pc-relative encoding
+  // of the call destination?
+  // Used when relocating code.
+  bool is_pcrelative() {
+    assert(MacroAssembler::is_bl64_patchable_at((address)this),
+           "unexpected call type");
+    return MacroAssembler::is_bl64_patchable_pcrelative_at((address)this);
+  }
+
+  // Returns the NativeFarCall's destination.
+  address destination() const {
+    assert(MacroAssembler::is_bl64_patchable_at((address)this),
+           "unexpected call type");
+    return MacroAssembler::get_dest_of_bl64_patchable_at((address)this);
+  }
+
+  // Sets the NativeFarCall's destination, not necessarily mt-safe.
+  // Used when relocating code.
+  void set_destination(address dest) {
+    // Set new destination (implementation of call may change here).
+    assert(MacroAssembler::is_bl64_patchable_at((address)this),
+           "unexpected call type");
+    MacroAssembler::set_dest_of_bl64_patchable_at((address)this, dest);
+  }
+
+  void verify() NOT_DEBUG_RETURN;
+};
+
+// Instantiates a NativeFarCall object starting at the given instruction
+// address and returns the NativeFarCall object.
+inline NativeFarCall* nativeFarCall_at(address instr) {
+  NativeFarCall* call = (NativeFarCall*)instr;
+  call->verify();
+  return call;
+}
+
+// An interface for accessing/manipulating native set_oop imm, reg instructions.
+// (used to manipulate inlined data references, etc.)
+class NativeMovConstReg: public NativeInstruction {
+ public:
+
+  enum ppc_specific_constants {
+    load_const_instruction_size                 = 20,
+    load_const_from_method_toc_instruction_size =  8,
+    instruction_size                            =  8 // Used in shared code for calls with reloc_info.
+  };
+
+  address instruction_address() const {
+    return addr_at(0);
+  }
+
+  address next_instruction_address() const;
+
+  // (The [set_]data accessor respects oop_type relocs also.)
+  intptr_t data() const;
+
+  // Patch the code stream.
+  address set_data_plain(intptr_t x, CodeBlob *code);
+  // Patch the code stream and oop pool.
+  void set_data(intptr_t x);
+
+  // Patch narrow oop constants. Use this also for narrow klass.
+  void set_narrow_oop(narrowOop data, CodeBlob *code = NULL);
+
+  void verify() NOT_DEBUG_RETURN;
+};
+
+inline NativeMovConstReg* nativeMovConstReg_at(address address) {
+  NativeMovConstReg* test = (NativeMovConstReg*)address;
+  test->verify();
+  return test;
+}
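+
+// Illustrative usage ('reloc_pc' is a hypothetical instruction address found
+// via a RelocIterator):
+//
+//   NativeMovConstReg* mcr = nativeMovConstReg_at(reloc_pc);
+//   mcr->set_data((intptr_t)new_value);  // patches code stream and oop pool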
+
+// The NativeJump is an abstraction for accessing/manipulating native
+// jump-anywhere instructions.
+class NativeJump: public NativeInstruction {
+ public:
+  // We use MacroAssembler::b64_patchable() for implementing a
+  // jump-anywhere instruction.
+
+  enum ppc_specific_constants {
+    instruction_size = MacroAssembler::b64_patchable_size
+  };
+
+  // Checks whether instr points at a NativeJump instruction.
+  static bool is_jump_at(address instr) {
+    return MacroAssembler::is_b64_patchable_at(instr)
+      || (   MacroAssembler::is_load_const_from_method_toc_at(instr)
+          && Assembler::is_mtctr(*(int*)(instr + 2 * 4))
+          && Assembler::is_bctr(*(int*)(instr + 3 * 4)));
+  }
+
+  // Does the NativeJump implementation use a pc-relative encoding
+  // of the jump destination?
+  // Used when relocating code or patching jumps.
+  bool is_pcrelative() {
+    return MacroAssembler::is_b64_patchable_pcrelative_at((address)this);
+  }
+
+  // Returns the NativeJump's destination.
+  address jump_destination() const {
+    if (MacroAssembler::is_b64_patchable_at((address)this)) {
+      return MacroAssembler::get_dest_of_b64_patchable_at((address)this);
+    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
+               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
+               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
+      return (address)((NativeMovConstReg *)this)->data();
+    } else {
+      ShouldNotReachHere();
+      return NULL;
+    }
+  }
+
+  // Sets the NativeJump's destination, not necessarily mt-safe.
+  // Used when relocating code or patching jumps.
+  void set_jump_destination(address dest) {
+    // Set new destination (implementation of jump may change here).
+    if (MacroAssembler::is_b64_patchable_at((address)this)) {
+      MacroAssembler::set_dest_of_b64_patchable_at((address)this, dest);
+    } else if (MacroAssembler::is_load_const_from_method_toc_at((address)this)
+               && Assembler::is_mtctr(*(int*)((address)this + 2 * 4))
+               && Assembler::is_bctr(*(int*)((address)this + 3 * 4))) {
+      ((NativeMovConstReg *)this)->set_data((intptr_t)dest);
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  // MT-safe insertion of native jump at verified method entry
+  static void patch_verified_entry(address entry, address verified_entry, address dest);
+
+  void verify() NOT_DEBUG_RETURN;
+
+  static void check_verified_entry_alignment(address entry, address verified_entry) {
+    // We just patch one instruction on ppc64, so the jump doesn't have to
+    // be aligned. Nothing to do here.
+  }
+};
+
+// Instantiates a NativeJump object starting at the given instruction
+// address and returns the NativeJump object.
+inline NativeJump* nativeJump_at(address instr) {
+  NativeJump* call = (NativeJump*)instr;
+  call->verify();
+  return call;
+}
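+
+// Illustrative usage (assumed caller, analogous to nmethod patching on other
+// platforms): making an nmethod not_entrant replaces only the single
+// instruction at its verified entry point:
+//
+//   NativeJump::patch_verified_entry(nm->entry_point(),
+//                                    nm->verified_entry_point(),
+//                                    SharedRuntime::get_handle_wrong_method_stub());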
+
+// NativeConditionalFarBranch is abstraction for accessing/manipulating
+// conditional far branches.
+class NativeConditionalFarBranch : public NativeInstruction {
+ public:
+
+  static bool is_conditional_far_branch_at(address instr) {
+    return MacroAssembler::is_bc_far_at(instr);
+  }
+
+  address branch_destination() const {
+    return MacroAssembler::get_dest_of_bc_far_at((address)this);
+  }
+
+  void set_branch_destination(address dest) {
+    MacroAssembler::set_dest_of_bc_far_at((address)this, dest);
+  }
+};
+
+inline NativeConditionalFarBranch* NativeConditionalFarBranch_at(address address) {
+  assert(NativeConditionalFarBranch::is_conditional_far_branch_at(address),
+         "must be a conditional far branch");
+  return (NativeConditionalFarBranch*)address;
+}
+
+// Call trampoline stubs.
+class NativeCallTrampolineStub : public NativeInstruction {
+ private:
+
+  address encoded_destination_addr() const;
+
+ public:
+
+  address destination(nmethod *nm = NULL) const;
+  int destination_toc_offset() const;
+
+  void set_destination(address new_destination);
+};
+
+inline bool is_NativeCallTrampolineStub_at(address address) {
+  int first_instr = *(int*)address;
+  return Assembler::is_addis(first_instr) &&
+    (Register)(intptr_t)Assembler::inv_rt_field(first_instr) == R12_scratch2;
+}
+
+inline NativeCallTrampolineStub* NativeCallTrampolineStub_at(address address) {
+  assert(is_NativeCallTrampolineStub_at(address), "no call trampoline found");
+  return (NativeCallTrampolineStub*)address;
+}
+
+#endif // CPU_PPC_VM_NATIVEINST_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/ppc.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,12032 @@
+//
+// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+// Copyright 2012, 2013 SAP AG. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
+
+//
+// PPC64 Architecture Description File
+//
+
+//----------REGISTER DEFINITION BLOCK------------------------------------------
+// This information is used by the matcher and the register allocator to
+// describe individual registers and classes of registers within the target
+// architecture.
+register %{
+//----------Architecture Description Register Definitions----------------------
+// General Registers
+// "reg_def"  name (register save type, C convention save type,
+//                  ideal register type, encoding);
+//
+// Register Save Types:
+//
+//   NS  = No-Save:     The register allocator assumes that these registers
+//                      can be used without saving upon entry to the method, &
+//                      that they do not need to be saved at call sites.
+//
+//   SOC = Save-On-Call: The register allocator assumes that these registers
+//                      can be used without saving upon entry to the method,
+//                      but that they must be saved at call sites.
+//                      These are called "volatiles" on ppc.
+//
+//   SOE = Save-On-Entry: The register allocator assumes that these registers
+//                      must be saved before using them upon entry to the
+//                      method, but they do not need to be saved at call
+//                      sites.
+//                      These are called "nonvolatiles" on ppc.
+//
+//   AS  = Always-Save:   The register allocator assumes that these registers
+//                      must be saved before using them upon entry to the
+//                      method, & that they must be saved at call sites.
+//
+// Ideal Register Type is used to determine how to save & restore a
+// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
+// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
+//
+// The encoding number is the actual bit-pattern placed into the opcodes.
+//
+// PPC64 register definitions, based on the 64-bit PowerPC ELF ABI
+// Supplement Version 1.7 as of 2003-10-29.
+//
+// For each 64-bit register we must define two registers: the register
+// itself, e.g. R3, and a corresponding virtual 32-bit 'half',
+// e.g. R3_H, which is needed by the allocator but is not used
+// for stores, loads, etc.
+
+// ----------------------------
+// Integer/Long Registers
+// ----------------------------
+
+  // PPC64 has 32 64-bit integer registers.
+
+  // types: v = volatile, nv = non-volatile, s = system
+  reg_def R0   ( SOC, SOC, Op_RegI,  0, R0->as_VMReg()         );  // v   used in prologs
+  reg_def R0_H ( SOC, SOC, Op_RegI, 99, R0->as_VMReg()->next() );
+  reg_def R1   ( NS,  NS,  Op_RegI,  1, R1->as_VMReg()         );  // s   SP
+  reg_def R1_H ( NS,  NS,  Op_RegI, 99, R1->as_VMReg()->next() );
+  reg_def R2   ( SOC, SOC, Op_RegI,  2, R2->as_VMReg()         );  // v   TOC
+  reg_def R2_H ( SOC, SOC, Op_RegI, 99, R2->as_VMReg()->next() );
+  reg_def R3   ( SOC, SOC, Op_RegI,  3, R3->as_VMReg()         );  // v   iarg1 & iret
+  reg_def R3_H ( SOC, SOC, Op_RegI, 99, R3->as_VMReg()->next() );
+  reg_def R4   ( SOC, SOC, Op_RegI,  4, R4->as_VMReg()         );  //     iarg2
+  reg_def R4_H ( SOC, SOC, Op_RegI, 99, R4->as_VMReg()->next() );
+  reg_def R5   ( SOC, SOC, Op_RegI,  5, R5->as_VMReg()         );  // v   iarg3
+  reg_def R5_H ( SOC, SOC, Op_RegI, 99, R5->as_VMReg()->next() );
+  reg_def R6   ( SOC, SOC, Op_RegI,  6, R6->as_VMReg()         );  // v   iarg4
+  reg_def R6_H ( SOC, SOC, Op_RegI, 99, R6->as_VMReg()->next() );
+  reg_def R7   ( SOC, SOC, Op_RegI,  7, R7->as_VMReg()         );  // v   iarg5
+  reg_def R7_H ( SOC, SOC, Op_RegI, 99, R7->as_VMReg()->next() );
+  reg_def R8   ( SOC, SOC, Op_RegI,  8, R8->as_VMReg()         );  // v   iarg6
+  reg_def R8_H ( SOC, SOC, Op_RegI, 99, R8->as_VMReg()->next() );
+  reg_def R9   ( SOC, SOC, Op_RegI,  9, R9->as_VMReg()         );  // v   iarg7
+  reg_def R9_H ( SOC, SOC, Op_RegI, 99, R9->as_VMReg()->next() );
+  reg_def R10  ( SOC, SOC, Op_RegI, 10, R10->as_VMReg()        );  // v   iarg8
+  reg_def R10_H( SOC, SOC, Op_RegI, 99, R10->as_VMReg()->next());
+  reg_def R11  ( SOC, SOC, Op_RegI, 11, R11->as_VMReg()        );  // v   ENV / scratch
+  reg_def R11_H( SOC, SOC, Op_RegI, 99, R11->as_VMReg()->next());
+  reg_def R12  ( SOC, SOC, Op_RegI, 12, R12->as_VMReg()        );  // v   scratch
+  reg_def R12_H( SOC, SOC, Op_RegI, 99, R12->as_VMReg()->next());
+  reg_def R13  ( NS,  NS,  Op_RegI, 13, R13->as_VMReg()        );  // s   system thread id
+  reg_def R13_H( NS,  NS,  Op_RegI, 99, R13->as_VMReg()->next());
+  reg_def R14  ( SOC, SOE, Op_RegI, 14, R14->as_VMReg()        );  // nv
+  reg_def R14_H( SOC, SOE, Op_RegI, 99, R14->as_VMReg()->next());
+  reg_def R15  ( SOC, SOE, Op_RegI, 15, R15->as_VMReg()        );  // nv
+  reg_def R15_H( SOC, SOE, Op_RegI, 99, R15->as_VMReg()->next());
+  reg_def R16  ( SOC, SOE, Op_RegI, 16, R16->as_VMReg()        );  // nv
+  reg_def R16_H( SOC, SOE, Op_RegI, 99, R16->as_VMReg()->next());
+  reg_def R17  ( SOC, SOE, Op_RegI, 17, R17->as_VMReg()        );  // nv
+  reg_def R17_H( SOC, SOE, Op_RegI, 99, R17->as_VMReg()->next());
+  reg_def R18  ( SOC, SOE, Op_RegI, 18, R18->as_VMReg()        );  // nv
+  reg_def R18_H( SOC, SOE, Op_RegI, 99, R18->as_VMReg()->next());
+  reg_def R19  ( SOC, SOE, Op_RegI, 19, R19->as_VMReg()        );  // nv
+  reg_def R19_H( SOC, SOE, Op_RegI, 99, R19->as_VMReg()->next());
+  reg_def R20  ( SOC, SOE, Op_RegI, 20, R20->as_VMReg()        );  // nv
+  reg_def R20_H( SOC, SOE, Op_RegI, 99, R20->as_VMReg()->next());
+  reg_def R21  ( SOC, SOE, Op_RegI, 21, R21->as_VMReg()        );  // nv
+  reg_def R21_H( SOC, SOE, Op_RegI, 99, R21->as_VMReg()->next());
+  reg_def R22  ( SOC, SOE, Op_RegI, 22, R22->as_VMReg()        );  // nv
+  reg_def R22_H( SOC, SOE, Op_RegI, 99, R22->as_VMReg()->next());
+  reg_def R23  ( SOC, SOE, Op_RegI, 23, R23->as_VMReg()        );  // nv
+  reg_def R23_H( SOC, SOE, Op_RegI, 99, R23->as_VMReg()->next());
+  reg_def R24  ( SOC, SOE, Op_RegI, 24, R24->as_VMReg()        );  // nv
+  reg_def R24_H( SOC, SOE, Op_RegI, 99, R24->as_VMReg()->next());
+  reg_def R25  ( SOC, SOE, Op_RegI, 25, R25->as_VMReg()        );  // nv
+  reg_def R25_H( SOC, SOE, Op_RegI, 99, R25->as_VMReg()->next());
+  reg_def R26  ( SOC, SOE, Op_RegI, 26, R26->as_VMReg()        );  // nv
+  reg_def R26_H( SOC, SOE, Op_RegI, 99, R26->as_VMReg()->next());
+  reg_def R27  ( SOC, SOE, Op_RegI, 27, R27->as_VMReg()        );  // nv
+  reg_def R27_H( SOC, SOE, Op_RegI, 99, R27->as_VMReg()->next());
+  reg_def R28  ( SOC, SOE, Op_RegI, 28, R28->as_VMReg()        );  // nv
+  reg_def R28_H( SOC, SOE, Op_RegI, 99, R28->as_VMReg()->next());
+  reg_def R29  ( SOC, SOE, Op_RegI, 29, R29->as_VMReg()        );  // nv
+  reg_def R29_H( SOC, SOE, Op_RegI, 99, R29->as_VMReg()->next());
+  reg_def R30  ( SOC, SOE, Op_RegI, 30, R30->as_VMReg()        );  // nv
+  reg_def R30_H( SOC, SOE, Op_RegI, 99, R30->as_VMReg()->next());
+  reg_def R31  ( SOC, SOE, Op_RegI, 31, R31->as_VMReg()        );  // nv
+  reg_def R31_H( SOC, SOE, Op_RegI, 99, R31->as_VMReg()->next());
+
+
+// ----------------------------
+// Float/Double Registers
+// ----------------------------
+
+  // Double Registers
+  // The rules of ADL require that double registers be defined in pairs.
+  // Each pair must be two 32-bit values, but not necessarily a pair of
+  // single float registers. In each pair, ADLC-assigned register numbers
+  // must be adjacent, with the lower number even. Finally, when the
+  // CPU stores such a register pair to memory, the word associated with
+  // the lower ADLC-assigned number must be stored to the lower address.
+
+  // PPC64 has 32 64-bit floating-point registers. Each can store a single
+  // or double precision floating-point value.
+
+  // types: v = volatile, nv = non-volatile, s = system
+  reg_def F0   ( SOC, SOC, Op_RegF,  0, F0->as_VMReg()         );  // v   scratch
+  reg_def F0_H ( SOC, SOC, Op_RegF, 99, F0->as_VMReg()->next() );
+  reg_def F1   ( SOC, SOC, Op_RegF,  1, F1->as_VMReg()         );  // v   farg1 & fret
+  reg_def F1_H ( SOC, SOC, Op_RegF, 99, F1->as_VMReg()->next() );
+  reg_def F2   ( SOC, SOC, Op_RegF,  2, F2->as_VMReg()         );  // v   farg2
+  reg_def F2_H ( SOC, SOC, Op_RegF, 99, F2->as_VMReg()->next() );
+  reg_def F3   ( SOC, SOC, Op_RegF,  3, F3->as_VMReg()         );  // v   farg3
+  reg_def F3_H ( SOC, SOC, Op_RegF, 99, F3->as_VMReg()->next() );
+  reg_def F4   ( SOC, SOC, Op_RegF,  4, F4->as_VMReg()         );  // v   farg4
+  reg_def F4_H ( SOC, SOC, Op_RegF, 99, F4->as_VMReg()->next() );
+  reg_def F5   ( SOC, SOC, Op_RegF,  5, F5->as_VMReg()         );  // v   farg5
+  reg_def F5_H ( SOC, SOC, Op_RegF, 99, F5->as_VMReg()->next() );
+  reg_def F6   ( SOC, SOC, Op_RegF,  6, F6->as_VMReg()         );  // v   farg6
+  reg_def F6_H ( SOC, SOC, Op_RegF, 99, F6->as_VMReg()->next() );
+  reg_def F7   ( SOC, SOC, Op_RegF,  7, F7->as_VMReg()         );  // v   farg7
+  reg_def F7_H ( SOC, SOC, Op_RegF, 99, F7->as_VMReg()->next() );
+  reg_def F8   ( SOC, SOC, Op_RegF,  8, F8->as_VMReg()         );  // v   farg8
+  reg_def F8_H ( SOC, SOC, Op_RegF, 99, F8->as_VMReg()->next() );
+  reg_def F9   ( SOC, SOC, Op_RegF,  9, F9->as_VMReg()         );  // v   farg9
+  reg_def F9_H ( SOC, SOC, Op_RegF, 99, F9->as_VMReg()->next() );
+  reg_def F10  ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()        );  // v   farg10
+  reg_def F10_H( SOC, SOC, Op_RegF, 99, F10->as_VMReg()->next());
+  reg_def F11  ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()        );  // v   farg11
+  reg_def F11_H( SOC, SOC, Op_RegF, 99, F11->as_VMReg()->next());
+  reg_def F12  ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()        );  // v   farg12
+  reg_def F12_H( SOC, SOC, Op_RegF, 99, F12->as_VMReg()->next());
+  reg_def F13  ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()        );  // v   farg13
+  reg_def F13_H( SOC, SOC, Op_RegF, 99, F13->as_VMReg()->next());
+  reg_def F14  ( SOC, SOE, Op_RegF, 14, F14->as_VMReg()        );  // nv
+  reg_def F14_H( SOC, SOE, Op_RegF, 99, F14->as_VMReg()->next());
+  reg_def F15  ( SOC, SOE, Op_RegF, 15, F15->as_VMReg()        );  // nv
+  reg_def F15_H( SOC, SOE, Op_RegF, 99, F15->as_VMReg()->next());
+  reg_def F16  ( SOC, SOE, Op_RegF, 16, F16->as_VMReg()        );  // nv
+  reg_def F16_H( SOC, SOE, Op_RegF, 99, F16->as_VMReg()->next());
+  reg_def F17  ( SOC, SOE, Op_RegF, 17, F17->as_VMReg()        );  // nv
+  reg_def F17_H( SOC, SOE, Op_RegF, 99, F17->as_VMReg()->next());
+  reg_def F18  ( SOC, SOE, Op_RegF, 18, F18->as_VMReg()        );  // nv
+  reg_def F18_H( SOC, SOE, Op_RegF, 99, F18->as_VMReg()->next());
+  reg_def F19  ( SOC, SOE, Op_RegF, 19, F19->as_VMReg()        );  // nv
+  reg_def F19_H( SOC, SOE, Op_RegF, 99, F19->as_VMReg()->next());
+  reg_def F20  ( SOC, SOE, Op_RegF, 20, F20->as_VMReg()        );  // nv
+  reg_def F20_H( SOC, SOE, Op_RegF, 99, F20->as_VMReg()->next());
+  reg_def F21  ( SOC, SOE, Op_RegF, 21, F21->as_VMReg()        );  // nv
+  reg_def F21_H( SOC, SOE, Op_RegF, 99, F21->as_VMReg()->next());
+  reg_def F22  ( SOC, SOE, Op_RegF, 22, F22->as_VMReg()        );  // nv
+  reg_def F22_H( SOC, SOE, Op_RegF, 99, F22->as_VMReg()->next());
+  reg_def F23  ( SOC, SOE, Op_RegF, 23, F23->as_VMReg()        );  // nv
+  reg_def F23_H( SOC, SOE, Op_RegF, 99, F23->as_VMReg()->next());
+  reg_def F24  ( SOC, SOE, Op_RegF, 24, F24->as_VMReg()        );  // nv
+  reg_def F24_H( SOC, SOE, Op_RegF, 99, F24->as_VMReg()->next());
+  reg_def F25  ( SOC, SOE, Op_RegF, 25, F25->as_VMReg()        );  // nv
+  reg_def F25_H( SOC, SOE, Op_RegF, 99, F25->as_VMReg()->next());
+  reg_def F26  ( SOC, SOE, Op_RegF, 26, F26->as_VMReg()        );  // nv
+  reg_def F26_H( SOC, SOE, Op_RegF, 99, F26->as_VMReg()->next());
+  reg_def F27  ( SOC, SOE, Op_RegF, 27, F27->as_VMReg()        );  // nv
+  reg_def F27_H( SOC, SOE, Op_RegF, 99, F27->as_VMReg()->next());
+  reg_def F28  ( SOC, SOE, Op_RegF, 28, F28->as_VMReg()        );  // nv
+  reg_def F28_H( SOC, SOE, Op_RegF, 99, F28->as_VMReg()->next());
+  reg_def F29  ( SOC, SOE, Op_RegF, 29, F29->as_VMReg()        );  // nv
+  reg_def F29_H( SOC, SOE, Op_RegF, 99, F29->as_VMReg()->next());
+  reg_def F30  ( SOC, SOE, Op_RegF, 30, F30->as_VMReg()        );  // nv
+  reg_def F30_H( SOC, SOE, Op_RegF, 99, F30->as_VMReg()->next());
+  reg_def F31  ( SOC, SOE, Op_RegF, 31, F31->as_VMReg()        );  // nv
+  reg_def F31_H( SOC, SOE, Op_RegF, 99, F31->as_VMReg()->next());
+
+// ----------------------------
+// Special Registers
+// ----------------------------
+
+// Condition Codes Flag Registers
+
+  // PPC64 has 8 condition code "registers" which are all contained
+  // in the CR register.
+
+  // types: v = volatile, nv = non-volatile, s = system
+  reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg());  // v
+  reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg());  // v
+  reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg());  // nv
+  reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg());  // nv
+  reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg());  // nv
+  reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg());  // v
+  reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg());  // v
+  reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg());  // v
+
+  // Special registers of PPC64
+
+  reg_def SR_XER(    SOC, SOC, Op_RegP, 0, SR_XER->as_VMReg());     // v
+  reg_def SR_LR(     SOC, SOC, Op_RegP, 1, SR_LR->as_VMReg());      // v
+  reg_def SR_CTR(    SOC, SOC, Op_RegP, 2, SR_CTR->as_VMReg());     // v
+  reg_def SR_VRSAVE( SOC, SOC, Op_RegP, 3, SR_VRSAVE->as_VMReg());  // v
+  reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
+  reg_def SR_PPR(    SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg());     // v
+
+
+// ----------------------------
+// Specify priority of register selection within phases of register
+// allocation. Highest priority is first. A useful heuristic is to
+// give registers a low priority when they are required by machine
+// instructions, like EAX and EDX on I486, and choose no-save registers
+// before save-on-call, & save-on-call before save-on-entry. Registers
+// which participate in fixed calling sequences should come last.
+// Registers which are used as pairs must fall on an even boundary.
+
+// It's worth about 1% on SPEC geomean to get this right.
+
+// Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration
+// in adGlobals_ppc64.hpp which defines the <register>_num values, e.g.
+// R3_num. Therefore, R3_num may not be (and in reality is not)
+// the same as R3->encoding()! Furthermore, we cannot make any
+// assumptions on ordering, e.g. R3_num may be less than R2_num.
+// Additionally, the function
+//   static enum RC rc_class(OptoReg::Name reg )
+// maps a given <register>_num value to its chunk type (except for flags)
+// and its current implementation relies on chunk0 and chunk1 having a
+// size of 64 each.
+
+// If you change this allocation class, please have a look at the
+// default values for the parameters RoundRobinIntegerRegIntervalStart
+// and RoundRobinFloatRegIntervalStart
+
+alloc_class chunk0 (
+  // Chunk0 contains *all* 64 integer register halves.
+
+  // "non-volatile" registers
+  R14, R14_H,
+  R15, R15_H,
+  R17, R17_H,
+  R18, R18_H,
+  R19, R19_H,
+  R20, R20_H,
+  R21, R21_H,
+  R22, R22_H,
+  R23, R23_H,
+  R24, R24_H,
+  R25, R25_H,
+  R26, R26_H,
+  R27, R27_H,
+  R28, R28_H,
+  R29, R29_H,
+  R30, R30_H,
+  R31, R31_H,
+
+  // scratch/special registers
+  R11, R11_H,
+  R12, R12_H,
+
+  // argument registers
+  R10, R10_H,
+  R9,  R9_H,
+  R8,  R8_H,
+  R7,  R7_H,
+  R6,  R6_H,
+  R5,  R5_H,
+  R4,  R4_H,
+  R3,  R3_H,
+
+  // special registers, not available for allocation
+  R16, R16_H,     // R16_thread
+  R13, R13_H,     // system thread id
+  R2,  R2_H,      // may be used for TOC
+  R1,  R1_H,      // SP
+  R0,  R0_H       // R0 (scratch)
+);
+
+// If you change this allocation class, please have a look at the
+// default values for the parameters RoundRobinIntegerRegIntervalStart
+// and RoundRobinFloatRegIntervalStart
+
+alloc_class chunk1 (
+  // Chunk1 contains *all* 64 floating-point register halves.
+
+  // scratch register
+  F0,  F0_H,
+
+  // argument registers
+  F13, F13_H,
+  F12, F12_H,
+  F11, F11_H,
+  F10, F10_H,
+  F9,  F9_H,
+  F8,  F8_H,
+  F7,  F7_H,
+  F6,  F6_H,
+  F5,  F5_H,
+  F4,  F4_H,
+  F3,  F3_H,
+  F2,  F2_H,
+  F1,  F1_H,
+
+  // non-volatile registers
+  F14, F14_H,
+  F15, F15_H,
+  F16, F16_H,
+  F17, F17_H,
+  F18, F18_H,
+  F19, F19_H,
+  F20, F20_H,
+  F21, F21_H,
+  F22, F22_H,
+  F23, F23_H,
+  F24, F24_H,
+  F25, F25_H,
+  F26, F26_H,
+  F27, F27_H,
+  F28, F28_H,
+  F29, F29_H,
+  F30, F30_H,
+  F31, F31_H
+);
+
+alloc_class chunk2 (
+  // Chunk2 contains *all* 8 condition code registers.
+
+  CCR0,
+  CCR1,
+  CCR2,
+  CCR3,
+  CCR4,
+  CCR5,
+  CCR6,
+  CCR7
+);
+
+alloc_class chunk3 (
+  // special registers
+  // These registers are not allocated, but used for nodes generated by postalloc expand.
+  SR_XER,
+  SR_LR,
+  SR_CTR,
+  SR_VRSAVE,
+  SR_SPEFSCR,
+  SR_PPR
+);
+
+//-------Architecture Description Register Classes-----------------------
+
+// Several register classes are automatically defined based upon
+// information in this architecture description.
+
+// 1) reg_class inline_cache_reg           ( as defined in frame section )
+// 2) reg_class compiler_method_oop_reg    ( as defined in frame section )
+// 3) reg_class interpreter_method_oop_reg ( as defined in frame section )
+// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
+//
+
+// ----------------------------
+// 32 Bit Register Classes
+// ----------------------------
+
+// We specify registers twice, once as read/write, and once read-only.
+// We use the read-only registers for source operands. With this, we
+// can include preset read only registers in this class, as a hard-coded
+// '0'-register. (We used to simulate this on ppc.)
+
+// 32 bit registers that can be read and written i.e. these registers
+// can be dest (or src) of normal instructions.
+reg_class bits32_reg_rw(
+/*R0*/              // R0
+/*R1*/              // SP
+  R2,               // TOC
+  R3,
+  R4,
+  R5,
+  R6,
+  R7,
+  R8,
+  R9,
+  R10,
+  R11,
+  R12,
+/*R13*/             // system thread id
+  R14,
+  R15,
+/*R16*/             // R16_thread
+  R17,
+  R18,
+  R19,
+  R20,
+  R21,
+  R22,
+  R23,
+  R24,
+  R25,
+  R26,
+  R27,
+  R28,
+/*R29*/             // global TOC
+/*R30*/             // Narrow Oop Base
+  R31
+);
+
+// 32 bit registers that can only be read i.e. these registers can
+// only be src of all instructions.
+reg_class bits32_reg_ro(
+/*R0*/              // R0
+/*R1*/              // SP
+  R2,               // TOC
+  R3,
+  R4,
+  R5,
+  R6,
+  R7,
+  R8,
+  R9,
+  R10,
+  R11,
+  R12,
+/*R13*/             // system thread id
+  R14,
+  R15,
+/*R16*/             // R16_thread
+  R17,
+  R18,
+  R19,
+  R20,
+  R21,
+  R22,
+  R23,
+  R24,
+  R25,
+  R26,
+  R27,
+  R28,
+/*R29*/
+/*R30*/             // Narrow Oop Base
+  R31
+);
+
+// Complement-required-in-pipeline operands for narrow oops.
+reg_class bits32_reg_ro_not_complement (
+/*R0*/     // R0
+  R1,      // SP
+  R2,      // TOC
+  R3,
+  R4,
+  R5,
+  R6,
+  R7,
+  R8,
+  R9,
+  R10,
+  R11,
+  R12,
+/*R13,*/   // system thread id
+  R14,
+  R15,
+  R16,    // R16_thread
+  R17,
+  R18,
+  R19,
+  R20,
+  R21,
+  R22,
+/*R23,
+  R24,
+  R25,
+  R26,
+  R27,
+  R28,*/
+/*R29,*/ // TODO: let allocator handle TOC!!
+/*R30,*/
+  R31
+);
+
+// Complement-required-in-pipeline operands for narrow oops.
+// See 64-bit declaration.
+reg_class bits32_reg_ro_complement (
+  R23,
+  R24,
+  R25,
+  R26,
+  R27,
+  R28
+);
+
+reg_class rscratch1_bits32_reg(R11);
+reg_class rscratch2_bits32_reg(R12);
+reg_class rarg1_bits32_reg(R3);
+reg_class rarg2_bits32_reg(R4);
+reg_class rarg3_bits32_reg(R5);
+reg_class rarg4_bits32_reg(R6);
+
+// ----------------------------
+// 64 Bit Register Classes
+// ----------------------------
+// 64-bit build means 64-bit pointers means hi/lo pairs
+
+reg_class rscratch1_bits64_reg(R11_H, R11);
+reg_class rscratch2_bits64_reg(R12_H, R12);
+reg_class rarg1_bits64_reg(R3_H, R3);
+reg_class rarg2_bits64_reg(R4_H, R4);
+reg_class rarg3_bits64_reg(R5_H, R5);
+reg_class rarg4_bits64_reg(R6_H, R6);
+// Thread register, 'written' by tlsLoadP, see there.
+reg_class thread_bits64_reg(R16_H, R16);
+
+reg_class r19_bits64_reg(R19_H, R19);
+
+// 64 bit registers that can be read and written i.e. these registers
+// can be dest (or src) of normal instructions.
+reg_class bits64_reg_rw(
+/*R0_H,  R0*/     // R0
+/*R1_H,  R1*/     // SP
+  R2_H,  R2,      // TOC
+  R3_H,  R3,
+  R4_H,  R4,
+  R5_H,  R5,
+  R6_H,  R6,
+  R7_H,  R7,
+  R8_H,  R8,
+  R9_H,  R9,
+  R10_H, R10,
+  R11_H, R11,
+  R12_H, R12,
+/*R13_H, R13*/   // system thread id
+  R14_H, R14,
+  R15_H, R15,
+/*R16_H, R16*/   // R16_thread
+  R17_H, R17,
+  R18_H, R18,
+  R19_H, R19,
+  R20_H, R20,
+  R21_H, R21,
+  R22_H, R22,
+  R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28,
+/*R29_H, R29*/
+/*R30_H, R30*/
+  R31_H, R31
+);
+
+// 64 bit registers used excluding r2, r11 and r12
+// Used to hold the TOC to avoid collisions with expanded LeafCall which uses
+// r2, r11 and r12 internally.
+reg_class bits64_reg_leaf_call(
+/*R0_H,  R0*/     // R0
+/*R1_H,  R1*/     // SP
+/*R2_H,  R2*/     // TOC
+  R3_H,  R3,
+  R4_H,  R4,
+  R5_H,  R5,
+  R6_H,  R6,
+  R7_H,  R7,
+  R8_H,  R8,
+  R9_H,  R9,
+  R10_H, R10,
+/*R11_H, R11*/
+/*R12_H, R12*/
+/*R13_H, R13*/   // system thread id
+  R14_H, R14,
+  R15_H, R15,
+/*R16_H, R16*/   // R16_thread
+  R17_H, R17,
+  R18_H, R18,
+  R19_H, R19,
+  R20_H, R20,
+  R21_H, R21,
+  R22_H, R22,
+  R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28,
+/*R29_H, R29*/
+/*R30_H, R30*/
+  R31_H, R31
+);
+
+// Used to hold the TOC to avoid collisions with expanded DynamicCall
+// which uses r19 as inline cache internally and expanded LeafCall which uses
+// r2, r11 and r12 internally.
+reg_class bits64_constant_table_base(
+/*R0_H,  R0*/     // R0
+/*R1_H,  R1*/     // SP
+/*R2_H,  R2*/     // TOC
+  R3_H,  R3,
+  R4_H,  R4,
+  R5_H,  R5,
+  R6_H,  R6,
+  R7_H,  R7,
+  R8_H,  R8,
+  R9_H,  R9,
+  R10_H, R10,
+/*R11_H, R11*/
+/*R12_H, R12*/
+/*R13_H, R13*/   // system thread id
+  R14_H, R14,
+  R15_H, R15,
+/*R16_H, R16*/   // R16_thread
+  R17_H, R17,
+  R18_H, R18,
+/*R19_H, R19*/
+  R20_H, R20,
+  R21_H, R21,
+  R22_H, R22,
+  R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28,
+/*R29_H, R29*/
+/*R30_H, R30*/
+  R31_H, R31
+);
+
+// 64 bit registers that can only be read i.e. these registers can
+// only be src of all instructions.
+reg_class bits64_reg_ro(
+/*R0_H,  R0*/     // R0
+  R1_H,  R1,
+  R2_H,  R2,       // TOC
+  R3_H,  R3,
+  R4_H,  R4,
+  R5_H,  R5,
+  R6_H,  R6,
+  R7_H,  R7,
+  R8_H,  R8,
+  R9_H,  R9,
+  R10_H, R10,
+  R11_H, R11,
+  R12_H, R12,
+/*R13_H, R13*/   // system thread id
+  R14_H, R14,
+  R15_H, R15,
+  R16_H, R16,    // R16_thread
+  R17_H, R17,
+  R18_H, R18,
+  R19_H, R19,
+  R20_H, R20,
+  R21_H, R21,
+  R22_H, R22,
+  R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28,
+/*R29_H, R29*/ // TODO: let allocator handle TOC!!
+/*R30_H, R30,*/
+  R31_H, R31
+);
+
+// Complement-required-in-pipeline operands.
+reg_class bits64_reg_ro_not_complement (
+/*R0_H,  R0*/     // R0
+  R1_H,  R1,      // SP
+  R2_H,  R2,      // TOC
+  R3_H,  R3,
+  R4_H,  R4,
+  R5_H,  R5,
+  R6_H,  R6,
+  R7_H,  R7,
+  R8_H,  R8,
+  R9_H,  R9,
+  R10_H, R10,
+  R11_H, R11,
+  R12_H, R12,
+/*R13_H, R13*/   // system thread id
+  R14_H, R14,
+  R15_H, R15,
+  R16_H, R16,    // R16_thread
+  R17_H, R17,
+  R18_H, R18,
+  R19_H, R19,
+  R20_H, R20,
+  R21_H, R21,
+  R22_H, R22,
+/*R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28,*/
+/*R29_H, R29*/ // TODO: let allocator handle TOC!!
+/*R30_H, R30,*/
+  R31_H, R31
+);
+
+// Complement-required-in-pipeline operands.
+// This register mask is used for the trap instructions that implement
+// the null checks on AIX. The trap instruction first computes the
+// complement of the value it shall trap on. Because of this, the
+// instruction cannot be scheduled in the same cycle as another
+// instruction reading the normal value of the same register. So we
+// force the value to be checked into 'bits64_reg_ro_not_complement'
+// and then copy it to 'bits64_reg_ro_complement' for the trap.
+reg_class bits64_reg_ro_complement (
+  R23_H, R23,
+  R24_H, R24,
+  R25_H, R25,
+  R26_H, R26,
+  R27_H, R27,
+  R28_H, R28
+);
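+
+// For illustration, the resulting pattern is roughly (register and
+// mnemonic chosen only as an example):
+//   mr    R23, R3     // copy the value into the complement class
+//   tdeqi R23, 0      // trap instruction implementing the null check
+// Keeping the trap's source register in a disjoint class avoids the
+// scheduling conflict described above.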
+
+
+// ----------------------------
+// Special Class for Condition Code Flags Register
+
+reg_class int_flags(
+/*CCR0*/             // scratch
+/*CCR1*/             // scratch
+/*CCR2*/             // nv!
+/*CCR3*/             // nv!
+/*CCR4*/             // nv!
+  CCR5,
+  CCR6,
+  CCR7
+);
+
+reg_class int_flags_CR0(CCR0);
+reg_class int_flags_CR1(CCR1);
+reg_class int_flags_CR6(CCR6);
+reg_class ctr_reg(SR_CTR);
+
+// ----------------------------
+// Float Register Classes
+// ----------------------------
+
+reg_class flt_reg(
+/*F0*/              // scratch
+  F1,
+  F2,
+  F3,
+  F4,
+  F5,
+  F6,
+  F7,
+  F8,
+  F9,
+  F10,
+  F11,
+  F12,
+  F13,
+  F14,              // nv!
+  F15,              // nv!
+  F16,              // nv!
+  F17,              // nv!
+  F18,              // nv!
+  F19,              // nv!
+  F20,              // nv!
+  F21,              // nv!
+  F22,              // nv!
+  F23,              // nv!
+  F24,              // nv!
+  F25,              // nv!
+  F26,              // nv!
+  F27,              // nv!
+  F28,              // nv!
+  F29,              // nv!
+  F30,              // nv!
+  F31               // nv!
+);
+
+// Double precision float registers have virtual `high halves' that
+// are needed by the allocator.
+reg_class dbl_reg(
+/*F0,  F0_H*/     // scratch
+  F1,  F1_H,
+  F2,  F2_H,
+  F3,  F3_H,
+  F4,  F4_H,
+  F5,  F5_H,
+  F6,  F6_H,
+  F7,  F7_H,
+  F8,  F8_H,
+  F9,  F9_H,
+  F10, F10_H,
+  F11, F11_H,
+  F12, F12_H,
+  F13, F13_H,
+  F14, F14_H,    // nv!
+  F15, F15_H,    // nv!
+  F16, F16_H,    // nv!
+  F17, F17_H,    // nv!
+  F18, F18_H,    // nv!
+  F19, F19_H,    // nv!
+  F20, F20_H,    // nv!
+  F21, F21_H,    // nv!
+  F22, F22_H,    // nv!
+  F23, F23_H,    // nv!
+  F24, F24_H,    // nv!
+  F25, F25_H,    // nv!
+  F26, F26_H,    // nv!
+  F27, F27_H,    // nv!
+  F28, F28_H,    // nv!
+  F29, F29_H,    // nv!
+  F30, F30_H,    // nv!
+  F31, F31_H     // nv!
+);
+
+ %}
+
+//----------DEFINITION BLOCK---------------------------------------------------
+// Define name --> value mappings to inform the ADLC of an integer valued name
+// Current support includes integer values in the range [0, 0x7FFFFFFF]
+// Format:
+//        int_def  <name>         ( <int_value>, <expression>);
+// Generated Code in ad_<arch>.hpp
+//        #define  <name>   (<expression>)
+//        // value == <int_value>
+// Generated code in ad_<arch>.cpp adlc_verification()
+//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
+//
+definitions %{
+  // The default cost (of an ALU instruction).
+  int_def DEFAULT_COST_LOW        (     30,      30);
+  int_def DEFAULT_COST            (    100,     100);
+  int_def HUGE_COST               (1000000, 1000000);
+
+  // Memory refs
+  int_def MEMORY_REF_COST_LOW     (    200, DEFAULT_COST * 2);
+  int_def MEMORY_REF_COST         (    300, DEFAULT_COST * 3);
+
+  // Branches are even more expensive.
+  int_def BRANCH_COST             (    900, DEFAULT_COST * 9);
+  int_def CALL_COST               (   1300, DEFAULT_COST * 13);
+%}
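+
+// For example, per the scheme described above, the DEFAULT_COST entry
+// expands to roughly:
+//   ad_<arch>.hpp:  #define DEFAULT_COST (100)
+//   ad_<arch>.cpp:  assert(DEFAULT_COST == 100, "Expect (100) to equal 100");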
+
+
+//----------SOURCE BLOCK-------------------------------------------------------
+// This is a block of C++ code which provides values, functions, and
+// definitions necessary in the rest of the architecture description.
+source_hpp %{
+  // Returns true if Node n is followed by a MemBar node that 
+  // will do an acquire. If so, this node must not do the acquire
+  // operation.
+  bool followed_by_acquire(const Node *n);
+%}
+
+source %{
+
+// Optimize load-acquire.
+//
+// Check if the acquire is unnecessary because it is followed by an
+// operation that does an acquire anyway.
+// Walk the pattern:
+//
+//      n: Load.acq
+//           |
+//      MemBarAcquire
+//       |         |
+//  Proj(ctrl)  Proj(mem)
+//       |         |
+//   MemBarRelease/Volatile
+// 
+bool followed_by_acquire(const Node *load) {
+  assert(load->is_Load(), "So far implemented only for loads.");
+
+  // Find MemBarAcquire.
+  const Node *mba = NULL;         
+  for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
+    const Node *out = load->fast_out(i);
+    if (out->Opcode() == Op_MemBarAcquire) {
+      if (out->in(0) == load) continue; // Skip control edge, membar should be found via precedence edge.
+      mba = out;
+      break;
+    }
+  }
+  if (!mba) return false;
+
+  // Find following MemBar node.
+  //
+  // The following node must be reachable by both a control AND a memory
+  // edge to ensure that no other operations are in between the two nodes.
+  //
+  // So first get the Proj node, mem_proj, to use it to iterate forward.
+  Node *mem_proj = NULL;
+  for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
+    mem_proj = mba->fast_out(i);      // Throw out-of-bounds if proj not found
+    assert(mem_proj->is_Proj(), "only projections here");
+    ProjNode *proj = mem_proj->as_Proj();
+    if (proj->_con == TypeFunc::Memory &&
+        !Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
+      break;
+  }
+  assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");
+
+  // Search MemBar behind Proj. If there are other memory operations
+  // behind the Proj, we lose.
+  for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
+    Node *x = mem_proj->fast_out(j);
+    // Proj might have an edge to a store or load node which precedes the membar.
+    if (x->is_Mem()) return false;
+
+    // On PPC64 release and volatile are implemented by an instruction
+    // that also has acquire semantics. I.e. there is no need for an
+    // acquire before these.
+    int xop = x->Opcode();
+    if (xop == Op_MemBarRelease || xop == Op_MemBarVolatile) {
+      // Make sure we're not missing Call/Phi/MergeMem by checking
+      // control edges. The control edge must directly lead back
+      // to the MemBarAcquire.
+      Node *ctrl_proj = x->in(0);
+      if (ctrl_proj->is_Proj() && ctrl_proj->in(0) == mba) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
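+
+// Usage sketch (illustrative only): a load-acquire match rule can consult
+// this helper in its predicate and pick the cheaper encoding when the
+// following membar orders the access anyway, e.g.:
+//   predicate(!followed_by_acquire(n));  // variant emitting twi/isync
+//   predicate(followed_by_acquire(n));   // plain load, acquire elided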
+
+#define __ _masm.
+
+// Tertiary op of a LoadP or StoreP encoding.
+#define REGP_OP true
+
+// ****************************************************************************
+
+// REQUIRED FUNCTIONALITY
+
+// !!!!! Special hack to get all type of calls to specify the byte offset
+//       from the start of the call to the point where the return address
+//       will point.
+
+// PPC port: Removed use of lazy constant construct.
+
+int MachCallStaticJavaNode::ret_addr_offset() {
+  // It's only a single branch-and-link instruction.
+  return 4;
+}
+
+int MachCallDynamicJavaNode::ret_addr_offset() {
+  // Offset is 4 with postalloc expanded calls (bl is one instruction). We use
+  // postalloc expanded calls if we use inline caches and do not update method data.
+  if (UseInlineCaches)
+    return 4;
+
+  int vtable_index = this->_vtable_index;
+  if (vtable_index < 0) {
+    // Must be invalid_vtable_index, not nonvirtual_vtable_index.
+    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
+    return 12;
+  } else {
+    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
+    return 24;
+  }
+}
+
+int MachCallRuntimeNode::ret_addr_offset() {
+  return 40;
+}
+
+//=============================================================================
+
+// condition code conversions
+
+static int cc_to_boint(int cc) {
+  return Assembler::bcondCRbiIs0 | (cc & 8);
+}
+
+static int cc_to_inverse_boint(int cc) {
+  return Assembler::bcondCRbiIs0 | (8-(cc & 8));
+}
+
+static int cc_to_biint(int cc, int flags_reg) {
+  return (flags_reg << 2) | (cc & 3);
+}
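+
+// Worked example: for CCR0 (flags_reg == 0) and cc == 2 (the EQ bit within
+// a CR field), cc_to_biint(2, 0) == (0 << 2) | (2 & 3) == 2, i.e. the BI
+// field selects bit 2 (EQ) of CR0.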
+
+//=============================================================================
+
+// Compute padding required for nodes which need alignment. The padding
+// is the number of bytes (not instructions) which will be inserted before
+// the instruction. The padding must match the size of a NOP instruction.
+
+int string_indexOf_imm1_charNode::compute_padding(int current_offset) const {
+  return (3*4-current_offset)&31;
+}
+
+int string_indexOf_imm1Node::compute_padding(int current_offset) const {
+  return (2*4-current_offset)&31;
+}
+
+int string_indexOf_immNode::compute_padding(int current_offset) const {
+  return (3*4-current_offset)&31;
+}
+
+int string_indexOfNode::compute_padding(int current_offset) const {
+  return (1*4-current_offset)&31;
+}
+
+int string_compareNode::compute_padding(int current_offset) const {
+  return (4*4-current_offset)&31;
+}
+
+int string_equals_immNode::compute_padding(int current_offset) const {
+  if (opnd_array(3)->constant() < 16) return 0; // Don't insert nops for short version (loop completely unrolled).
+  return (2*4-current_offset)&31;
+}
+
+int string_equalsNode::compute_padding(int current_offset) const {
+  return (7*4-current_offset)&31;
+}
+
+int inlineCallClearArrayNode::compute_padding(int current_offset) const {
+  return (2*4-current_offset)&31;
+}
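+
+// Worked example: with current_offset == 8, string_indexOf_imm1_charNode
+// pads by (3*4 - 8) & 31 == 4 bytes, i.e. one nop; after padding the node
+// starts at an offset congruent to 12 modulo 32.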
+
+//=============================================================================
+
+// Indicate if the safepoint node needs the polling page as an input.
+bool SafePointNode::needs_polling_address_input() {
+  // The address is loaded from the thread by a separate node.
+  return true;
+}
+
+//=============================================================================
+
+// Emit an interrupt that is caught by the debugger (for debugging compiler).
+void emit_break(CodeBuffer &cbuf) {
+  MacroAssembler _masm(&cbuf);
+  __ illtrap();
+}
+
+#ifndef PRODUCT
+void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  st->print("BREAKPOINT");
+}
+#endif
+
+void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  emit_break(cbuf);
+}
+
+uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
+  return MachNode::size(ra_);
+}
+
+//=============================================================================
+
+void emit_nop(CodeBuffer &cbuf) {
+  MacroAssembler _masm(&cbuf);
+  __ nop();
+}
+
+static inline void emit_long(CodeBuffer &cbuf, int value) {
+  *((int*)(cbuf.insts_end())) = value;
+  cbuf.set_insts_end(cbuf.insts_end() + BytesPerInstWord);
+}
+
+//=============================================================================
+
+// Emit a trampoline stub for a call to a target which is too far away.
+//
+// code sequences:
+//
+// call-site:
+//   branch-and-link to <destination> or <trampoline stub>
+//
+// Related trampoline stub for this call-site in the stub section:
+//   load the call target from the constant pool
+//   branch via CTR (LR/link still points to the call-site above)
+
+const uint trampoline_stub_size = 6 * BytesPerInstWord;
+
+void emit_trampoline_stub(MacroAssembler &_masm, int destination_toc_offset, int insts_call_instruction_offset) {
+  // Start the stub.
+  address stub = __ start_a_stub(Compile::MAX_stubs_size/2);
+  if (stub == NULL) {
+    Compile::current()->env()->record_out_of_memory_failure();
+    return;
+  }
+
+  // For java_to_interp stubs we use R11_scratch1 as scratch register
+  // and in call trampoline stubs we use R12_scratch2. This way we
+  // can distinguish them (see is_NativeCallTrampolineStub_at()).
+  Register reg_scratch = R12_scratch2;
+
+  // Create a trampoline stub relocation which relates this trampoline stub
+  // with the call instruction at insts_call_instruction_offset in the
+  // instructions code-section.
+  __ relocate(trampoline_stub_Relocation::spec(__ code()->insts()->start() + insts_call_instruction_offset));
+  const int stub_start_offset = __ offset();
+
+  // Now, create the trampoline stub's code:
+  // - load the TOC
+  // - load the call target from the constant pool
+  // - call
+  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
+  __ ld_largeoffset_unchecked(reg_scratch, destination_toc_offset, reg_scratch, false);
+  __ mtctr(reg_scratch);
+  __ bctr();
+
+  const address stub_start_addr = __ addr_at(stub_start_offset);
+
+  // FIXME: Assert that the trampoline stub can be identified and patched.
+
+  // Assert that the encoded destination_toc_offset can be identified and that it is correct.
+  assert(destination_toc_offset == NativeCallTrampolineStub_at(stub_start_addr)->destination_toc_offset(),
+         "encoded offset into the constant pool must match");
+  // Trampoline_stub_size should be good.
+  assert((uint)(__ offset() - stub_start_offset) <= trampoline_stub_size, "should be good size");
+  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
+
+  // End the stub.
+  __ end_a_stub();
+}
+
+// Size of the trampoline stub. This doesn't need to be accurate, but it
+// must be greater than or equal to the real size of the stub.
+// Used for optimization in Compile::Shorten_branches.
+uint size_call_trampoline() {
+  return trampoline_stub_size;
+}
+
+// Number of relocation entries needed by trampoline stub.
+// Used for optimization in Compile::Shorten_branches.
+uint reloc_call_trampoline() {
+  return 5;
+}
+
+//=============================================================================
+
+// Emit an inline branch-and-link call and a related trampoline stub.
+//
+// code sequences:
+//
+// call-site:
+//   branch-and-link to <destination> or <trampoline stub>
+//
+// Related trampoline stub for this call-site in the stub section:
+//   load the call target from the constant pool
+//   branch via CTR (LR/link still points to the call-site above)
+//
+
+typedef struct {
+  int insts_call_instruction_offset;
+  int ret_addr_offset;
+} EmitCallOffsets;
+
+// Emit a branch-and-link instruction that branches to a trampoline.
+// - Remember the offset of the branch-and-link instruction.
+// - Add a relocation at the branch-and-link instruction.
+// - Emit a branch-and-link.
+// - Remember the return pc offset.
+EmitCallOffsets emit_call_with_trampoline_stub(MacroAssembler &_masm, address entry_point, relocInfo::relocType rtype) {
+  EmitCallOffsets offsets = { -1, -1 };
+  const int start_offset = __ offset();
+  offsets.insts_call_instruction_offset = __ offset();
+
+  // No entry point given, use the current pc.
+  if (entry_point == NULL) entry_point = __ pc();
+
+  if (!Compile::current()->in_scratch_emit_size()) {
+    // Put the entry point as a constant into the constant pool.
+    const address entry_point_toc_addr   = __ address_constant(entry_point, RelocationHolder::none);
+    const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
+
+    // Emit the trampoline stub which will be related to the branch-and-link below.
+    emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
+    __ relocate(rtype);
+  }
+
+  // Note: At this point we do not have the address of the trampoline
+  // stub, and the entry point might be too far away for bl, so __ pc()
+  // serves as dummy and the bl will be patched later.
+  __ bl((address) __ pc());
+
+  offsets.ret_addr_offset = __ offset() - start_offset;
+
+  return offsets;
+}
+
+//=============================================================================
+
+// Factory for creating loadConL* nodes for large/small constant pool.
+
+static inline jlong replicate_immF(float con) {
+  // Replicate float con 2 times and pack into vector.
+  int val = *((int*)&con);
+  jlong lval = val;
+  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
+  return lval;
+}
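+
+// Worked example: 1.0f has the bit pattern 0x3F800000, so
+// replicate_immF(1.0f) returns 0x3F8000003F800000, the float packed into
+// both halves of the jlong.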
+
+//=============================================================================
+
+const RegMask& MachConstantBaseNode::_out_RegMask = BITS64_CONSTANT_TABLE_BASE_mask();
+int Compile::ConstantTable::calculate_table_base_offset() const {
+  return 0;  // absolute addressing, no offset
+}
+
+bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  Compile *C = ra_->C;
+
+  iRegPdstOper *op_dst = new (C) iRegPdstOper();
+  MachNode *m1 = new (C) loadToc_hiNode();
+  MachNode *m2 = new (C) loadToc_loNode();
+
+  m1->add_req(NULL);
+  m2->add_req(NULL, m1);
+  m1->_opnds[0] = op_dst;
+  m2->_opnds[0] = op_dst;
+  m2->_opnds[1] = op_dst;
+  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+  nodes->push(m1);
+  nodes->push(m2);
+}
+
+void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+  // This node is postalloc expanded, so emit() is never called.
+  ShouldNotReachHere();
+}
+
+uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
+  return 0;
+}
+
+#ifndef PRODUCT
+void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
+  st->print("-- \t// MachConstantBaseNode (empty encoding)");
+}
+#endif
+
+//=============================================================================
+
+#ifndef PRODUCT
+void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  Compile* C = ra_->C;
+  const long framesize = C->frame_slots() << LogBytesPerInt;
+
+  st->print("PROLOG\n\t");
+  if (C->need_stack_bang(framesize)) {
+    st->print("stack_overflow_check\n\t");
+  }
+
+  if (!false /* TODO: PPC port C->is_frameless_method()*/) {
+    st->print("save return pc\n\t");
+    st->print("push frame %d\n\t", -framesize);
+  }
+}
+#endif
+
+// Macro used instead of the common __ to emulate the pipes of PPC.
+// Instead of e.g. __ ld(...) one has to write ___(ld) ld(...). This enables the
+// micro scheduler to cope with "hand written" assembler like in the prolog. Though
+// no scheduling of this code is possible, the micro scheduler is aware of the
+// code and can update its internal data. The following mechanism is used to achieve this:
+// The micro scheduler calls size() on each compound node during scheduling. size() does a
+// dummy emit, and only during this dummy emit is C->hb_scheduling() non-NULL.
+#if 0 // TODO: PPC port
+#define ___(op) if (UsePower6SchedulerPPC64 && C->hb_scheduling())                    \
+                  C->hb_scheduling()->_pdScheduling->PdEmulatePipe(ppc64Opcode_##op); \
+                _masm.
+#define ___stop if (UsePower6SchedulerPPC64 && C->hb_scheduling())                    \
+                  C->hb_scheduling()->_pdScheduling->PdEmulatePipe(archOpcode_none)
+#define ___advance if (UsePower6SchedulerPPC64 && C->hb_scheduling())                 \
+                  C->hb_scheduling()->_pdScheduling->advance_offset
+#else
+#define ___(op) if (UsePower6SchedulerPPC64)                                          \
+                  Unimplemented();                                                    \
+                _masm.
+#define ___stop if (UsePower6SchedulerPPC64)                                          \
+                  Unimplemented()
+#define ___advance if (UsePower6SchedulerPPC64)                                       \
+                  Unimplemented()
+#endif
+
+void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+
+  const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
+  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
+
+  const bool method_is_frameless      = false /* TODO: PPC port C->is_frameless_method()*/;
+
+  const Register return_pc            = R20; // Must match return_addr() in frame section.
+  const Register callers_sp           = R21;
+  const Register push_frame_temp      = R22;
+  const Register toc_temp             = R23;
+  assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp);
+
+  if (method_is_frameless) {
+    // Add nop at beginning of all frameless methods to prevent any
+    // oop instructions from getting overwritten by make_not_entrant
+    // (patching attempt would fail).
+    ___(nop) nop();
+  } else {
+    // Get return pc.
+    ___(mflr) mflr(return_pc);
+  }
+
+  // Calls to C2R adapters often do not accept exceptional returns.
+  // We require that their callers must bang for them. But be
+  // careful, because some VM calls (such as call site linkage) can
+  // use several kilobytes of stack. But the stack safety zone should
+  // account for that. See bugs 4446381, 4468289, 4497237.
+  if (C->need_stack_bang(framesize) && UseStackBanging) {
+    // Unfortunately we cannot use the function provided in
+    // assembler.cpp as we have to emulate the pipes. So I had to
+    // insert the code of generate_stack_overflow_check(), see
+    // assembler.cpp for some illuminative comments.
+    const int page_size = os::vm_page_size();
+    int bang_end = StackShadowPages*page_size;
+
+    // This is how far the previous frame's stack banging extended.
+    const int bang_end_safe = bang_end;
+
+    if (framesize > page_size) {
+      bang_end += framesize;
+    }
+
+    int bang_offset = bang_end_safe;
+
+    while (bang_offset <= bang_end) {
+      // Need at least one stack bang at end of shadow zone.
+
+      // Again I had to copy code, this time from assembler_ppc64.cpp,
+      // bang_stack_with_offset - see there for comments.
+
+      // Stack grows down, caller passes positive offset.
+      assert(bang_offset > 0, "must bang with positive offset");
+
+      long stdoffset = -bang_offset;
+
+      if (Assembler::is_simm(stdoffset, 16)) {
+        // Signed 16 bit offset, a simple std is ok.
+        if (UseLoadInstructionsForStackBangingPPC64) {
+          ___(ld) ld(R0,  (int)(signed short)stdoffset, R1_SP);
+        } else {
+          ___(std) std(R0, (int)(signed short)stdoffset, R1_SP);
+        }
+      } else if (Assembler::is_simm(stdoffset, 31)) {
+        // Use largeoffset calculations for addis & ld/std.
+        const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
+        const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);
+
+        Register tmp = R11;
+        ___(addis) addis(tmp, R1_SP, hi);
+        if (UseLoadInstructionsForStackBangingPPC64) {
+          ___(ld) ld(R0, lo, tmp);
+        } else {
+          ___(std) std(R0, lo, tmp);
+        }
+      } else {
+        ShouldNotReachHere();
+      }
+
+      bang_offset += page_size;
+    }
+    // R11 trashed
+  } // C->need_stack_bang(framesize) && UseStackBanging
+
+  unsigned int bytes = (unsigned int)framesize;
+  long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
+  ciMethod *currMethod = C->method();
+
+  // Optimized version for most common case.
+  if (UsePower6SchedulerPPC64 &&
+      !method_is_frameless && Assembler::is_simm((int)(-offset), 16) &&
+      !(false /* ConstantsALot TODO: PPC port*/)) {
+    ___(or) mr(callers_sp, R1_SP);
+    ___(std) std(return_pc, _abi(lr), R1_SP);
+    ___(stdu) stdu(R1_SP, -offset, R1_SP);
+    return;
+  }
+
+  if (!method_is_frameless) {
+    // Get callers sp.
+    ___(or) mr(callers_sp, R1_SP);
+
+    // Push method's frame, modifies SP.
+    assert(Assembler::is_uimm(framesize, 32U), "wrong type");
+    // The ABI is already accounted for in 'framesize' via the
+    // 'out_preserve' area.
+    Register tmp = push_frame_temp;
+    // Had to insert code of push_frame((unsigned int)framesize, push_frame_temp).
+    if (Assembler::is_simm(-offset, 16)) {
+      ___(stdu) stdu(R1_SP, -offset, R1_SP);
+    } else {
+      long x = -offset;
+      // Had to insert load_const(tmp, -offset).
+      ___(addis)  lis( tmp, (int)((signed short)(((x >> 32) & 0xffff0000) >> 16)));
+      ___(ori)    ori( tmp, tmp, ((x >> 32) & 0x0000ffff));
+      ___(rldicr) sldi(tmp, tmp, 32);
+      ___(oris)   oris(tmp, tmp, (x & 0xffff0000) >> 16);
+      ___(ori)    ori( tmp, tmp, (x & 0x0000ffff));
+
+      ___(stdux) stdux(R1_SP, R1_SP, tmp);
+    }
+  }
+#if 0 // TODO: PPC port
+  // For testing large constant pools, emit a lot of constants to constant pool.
+  // "Randomize" const_size.
+  if (ConstantsALot) {
+    const int num_consts = const_size();
+    for (int i = 0; i < num_consts; i++) {
+      __ long_constant(0xB0B5B00BBABE);
+    }
+  }
+#endif
+  if (!method_is_frameless) {
+    // Save return pc.
+    ___(std) std(return_pc, _abi(lr), callers_sp);
+  }
+}
+#undef ___
+#undef ___stop
+#undef ___advance
+
+uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
+  // Variable size. Determine dynamically.
+  return MachNode::size(ra_);
+}
+
+int MachPrologNode::reloc() const {
+  // Return number of relocatable values contained in this instruction.
+  return 1; // 1 reloc entry for load_const(toc).
+}
+
+//=============================================================================
+
+#ifndef PRODUCT
+void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  Compile* C = ra_->C;
+
+  st->print("EPILOG\n\t");
+  st->print("restore return pc\n\t");
+  st->print("pop frame\n\t");
+
+  if (do_polling() && C->is_method_compilation()) {
+    st->print("touch polling page\n\t");
+  }
+}
+#endif
+
+void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  Compile* C = ra_->C;
+  MacroAssembler _masm(&cbuf);
+
+  const long framesize = ((long)C->frame_slots()) << LogBytesPerInt;
+  assert(framesize >= 0, "negative frame-size?");
+
+  const bool method_needs_polling = do_polling() && C->is_method_compilation();
+  const bool method_is_frameless  = false /* TODO: PPC port C->is_frameless_method()*/;
+  const Register return_pc        = R11;
+  const Register polling_page     = R12;
+
+  if (!method_is_frameless) {
+    // Restore return pc relative to caller's sp.
+    __ ld(return_pc, ((int)framesize) + _abi(lr), R1_SP);
+  }
+
+  if (method_needs_polling) {
+    if (LoadPollAddressFromThread) {
+      // TODO: PPC port __ ld(polling_page, in_bytes(JavaThread::poll_address_offset()), R16_thread);
+      Unimplemented();
+    } else {
+      __ load_const_optimized(polling_page, (long)(address) os::get_polling_page()); // TODO: PPC port: get_standard_polling_page()
+    }
+  }
+
+  if (!method_is_frameless) {
+    // Move return pc to LR.
+    __ mtlr(return_pc);
+    // Pop frame (fixed frame-size).
+    __ addi(R1_SP, R1_SP, (int)framesize);
+  }
+
+  if (method_needs_polling) {
+    // We need to mark the code position where the load from the safepoint
+    // polling page was emitted as relocInfo::poll_return_type here.
+    __ relocate(relocInfo::poll_return_type);
+    __ load_from_polling_page(polling_page);
+  }
+}
+
+uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
+  // Variable size. Determine dynamically.
+  return MachNode::size(ra_);
+}
+
+int MachEpilogNode::reloc() const {
+  // Return number of relocatable values contained in this instruction.
+  return 1; // 1 for load_from_polling_page.
+}
+
+const Pipeline * MachEpilogNode::pipeline() const {
+  return MachNode::pipeline_class();
+}
+
+// This method seems to be obsolete. It is declared in machnode.hpp
+// and defined in all *.ad files, but it is never called. Should we
+// get rid of it?
+int MachEpilogNode::safepoint_offset() const {
+  assert(do_polling(), "no return for this epilog node");
+  return 0;
+}
+
+#if 0 // TODO: PPC port
+void MachLoadPollAddrLateNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
+  MacroAssembler _masm(&cbuf);
+  if (LoadPollAddressFromThread) {
+    _masm.ld(R11, in_bytes(JavaThread::poll_address_offset()), R16_thread);
+  } else {
+    _masm.nop();
+  }
+}
+
+uint MachLoadPollAddrLateNode::size(PhaseRegAlloc* ra_) const {
+  if (LoadPollAddressFromThread) {
+    return 4;
+  } else {
+    return 4;
+  }
+}
+
+#ifndef PRODUCT
+void MachLoadPollAddrLateNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
+  st->print_cr(" LD R11, PollAddressOffset, R16_thread \t// LoadPollAddressFromThread");
+}
+#endif
+
+const RegMask &MachLoadPollAddrLateNode::out_RegMask() const {
+  return RSCRATCH1_BITS64_REG_mask();
+}
+#endif // PPC port
+
+// =============================================================================
+
+// Figure out which register class each register belongs in: rc_int, rc_float or
+// rc_stack.
+enum RC { rc_bad, rc_int, rc_float, rc_stack };
+
+static enum RC rc_class(OptoReg::Name reg) {
+  // Return the register class for the given register. The given register
+  // reg is a <register>_num value, which is an index into the MachRegisterNumbers
+  // enumeration in adGlobals_ppc64.hpp.
+
+  if (reg == OptoReg::Bad) return rc_bad;
+
+  // We have 64 integer register halves, starting at index 0.
+  if (reg < 64) return rc_int;
+
+  // We have 64 floating-point register halves, starting at index 64.
+  if (reg < 64+64) return rc_float;
+
+  // Between float regs & stack are the flags regs.
+  assert(OptoReg::is_stack(reg), "blow up if spilling flags");
+
+  return rc_stack;
+}
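+
+// For example, with the index layout described above: an integer register
+// half such as R3_num lies below 64 and yields rc_int, a float register
+// half such as F1_num lies in [64, 128) and yields rc_float, and any
+// stack slot yields rc_stack.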
+
+static int ld_st_helper(CodeBuffer *cbuf, const char *op_str, uint opcode, int reg, int offset,
+                        bool do_print, Compile* C, outputStream *st) {
+
+  assert(opcode == Assembler::LD_OPCODE   ||
+         opcode == Assembler::STD_OPCODE  ||
+         opcode == Assembler::LWZ_OPCODE  ||
+         opcode == Assembler::STW_OPCODE  ||
+         opcode == Assembler::LFD_OPCODE  ||
+         opcode == Assembler::STFD_OPCODE ||
+         opcode == Assembler::LFS_OPCODE  ||
+         opcode == Assembler::STFS_OPCODE,
+         "opcode not supported");
+
+  if (cbuf) {
+    int d =
+      (Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
+        Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
+      : Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
+    emit_long(*cbuf, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
+  }
+#ifndef PRODUCT
+  else if (do_print) {
+    st->print("%-7s %s, [R1_SP + #%d+%d] \t// spill copy",
+              op_str,
+              Matcher::regName[reg],
+              offset, 0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/);
+  }
+#endif
+  return 4; // size
+}
+
+uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
+  Compile* C = ra_->C;
+
+  // Get registers to move.
+  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
+  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
+  OptoReg::Name dst_hi = ra_->get_reg_second(this);
+  OptoReg::Name dst_lo = ra_->get_reg_first(this);
+
+  enum RC src_hi_rc = rc_class(src_hi);
+  enum RC src_lo_rc = rc_class(src_lo);
+  enum RC dst_hi_rc = rc_class(dst_hi);
+  enum RC dst_lo_rc = rc_class(dst_lo);
+
+  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
+  if (src_hi != OptoReg::Bad)
+    assert((src_lo&1)==0 && src_lo+1==src_hi &&
+           (dst_lo&1)==0 && dst_lo+1==dst_hi,
+           "expected aligned-adjacent pairs");
+  // Generate spill code!
+  int size = 0;
+
+  if (src_lo == dst_lo && src_hi == dst_hi)
+    return size;            // Self copy, no move.
+
+  // --------------------------------------
+  // Memory->Memory Spill. Use R0 to hold the value.
+  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
+    int src_offset = ra_->reg2offset(src_lo);
+    int dst_offset = ra_->reg2offset(dst_lo);
+    if (src_hi != OptoReg::Bad) {
+      assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
+             "expected same type of move for high parts");
+      size += ld_st_helper(cbuf, "LD  ", Assembler::LD_OPCODE,  R0_num, src_offset, !do_size, C, st);
+      if (!cbuf && !do_size) st->print("\n\t");
+      size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
+    } else {
+      size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
+      if (!cbuf && !do_size) st->print("\n\t");
+      size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
+    }
+    return size;
+  }
+
+  // --------------------------------------
+  // Check for float->int copy; requires a trip through memory.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
+    Unimplemented();
+  }
+
+  // --------------------------------------
+  // Check for integer reg-reg copy.
+  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
+      Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
+      Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
+      size = (Rsrc != Rdst) ? 4 : 0;
+
+      if (cbuf) {
+        MacroAssembler _masm(cbuf);
+        if (size) {
+          __ mr(Rdst, Rsrc);
+        }
+      }
+#ifndef PRODUCT
+      else if (!do_size) {
+        if (size) {
+          st->print("%-7s %s, %s \t// spill copy", "MR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+        } else {
+          st->print("%-7s %s, %s \t// spill copy", "MR-NOP", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+        }
+      }
+#endif
+      return size;
+  }
+
+  // Check for integer store.
+  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
+    int dst_offset = ra_->reg2offset(dst_lo);
+    if (src_hi != OptoReg::Bad) {
+      assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
+             "expected same type of move for high parts");
+      size += ld_st_helper(cbuf, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
+    } else {
+      size += ld_st_helper(cbuf, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
+    }
+    return size;
+  }
+
+  // Check for integer load.
+  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
+    int src_offset = ra_->reg2offset(src_lo);
+    if (src_hi != OptoReg::Bad) {
+      assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
+             "expected same type of move for high parts");
+      size += ld_st_helper(cbuf, "LD  ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
+    } else {
+      size += ld_st_helper(cbuf, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
+    }
+    return size;
+  }
+
+  // Check for float reg-reg copy.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
+    if (cbuf) {
+      MacroAssembler _masm(cbuf);
+      FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
+      FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
+      __ fmr(Rdst, Rsrc);
+    }
+#ifndef PRODUCT
+    else if (!do_size) {
+      st->print("%-7s %s, %s \t// spill copy", "FMR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+    }
+#endif
+    return 4;
+  }
+
+  // Check for float store.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
+    int dst_offset = ra_->reg2offset(dst_lo);
+    if (src_hi != OptoReg::Bad) {
+      assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
+             "expected same type of move for high parts");
+      size += ld_st_helper(cbuf, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
+    } else {
+      size += ld_st_helper(cbuf, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
+    }
+    return size;
+  }
+
+  // Check for float load.
+  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
+    int src_offset = ra_->reg2offset(src_lo);
+    if (src_hi != OptoReg::Bad) {
+      assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
+             "expected same type of move for high parts");
+      size += ld_st_helper(cbuf, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
+    } else {
+      size += ld_st_helper(cbuf, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
+    }
+    return size;
+  }
+
+  // --------------------------------------------------------------------
+  // Check for hi bits still needing moving. Only happens for misaligned
+  // arguments to native calls.
+  if (src_hi == dst_hi)
+    return size;               // Self copy; no move.
+
+  assert(src_hi_rc != rc_bad && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
+  ShouldNotReachHere(); // Unimplemented
+  return 0;
+}
+
+#ifndef PRODUCT
+void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  if (!ra_)
+    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
+  else
+    implementation(NULL, ra_, false, st);
+}
+#endif
+
+void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  implementation(&cbuf, ra_, false, NULL);
+}
+
+uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
+  return implementation(NULL, ra_, true, NULL);
+}
+
+#if 0 // TODO: PPC port
+ArchOpcode MachSpillCopyNode_archOpcode(MachSpillCopyNode *n, PhaseRegAlloc *ra_) {
+#ifndef PRODUCT
+  if (ra_->node_regs_max_index() == 0) return archOpcode_undefined;
+#endif
+  assert(ra_->node_regs_max_index() != 0, "");
+
+  // Get registers to move.
+  OptoReg::Name src_hi = ra_->get_reg_second(n->in(1));
+  OptoReg::Name src_lo = ra_->get_reg_first(n->in(1));
+  OptoReg::Name dst_hi = ra_->get_reg_second(n);
+  OptoReg::Name dst_lo = ra_->get_reg_first(n);
+
+  enum RC src_lo_rc = rc_class(src_lo);
+  enum RC dst_lo_rc = rc_class(dst_lo);
+
+  if (src_lo == dst_lo && src_hi == dst_hi)
+    return ppc64Opcode_none;            // Self copy, no move.
+
+  // --------------------------------------
+  // Memory->Memory Spill. Use R0 to hold the value.
+  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
+    return ppc64Opcode_compound;
+  }
+
+  // --------------------------------------
+  // Check for float->int copy; requires a trip through memory.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
+    Unimplemented();
+  }
+
+  // --------------------------------------
+  // Check for integer reg-reg copy.
+  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
+    Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
+    Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
+    if (Rsrc == Rdst) {
+      return ppc64Opcode_none;
+    } else {
+      return ppc64Opcode_or;
+    }
+  }
+
+  // Check for integer store.
+  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
+    if (src_hi != OptoReg::Bad) {
+      return ppc64Opcode_std;
+    } else {
+      return ppc64Opcode_stw;
+    }
+  }
+
+  // Check for integer load.
+  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
+    if (src_hi != OptoReg::Bad) {
+      return ppc64Opcode_ld;
+    } else {
+      return ppc64Opcode_lwz;
+    }
+  }
+
+  // Check for float reg-reg copy.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
+    return ppc64Opcode_fmr;
+  }
+
+  // Check for float store.
+  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
+    if (src_hi != OptoReg::Bad) {
+      return ppc64Opcode_stfd;
+    } else {
+      return ppc64Opcode_stfs;
+    }
+  }
+
+  // Check for float load.
+  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
+    if (src_hi != OptoReg::Bad) {
+      return ppc64Opcode_lfd;
+    } else {
+      return ppc64Opcode_lfs;
+    }
+  }
+
+  // --------------------------------------------------------------------
+  // Check for hi bits still needing moving. Only happens for misaligned
+  // arguments to native calls.
+  if (src_hi == dst_hi)
+    return ppc64Opcode_none;               // Self copy; no move.
+
+  ShouldNotReachHere();
+  return ppc64Opcode_undefined;
+}
+#endif // PPC port
+
+#ifndef PRODUCT
+void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  st->print("NOP \t// %d nops to pad for loops.", _count);
+}
+#endif
+
+void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *) const {
+  MacroAssembler _masm(&cbuf);
+  // _count contains the number of nops needed for padding.
+  for (int i = 0; i < _count; i++) {
+    __ nop();
+  }
+}
+
+uint MachNopNode::size(PhaseRegAlloc *ra_) const {
+   return _count * 4;
+}
+
+#ifndef PRODUCT
+void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg = ra_->get_reg_first(this);
+  st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
+}
+#endif
+
+void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  MacroAssembler _masm(&cbuf);
+
+  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
+  int reg    = ra_->get_encode(this);
+
+  if (Assembler::is_simm(offset, 16)) {
+    __ addi(as_Register(reg), R1, offset);
+  } else {
+    ShouldNotReachHere();
+  }
+}
+
+uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
+  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
+  return 4;
+}
+
+#ifndef PRODUCT
+void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
+  st->print_cr("---- MachUEPNode ----");
+  st->print_cr("...");
+}
+#endif
+
+void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+  // This is the unverified entry point.
+  MacroAssembler _masm(&cbuf);
+
+  // Inline_cache contains a klass.
+  Register ic_klass       = as_Register(Matcher::inline_cache_reg_encode());
+  Register receiver_klass = R0;  // tmp
+
+  assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
+  assert(R11_scratch1 == R11, "need prologue scratch register");
+
+  // Check for NULL argument if we don't have implicit null checks.
+  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      __ trap_null_check(R3_ARG1);
+    } else {
+      Label valid;
+      __ cmpdi(CCR0, R3_ARG1, 0);
+      __ bne_predict_taken(CCR0, valid);
+      // We have a null argument, branch to ic_miss_stub.
+      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
+                           relocInfo::runtime_call_type);
+      __ bind(valid);
+    }
+  }
+  // Assume argument is not NULL, load klass from receiver.
+  __ load_klass(receiver_klass, R3_ARG1);
+
+  if (TrapBasedICMissChecks) {
+    __ trap_ic_miss_check(receiver_klass, ic_klass);
+  } else {
+    Label valid;
+    __ cmpd(CCR0, receiver_klass, ic_klass);
+    __ beq_predict_taken(CCR0, valid);
+    // We have an unexpected klass, branch to ic_miss_stub.
+    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
+                         relocInfo::runtime_call_type);
+    __ bind(valid);
+  }
+
+  // Argument is valid and klass is as expected, continue.
+}
+
+#if 0 // TODO: PPC port
+// Optimize UEP code on z (save a load_const() call in main path).
+int MachUEPNode::ep_offset() {
+  return 0;
+}
+#endif
+
+uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
+  // Variable size. Determine dynamically.
+  return MachNode::size(ra_);
+}
+
+//=============================================================================
+
+uint size_exception_handler() {
+  // The exception_handler is a b64_patchable.
+  return MacroAssembler::b64_patchable_size;
+}
+
+uint size_deopt_handler() {
+  // The deopt_handler is a bl64_patchable.
+  return MacroAssembler::bl64_patchable_size;
+}
+
+int emit_exception_handler(CodeBuffer &cbuf) {
+  MacroAssembler _masm(&cbuf);
+
+  address base = __ start_a_stub(size_exception_handler());
+  if (base == NULL) return 0; // CodeBuffer::expand failed
+
+  int offset = __ offset();
+  __ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
+                       relocInfo::runtime_call_type);
+  assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
+  __ end_a_stub();
+
+  return offset;
+}
+
+// The deopt_handler is like the exception handler, but it calls to
+// the deoptimization blob instead of jumping to the exception blob.
+int emit_deopt_handler(CodeBuffer& cbuf) {
+  MacroAssembler _masm(&cbuf);
+
+  address base = __ start_a_stub(size_deopt_handler());
+  if (base == NULL) return 0; // CodeBuffer::expand failed
+
+  int offset = __ offset();
+  __ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
+                        relocInfo::runtime_call_type);
+  assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
+  __ end_a_stub();
+
+  return offset;
+}
+
+//=============================================================================
+
+// Use a frame slots bias for frameless methods if accessing the stack.
+static int frame_slots_bias(int reg_enc, PhaseRegAlloc* ra_) {
+  if (as_Register(reg_enc) == R1_SP) {
+    return 0; // TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes();
+  }
+  return 0;
+}
+
+const bool Matcher::match_rule_supported(int opcode) {
+  if (!has_match_rule(opcode))
+    return false;
+
+  switch (opcode) {
+  case Op_SqrtD:
+    return VM_Version::has_fsqrt();
+  case Op_CountLeadingZerosI:
+  case Op_CountLeadingZerosL:
+  case Op_CountTrailingZerosI:
+  case Op_CountTrailingZerosL:
+    if (!UseCountLeadingZerosInstructionsPPC64)
+      return false;
+    break;
+
+  case Op_PopCountI:
+  case Op_PopCountL:
+    return (UsePopCountInstruction && VM_Version::has_popcntw());
+
+  case Op_StrComp:
+    return SpecialStringCompareTo;
+  case Op_StrEquals:
+    return SpecialStringEquals;
+  case Op_StrIndexOf:
+    return SpecialStringIndexOf;
+  }
+
+  return true;  // By default, match rules are supported.
+}
+
+int Matcher::regnum_to_fpu_offset(int regnum) {
+  // No user for this method?
+  Unimplemented();
+  return 999;
+}
+
+const bool Matcher::convL2FSupported(void) {
+  // fcfids can do the conversion (>= Power7).
+  // fcfid + frsp showed a rounding problem when the result should be 0x3f800001.
+  return VM_Version::has_fcfids(); // False means that conversion is done by runtime call.
+}
+
+// Vector width in bytes.
+const int Matcher::vector_width_in_bytes(BasicType bt) {
+  assert(MaxVectorSize == 8, "");
+  return 8;
+}
+
+// Vector ideal reg.
+const int Matcher::vector_ideal_reg(int size) {
+  assert(MaxVectorSize == 8 && size == 8, "");
+  return Op_RegL;
+}
+
+const int Matcher::vector_shift_count_ideal_reg(int size) {
+  fatal("vector shift is not supported");
+  return Node::NotAMachineReg;
+}
+
+// Limits on vector size (number of elements) loaded into vector.
+const int Matcher::max_vector_size(const BasicType bt) {
+  assert(is_java_primitive(bt), "only primitive type vectors");
+  return vector_width_in_bytes(bt)/type2aelembytes(bt);
+}
+
+const int Matcher::min_vector_size(const BasicType bt) {
+  return max_vector_size(bt); // Same as max.
+}
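+
+// Worked example: with the fixed 8-byte vector width above,
+// max_vector_size(T_INT) == 8 / 4 == 2 elements and
+// max_vector_size(T_BYTE) == 8 elements; min_vector_size is the same.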
+
+// PPC doesn't support misaligned vectors store/load.
+const bool Matcher::misaligned_vectors_ok() {
+  return false;
+}
+
+// PPC AES support not yet implemented
+const bool Matcher::pass_original_key_for_aes() {
+  return false;
+}
+
+// RETURNS: whether this branch offset is short enough that a short
+// branch can be used.
+//
+// If the platform does not provide any short branch variants, then
+// this method should return `false' for offset 0.
+//
+// `Compile::Fill_buffer' will decide on the basis of this information
+// whether to do the pass `Compile::Shorten_branches' at all.
+//
+// And `Compile::Shorten_branches' will decide on the basis of this
+// information whether to replace particular branch sites by short
+// ones.
+bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
+  // Is the offset within the range of a ppc64 pc relative branch?
+  bool b;
+
+  const int safety_zone = 3 * BytesPerInstWord;
+  b = Assembler::is_simm((offset<0 ? offset-safety_zone : offset+safety_zone),
+                         29 - 16 + 1 + 2);
+  return b;
+}
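+
+// Worked example: 29 - 16 + 1 + 2 == 16, so the test is is_simm(x, 16),
+// i.e. the range [-32768, 32767]. A forward offset of 30000 bytes becomes
+// 30012 with the 12-byte safety zone and still qualifies as short, while
+// an offset of 40000 does not.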
+
+const bool Matcher::isSimpleConstant64(jlong value) {
+  // Probably always true, even if a temp register is required.
+  return true;
+}
+/* TODO: PPC port
+// Make a new machine dependent decode node (with its operands).
+MachTypeNode *Matcher::make_decode_node(Compile *C) {
+  assert(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0,
+         "This method is only implemented for unscaled cOops mode so far");
+  MachTypeNode *decode = new (C) decodeN_unscaledNode();
+  decode->set_opnd_array(0, new (C) iRegPdstOper());
+  decode->set_opnd_array(1, new (C) iRegNsrcOper());
+  return decode;
+}
+*/
+// Threshold size for cleararray.
+const int Matcher::init_array_short_size = 8 * BytesPerLong;
+
+// false => size gets scaled to BytesPerLong, ok.
+const bool Matcher::init_array_count_is_in_bytes = false;
+
+// Use conditional move (CMOVL) on Power7.
+const int Matcher::long_cmove_cost() { return 0; } // this only makes long cmoves more expensive than int cmoves
+
+// Suppress CMOVF. Conditional move is available (sort of) on PPC64 only from P7 onwards; not exploited yet.
+// fsel doesn't accept a condition register as input, so this would be slightly different.
+const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
+
+// Power6 requires postalloc expand (see block.cpp for description of postalloc expand).
+const bool Matcher::require_postalloc_expand = true;
+
+// Should the Matcher clone shifts on addressing modes, expecting them to
+// be subsumed into complex addressing expressions or compute them into
+// registers? True for Intel but false for most RISCs.
+const bool Matcher::clone_shift_expressions = false;
+
+// Do we need to mask the count passed to shift instructions or does
+// the cpu only look at the lower 5/6 bits anyway?
+// Off, as masks are generated in expand rules where required.
+// Constant shift counts are handled in Ideal phase.
+const bool Matcher::need_masked_shift_count = false;
+
+// This affects two different things:
+//  - how Decode nodes are matched
+//  - how ImplicitNullCheck opportunities are recognized
+// If true, the matcher will try to remove all Decodes and match them
+// (as operands) into nodes. NullChecks are not prepared to deal with
+// Decodes by final_graph_reshaping().
+// If false, final_graph_reshaping() forces the decode behind the Cmp
+// for a NullCheck. The matcher matches the Decode node into a register.
+// Implicit_null_check optimization moves the Decode along with the
+// memory operation back up before the NullCheck.
+bool Matcher::narrow_oop_use_complex_address() {
+  // TODO: PPC port if (MatchDecodeNodes) return true;
+  return false;
+}
+
+bool Matcher::narrow_klass_use_complex_address() {
+  NOT_LP64(ShouldNotCallThis());
+  assert(UseCompressedClassPointers, "only for compressed klass code");
+  // TODO: PPC port if (MatchDecodeNodes) return true;
+  return false;
+}
+
+// Is it better to copy float constants, or load them directly from memory?
+// Intel can load a float constant from a direct address, requiring no
+// extra registers. Most RISCs will have to materialize an address into a
+// register first, so they would do better to copy the constant from stack.
+const bool Matcher::rematerialize_float_constants = false;
+
+// If CPU can load and store mis-aligned doubles directly then no fixup is
+// needed. Else we split the double into 2 integer pieces and move it
+// piece-by-piece. Only happens when passing doubles into C code as the
+// Java calling convention forces doubles to be aligned.
+const bool Matcher::misaligned_doubles_ok = true;
+
+void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
+ Unimplemented();
+}
+
+// Advertise here if the CPU requires explicit rounding operations
+// to implement the UseStrictFP mode.
+const bool Matcher::strict_fp_requires_explicit_rounding = false;
+
+// Do floats take an entire double register or just half?
+//
+// A float occupies a ppc64 double register. For the allocator, a
+// ppc64 double register appears as a pair of float registers.
+bool Matcher::float_in_double() { return true; }
+
+// Do ints take an entire long register or just half?
+// The relevant question is how the int is callee-saved:
+// the whole long is written but de-opt'ing will have to extract
+// the relevant 32 bits.
+const bool Matcher::int_in_long = true;
+
+// Constants for c2c and c calling conventions.
+
+const MachRegisterNumbers iarg_reg[8] = {
+  R3_num, R4_num, R5_num, R6_num,
+  R7_num, R8_num, R9_num, R10_num
+};
+
+const MachRegisterNumbers farg_reg[13] = {
+  F1_num, F2_num, F3_num, F4_num,
+  F5_num, F6_num, F7_num, F8_num,
+  F9_num, F10_num, F11_num, F12_num,
+  F13_num
+};
+
+const int num_iarg_registers = sizeof(iarg_reg) / sizeof(iarg_reg[0]);
+
+const int num_farg_registers = sizeof(farg_reg) / sizeof(farg_reg[0]);
+
+// Return whether or not this register is ever used as an argument. This
+// function is used on startup to build the trampoline stubs in generateOptoStub.
+// Registers not mentioned will be killed by the VM call in the trampoline, and
+// arguments in those registers will not be available to the callee.
+bool Matcher::can_be_java_arg(int reg) {
+  // We return true for all registers contained in iarg_reg[] and
+  // farg_reg[] and their virtual halves.
+  // We must include the virtual halves in order to get STDs and LDs
+  // instead of STWs and LWs in the trampoline stubs.
+
+  if (   reg == R3_num  || reg == R3_H_num
+      || reg == R4_num  || reg == R4_H_num
+      || reg == R5_num  || reg == R5_H_num
+      || reg == R6_num  || reg == R6_H_num
+      || reg == R7_num  || reg == R7_H_num
+      || reg == R8_num  || reg == R8_H_num
+      || reg == R9_num  || reg == R9_H_num
+      || reg == R10_num || reg == R10_H_num)
+    return true;
+
+  if (   reg == F1_num  || reg == F1_H_num
+      || reg == F2_num  || reg == F2_H_num
+      || reg == F3_num  || reg == F3_H_num
+      || reg == F4_num  || reg == F4_H_num
+      || reg == F5_num  || reg == F5_H_num
+      || reg == F6_num  || reg == F6_H_num
+      || reg == F7_num  || reg == F7_H_num
+      || reg == F8_num  || reg == F8_H_num
+      || reg == F9_num  || reg == F9_H_num
+      || reg == F10_num || reg == F10_H_num
+      || reg == F11_num || reg == F11_H_num
+      || reg == F12_num || reg == F12_H_num
+      || reg == F13_num || reg == F13_H_num)
+    return true;
+
+  return false;
+}
+
+bool Matcher::is_spillable_arg(int reg) {
+  return can_be_java_arg(reg);
+}
+
+bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
+  return false;
+}
+
+// Register for DIVI projection of divmodI.
+RegMask Matcher::divI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODI projection of divmodI.
+RegMask Matcher::modI_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for DIVL projection of divmodL.
+RegMask Matcher::divL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+// Register for MODL projection of divmodL.
+RegMask Matcher::modL_proj_mask() {
+  ShouldNotReachHere();
+  return RegMask();
+}
+
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+  return RegMask();
+}
+
+%}
+
+//----------ENCODING BLOCK-----------------------------------------------------
+// This block specifies the encoding classes used by the compiler to output
+// byte streams. Encoding classes are parameterized macros used by
+// Machine Instruction Nodes in order to generate the bit encoding of the
+// instruction. Operands specify their base encoding interface with the
+// interface keyword. Four interfaces are currently supported:
+// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
+// operand to generate a function which returns its register number when
+// queried. CONST_INTER causes an operand to generate a function which
+// returns the value of the constant when queried. MEMORY_INTER causes an
+// operand to generate four functions which return the Base Register, the
+// Index Register, the Scale Value, and the Offset Value of the operand when
+// queried. COND_INTER causes an operand to generate six functions which
+// return the encoding code (i.e. the encoding bits for the instruction)
+// associated with each basic boolean condition for a conditional instruction.
+//
+// Instructions specify two basic values for encoding. They use the
+// ins_encode keyword to specify their encoding classes (which must be
+// a sequence of enc_class names, and their parameters, specified in
+// the encoding block), and they use the opcode keyword to specify, in
+// order, their primary, secondary, and tertiary opcode. Only the opcode
+// sections which a particular instruction needs for encoding need to be
+// specified. (For memory operands, a function is also available to check
+// whether a constant displacement is an oop.)
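+//
+// A minimal sketch (hypothetical, for illustration only; the real
+// definitions appear in the instruct section of this file) of how an
+// instruction refers to an enc_class via ins_encode:
+//
+//   instruct loadB_example(iRegIdst dst, memory mem) %{
+//     match(Set dst (LoadB mem));
+//     ins_encode( enc_lbz(dst, mem) );
+//   %}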
+encode %{
+  enc_class enc_unimplemented %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    MacroAssembler _masm(&cbuf);
+    __ unimplemented("Unimplemented mach node encoding in AD file.", 13);
+  %}
+
+  enc_class enc_untested %{
+#ifdef ASSERT
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    MacroAssembler _masm(&cbuf);
+    __ untested("Untested mach node encoding in AD file.");
+#else
+    // TODO: PPC port $archOpcode(ppc64Opcode_none);
+#endif
+  %}
+
+  enc_class enc_lbz(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lbz);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  // Load acquire.
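+  // The twi_0/isync pair after the load forms an acquire barrier: the
+  // trap instruction creates a control dependency on the loaded value,
+  // and isync keeps later instructions from starting before it resolves.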
+  enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+
+  enc_class enc_lhz(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lhz);
+
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  // Load acquire.
+  enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+
+  enc_class enc_lwz(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwz);
+
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  // Load acquire.
+  enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+
+  enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    // Operand 'ds' requires 4-alignment.
+    assert((Idisp & 0x3) == 0, "unaligned offset");
+    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  // Load acquire.
+  enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    // Operand 'ds' requires 4-alignment.
+    assert((Idisp & 0x3) == 0, "unaligned offset");
+    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+
+  enc_class enc_lfd(RegF dst, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lfd);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
+  %}
+
+  enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+
+    MacroAssembler _masm(&cbuf);
+    int toc_offset = 0;
+
+    if (!ra_->C->in_scratch_emit_size()) {
+      address const_toc_addr;
+      // Create a non-oop constant, no relocation needed.
+      // If it is an IC, it has a virtual_call_Relocation.
+      const_toc_addr = __ long_constant((jlong)$src$$constant);
+
+      // Get the constant's TOC offset.
+      toc_offset = __ offset_to_method_toc(const_toc_addr);
+
+      // Keep the current instruction offset in mind.
+      ((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
+    }
+
+    __ ld($dst$$Register, toc_offset, $toc$$Register);
+  %}
+
+  enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+
+    MacroAssembler _masm(&cbuf);
+
+    if (!ra_->C->in_scratch_emit_size()) {
+      address const_toc_addr;
+      // Create a non-oop constant, no relocation needed.
+      // If it is an IC, it has a virtual_call_Relocation.
+      const_toc_addr = __ long_constant((jlong)$src$$constant);
+
+      // Get the constant's TOC offset.
+      const int toc_offset = __ offset_to_method_toc(const_toc_addr);
+      // Store the toc offset of the constant.
+      ((loadConL_hiNode*)this)->_const_toc_offset = toc_offset;
+
+      // Also keep the current instruction offset in mind.
+      ((loadConL_hiNode*)this)->_cbuf_insts_offset = __ offset();
+    }
+
+    __ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
+  %}
+
+%} // encode
+
+source %{
+
+typedef struct {
+  loadConL_hiNode *_large_hi;
+  loadConL_loNode *_large_lo;
+  loadConLNode    *_small;
+  MachNode        *_last;
+} loadConLNodesTuple;
+
+loadConLNodesTuple loadConLNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+                                             OptoReg::Name reg_second, OptoReg::Name reg_first) {
+  loadConLNodesTuple nodes;
+
+  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+  if (large_constant_pool) {
+    // Create new nodes.
+    loadConL_hiNode *m1 = new (C) loadConL_hiNode();
+    loadConL_loNode *m2 = new (C) loadConL_loNode();
+
+    // inputs for new nodes
+    m1->add_req(NULL, toc);
+    m2->add_req(NULL, m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = new (C) iRegLdstOper(); // dst
+    m1->_opnds[1] = immSrc;                 // src
+    m1->_opnds[2] = new (C) iRegPdstOper(); // toc
+    m2->_opnds[0] = new (C) iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;                 // src
+    m2->_opnds[2] = new (C) iRegLdstOper(); // base
+
+    // Initialize ins_attrib TOC fields.
+    m1->_const_toc_offset = -1;
+    m2->_const_toc_offset_hi_node = m1;
+
+    // Initialize ins_attrib instruction offset.
+    m1->_cbuf_insts_offset = -1;
+
+    // register allocation for new nodes
+    ra_->set_pair(m1->_idx, reg_second, reg_first);
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+
+    // Create result.
+    nodes._large_hi = m1;
+    nodes._large_lo = m2;
+    nodes._small = NULL;
+    nodes._last = nodes._large_lo;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  } else {
+    loadConLNode *m2 = new (C) loadConLNode();
+
+    // inputs for new nodes
+    m2->add_req(NULL, toc);
+
+    // operands for new nodes
+    m2->_opnds[0] = new (C) iRegLdstOper(); // dst
+    m2->_opnds[1] = immSrc;                 // src
+    m2->_opnds[2] = new (C) iRegPdstOper(); // toc
+
+    // Initialize ins_attrib instruction offset.
+    m2->_cbuf_insts_offset = -1;
+
+    // register allocation for new nodes
+    ra_->set_pair(m2->_idx, reg_second, reg_first);
+
+    // Create result.
+    nodes._large_hi = NULL;
+    nodes._large_lo = NULL;
+    nodes._small = m2;
+    nodes._last = nodes._small;
+    assert(m2->bottom_type()->isa_long(), "must be long");
+  }
+
+  return nodes;
+}
+
+%} // source
+
+encode %{
+  // Postalloc expand emitter for loading a long constant from the method's TOC.
+  // Enc_class needed as constanttablebase is not supported by postalloc
+  // expand.
+  enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
+    // Create new nodes.
+    loadConLNodesTuple loadConLNodes =
+      loadConLNodesTuple_create(C, ra_, n_toc, op_src,
+                                ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    // Push new nodes.
+    if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
+    if (loadConLNodes._last)     nodes->push(loadConLNodes._last);
+
+    // some asserts
+    assert(nodes->length() >= 1, "must have created at least 1 node");
+    assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
+  %}
+
+  enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+
+    MacroAssembler _masm(&cbuf);
+    int toc_offset = 0;
+
+    if (!ra_->C->in_scratch_emit_size()) {
+      intptr_t val = $src$$constant;
+      relocInfo::relocType constant_reloc = $src->constant_reloc();  // src
+      address const_toc_addr;
+      if (constant_reloc == relocInfo::oop_type) {
+        // Create an oop constant and a corresponding relocation.
+        AddressLiteral a = __ allocate_oop_address((jobject)val);
+        const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
+        __ relocate(a.rspec());
+      } else if (constant_reloc == relocInfo::metadata_type) {
+        AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
+        const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
+        __ relocate(a.rspec());
+      } else {
+        // Create a non-oop constant, no relocation needed.
+        const_toc_addr = __ long_constant((jlong)$src$$constant);
+      }
+
+      // Get the constant's TOC offset.
+      toc_offset = __ offset_to_method_toc(const_toc_addr);
+    }
+
+    __ ld($dst$$Register, toc_offset, $toc$$Register);
+  %}
+
+  enc_class enc_load_long_constP_hi(iRegLdst dst, immP src, iRegLdst toc) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+
+    MacroAssembler _masm(&cbuf);
+    if (!ra_->C->in_scratch_emit_size()) {
+      intptr_t val = $src$$constant;
+      relocInfo::relocType constant_reloc = $src->constant_reloc();  // src
+      address const_toc_addr;
+      if (constant_reloc == relocInfo::oop_type) {
+        // Create an oop constant and a corresponding relocation.
+        AddressLiteral a = __ allocate_oop_address((jobject)val);
+        const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
+        __ relocate(a.rspec());
+      } else if (constant_reloc == relocInfo::metadata_type) {
+        AddressLiteral a = __ allocate_metadata_address((Metadata *)val);
+        const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
+        __ relocate(a.rspec());
+      } else {  // non-oop pointers, e.g. card mark base, heap top
+        // Create a non-oop constant, no relocation needed.
+        const_toc_addr = __ long_constant((jlong)$src$$constant);
+      }
+
+      // Get the constant's TOC offset.
+      const int toc_offset = __ offset_to_method_toc(const_toc_addr);
+      // Store the toc offset of the constant.
+      ((loadConP_hiNode*)this)->_const_toc_offset = toc_offset;
+    }
+
+    __ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
+  %}
+
+  // Postalloc expand emitter for loading a ptr constant from the method's TOC.
+  // Enc_class needed as constanttablebase is not supported by postalloc
+  // expand.
+  enc_class postalloc_expand_load_ptr_constant(iRegPdst dst, immP src, iRegLdst toc) %{
+    const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+    if (large_constant_pool) {
+      // Create new nodes.
+      loadConP_hiNode *m1 = new (C) loadConP_hiNode();
+      loadConP_loNode *m2 = new (C) loadConP_loNode();
+
+      // inputs for new nodes
+      m1->add_req(NULL, n_toc);
+      m2->add_req(NULL, m1);
+      
+      // operands for new nodes
+      m1->_opnds[0] = new (C) iRegPdstOper(); // dst
+      m1->_opnds[1] = op_src;                 // src
+      m1->_opnds[2] = new (C) iRegPdstOper(); // toc
+      m2->_opnds[0] = new (C) iRegPdstOper(); // dst
+      m2->_opnds[1] = op_src;                 // src
+      m2->_opnds[2] = new (C) iRegLdstOper(); // base
+      
+      // Initialize ins_attrib TOC fields.
+      m1->_const_toc_offset = -1;
+      m2->_const_toc_offset_hi_node = m1;
+      
+      // Register allocation for new nodes.
+      ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      
+      nodes->push(m1);
+      nodes->push(m2);
+      assert(m2->bottom_type()->isa_ptr(), "must be ptr");
+    } else {
+      loadConPNode *m2 = new (C) loadConPNode();
+      
+      // inputs for new nodes
+      m2->add_req(NULL, n_toc);
+      
+      // operands for new nodes
+      m2->_opnds[0] = new (C) iRegPdstOper(); // dst
+      m2->_opnds[1] = op_src;                 // src
+      m2->_opnds[2] = new (C) iRegPdstOper(); // toc
+      
+      // Register allocation for new nodes.
+      ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+      nodes->push(m2);
+      assert(m2->bottom_type()->isa_ptr(), "must be ptr");
+    }
+  %}
+
+  // Enc_class needed as constanttablebase is not supported by postalloc
+  // expand.
+  enc_class postalloc_expand_load_float_constant(regF dst, immF src, iRegLdst toc) %{
+    bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+
+    MachNode *m2;
+    if (large_constant_pool) {
+      m2 = new (C) loadConFCompNode();
+    } else {
+      m2 = new (C) loadConFNode();
+    }
+    // inputs for new nodes
+    m2->add_req(NULL, n_toc);
+
+    // operands for new nodes
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_src;
+    m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase
+
+    // register allocation for new nodes
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    nodes->push(m2);
+  %}
+
+  // Enc_class needed as constanttablebase is not supported by postalloc
+  // expand.
+  enc_class postalloc_expand_load_double_constant(regD dst, immD src, iRegLdst toc) %{
+    bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
+
+    MachNode *m2;
+    if (large_constant_pool) {
+      m2 = new (C) loadConDCompNode();
+    } else {
+      m2 = new (C) loadConDNode();
+    }
+    // inputs for new nodes
+    m2->add_req(NULL, n_toc);
+
+    // operands for new nodes
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_src;
+    m2->_opnds[2] = new (C) iRegPdstOper(); // constanttablebase
+
+    // register allocation for new nodes
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    nodes->push(m2);
+  %}
+
+  enc_class enc_stw(iRegIsrc src, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_stw);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ stw($src$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_std);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    // Operand 'ds' requires 4-alignment.
+    assert((Idisp & 0x3) == 0, "unaligned offset");
+    __ std($src$$Register, Idisp, $mem$$base$$Register);
+  %}
+
+  enc_class enc_stfs(RegF src, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_stfs);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ stfs($src$$FloatRegister, Idisp, $mem$$base$$Register);
+  %}
+
+  enc_class enc_stfd(RegF src, memory mem) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_stfd);
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
+  %}
+
+  // Use release_store for card-marking to ensure that previous
+  // oop-stores are visible before the card-mark change.
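+  // The unconditional StoreStore membar below provides these release
+  // semantics; the disabled code would skip the barrier when the
+  // collector does not require it.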
+  enc_class enc_cms_card_mark(memory mem, iRegLdst releaseFieldAddr) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // FIXME: Implement this as a cmove and use a fixed condition code
+    // register which is written on every transition to compiled code,
+    // e.g. in call-stub and when returning from runtime stubs.
+    //
+    // Proposed code sequence for the cmove implementation:
+    //
+    // Label skip_release;
+    // __ beq(CCRfixed, skip_release);
+    // __ release();
+    // __ bind(skip_release);
+    // __ stb(card mark);
+
+    MacroAssembler _masm(&cbuf);
+    Label skip_storestore;
+
+#if 0 // TODO: PPC port
+    // Check CMSCollectorCardTableModRefBSExt::_requires_release and do the
+    // StoreStore barrier conditionally.
+    __ lwz(R0, 0, $releaseFieldAddr$$Register);
+    __ cmpwi(CCR0, R0, 0);
+    __ beq_predict_taken(CCR0, skip_storestore);
+#endif
+    __ li(R0, 0);
+    __ membar(Assembler::StoreStore);
+#if 0 // TODO: PPC port
+    __ bind(skip_storestore);
+#endif
+
+    // Do the store.
+    if ($mem$$index == 0) {
+      __ stb(R0, $mem$$disp, $mem$$base$$Register);
+    } else {
+      assert(0 == $mem$$disp, "no displacement possible with indexed load/stores on ppc");
+      __ stbx(R0, $mem$$base$$Register, $mem$$index$$Register);
+    }
+  %}
+
+  enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
+
+    if (VM_Version::has_isel()) {
+      // use isel instruction with Power 7
+      cmpP_reg_imm16Node *n_compare  = new (C) cmpP_reg_imm16Node();
+      encodeP_subNode    *n_sub_base = new (C) encodeP_subNode();
+      encodeP_shiftNode  *n_shift    = new (C) encodeP_shiftNode();
+      cond_set_0_oopNode *n_cond_set = new (C) cond_set_0_oopNode();
+
+      n_compare->add_req(n_region, n_src);
+      n_compare->_opnds[0] = op_crx;
+      n_compare->_opnds[1] = op_src;
+      n_compare->_opnds[2] = new (C) immL16Oper(0);
+
+      n_sub_base->add_req(n_region, n_src);
+      n_sub_base->_opnds[0] = op_dst;
+      n_sub_base->_opnds[1] = op_src;
+      n_sub_base->_bottom_type = _bottom_type;
+
+      n_shift->add_req(n_region, n_sub_base);
+      n_shift->_opnds[0] = op_dst;
+      n_shift->_opnds[1] = op_dst;
+      n_shift->_bottom_type = _bottom_type;
+
+      n_cond_set->add_req(n_region, n_compare, n_shift);
+      n_cond_set->_opnds[0] = op_dst;
+      n_cond_set->_opnds[1] = op_crx;
+      n_cond_set->_opnds[2] = op_dst;
+      n_cond_set->_bottom_type = _bottom_type;
+
+      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
+      ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+      nodes->push(n_compare);
+      nodes->push(n_sub_base);
+      nodes->push(n_shift);
+      nodes->push(n_cond_set);
+
+    } else {
+      // before Power 7
+      moveRegNode        *n_move     = new (C) moveRegNode();
+      cmpP_reg_imm16Node *n_compare  = new (C) cmpP_reg_imm16Node();
+      encodeP_shiftNode  *n_shift    = new (C) encodeP_shiftNode();
+      cond_sub_baseNode  *n_sub_base = new (C) cond_sub_baseNode();
+
+      n_move->add_req(n_region, n_src);
+      n_move->_opnds[0] = op_dst;
+      n_move->_opnds[1] = op_src;
+      ra_->set_oop(n_move, true); // Until here, 'n_move' still produces an oop.
+
+      n_compare->add_req(n_region, n_src);
+      n_compare->add_prec(n_move);
+
+      n_compare->_opnds[0] = op_crx;
+      n_compare->_opnds[1] = op_src;
+      n_compare->_opnds[2] = new (C) immL16Oper(0);
+
+      n_sub_base->add_req(n_region, n_compare, n_src);
+      n_sub_base->_opnds[0] = op_dst;
+      n_sub_base->_opnds[1] = op_crx;
+      n_sub_base->_opnds[2] = op_src;
+      n_sub_base->_bottom_type = _bottom_type;
+   
+      n_shift->add_req(n_region, n_sub_base);
+      n_shift->_opnds[0] = op_dst;
+      n_shift->_opnds[1] = op_dst;
+      n_shift->_bottom_type = _bottom_type;
+   
+      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
+      ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_move->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+   
+      nodes->push(n_move);
+      nodes->push(n_compare);
+      nodes->push(n_sub_base);
+      nodes->push(n_shift);
+    }
+
+    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
+  %}
+
+  enc_class postalloc_expand_encode_oop_not_null(iRegNdst dst, iRegPdst src) %{
+
+    encodeP_subNode *n1 = new (C) encodeP_subNode();
+    n1->add_req(n_region, n_src);
+    n1->_opnds[0] = op_dst;
+    n1->_opnds[1] = op_src;
+    n1->_bottom_type = _bottom_type;
+
+    encodeP_shiftNode *n2 = new (C) encodeP_shiftNode();
+    n2->add_req(n_region, n1);
+    n2->_opnds[0] = op_dst;
+    n2->_opnds[1] = op_dst;
+    n2->_bottom_type = _bottom_type;
+    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    nodes->push(n1);
+    nodes->push(n2);
+    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
+  %}
+
+  enc_class postalloc_expand_decode_oop(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
+    decodeN_shiftNode *n_shift    = new (C) decodeN_shiftNode();
+    cmpN_reg_imm0Node *n_compare  = new (C) cmpN_reg_imm0Node();
+
+    n_compare->add_req(n_region, n_src);
+    n_compare->_opnds[0] = op_crx;
+    n_compare->_opnds[1] = op_src;
+    n_compare->_opnds[2] = new (C) immN_0Oper(TypeNarrowOop::NULL_PTR);
+
+    n_shift->add_req(n_region, n_src);
+    n_shift->_opnds[0] = op_dst;
+    n_shift->_opnds[1] = op_src;
+    n_shift->_bottom_type = _bottom_type;
+
+    if (VM_Version::has_isel()) {
+      // use isel instruction with Power 7
+
+      decodeN_addNode *n_add_base = new (C) decodeN_addNode();
+      n_add_base->add_req(n_region, n_shift);
+      n_add_base->_opnds[0] = op_dst;
+      n_add_base->_opnds[1] = op_dst;
+      n_add_base->_bottom_type = _bottom_type;
+
+      cond_set_0_ptrNode *n_cond_set = new (C) cond_set_0_ptrNode();
+      n_cond_set->add_req(n_region, n_compare, n_add_base);
+      n_cond_set->_opnds[0] = op_dst;
+      n_cond_set->_opnds[1] = op_crx;
+      n_cond_set->_opnds[2] = op_dst;
+      n_cond_set->_bottom_type = _bottom_type;
+
+      assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
+      ra_->set_oop(n_cond_set, true);
+
+      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
+      ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+      nodes->push(n_compare);
+      nodes->push(n_shift);
+      nodes->push(n_add_base);
+      nodes->push(n_cond_set);
+
+    } else {
+      // before Power 7
+      cond_add_baseNode *n_add_base = new (C) cond_add_baseNode();
+     
+      n_add_base->add_req(n_region, n_compare, n_shift);
+      n_add_base->_opnds[0] = op_dst;
+      n_add_base->_opnds[1] = op_crx;
+      n_add_base->_opnds[2] = op_dst;
+      n_add_base->_bottom_type = _bottom_type;
+     
+      assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
+      ra_->set_oop(n_add_base, true);
+     
+      ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
+      ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+     
+      nodes->push(n_compare);
+      nodes->push(n_shift);
+      nodes->push(n_add_base);
+    }
+  %}
+
+  enc_class postalloc_expand_decode_oop_not_null(iRegPdst dst, iRegNsrc src) %{
+    decodeN_shiftNode *n1 = new (C) decodeN_shiftNode();
+    n1->add_req(n_region, n_src);
+    n1->_opnds[0] = op_dst;
+    n1->_opnds[1] = op_src;
+    n1->_bottom_type = _bottom_type;
+
+    decodeN_addNode *n2 = new (C) decodeN_addNode();
+    n2->add_req(n_region, n1);
+    n2->_opnds[0] = op_dst;
+    n2->_opnds[1] = op_dst;
+    n2->_bottom_type = _bottom_type;
+    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
+    ra_->set_oop(n2, true);
+
+    nodes->push(n1);
+    nodes->push(n2);
+  %}
+
+  enc_class enc_cmove_reg(iRegIdst dst, flagsReg crx, iRegIsrc src, cmpOp cmp) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+
+    MacroAssembler _masm(&cbuf);
+    int cc        = $cmp$$cmpcode;
+    int flags_reg = $crx$$reg;
+    Label done;
+    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
+    // Branch if not (cmp crx).
+    __ bc(cc_to_inverse_boint(cc), cc_to_biint(cc, flags_reg), done);
+    __ mr($dst$$Register, $src$$Register);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+
+  enc_class enc_cmove_imm(iRegIdst dst, flagsReg crx, immI16 src, cmpOp cmp) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+
+    MacroAssembler _masm(&cbuf);
+    Label done;
+    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
+    // Branch if not (cmp crx).
+    __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
+    __ li($dst$$Register, $src$$constant);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+
+  // New atomics.
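+  // These emitters all follow the PPC load-reserved/store-conditional
+  // pattern (lwarx/stwcx. resp. ldarx/stdcx.): the update is retried
+  // until the store-conditional succeeds, and the trailing fence makes
+  // the whole operation a full two-way barrier.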
+  enc_class enc_GetAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    Register Rtmp   = R0;
+    Register Rres   = $res$$Register;
+    Register Rsrc   = $src$$Register;
+    Register Rptr   = $mem_ptr$$Register;
+    bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
+    Register Rold   = RegCollision ? Rtmp : Rres;
+
+    Label Lretry;
+    __ bind(Lretry);
+    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
+    __ add(Rtmp, Rsrc, Rold);
+    __ stwcx_(Rtmp, Rptr);
+    if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+      __ bne_predict_not_taken(CCR0, Lretry);
+    } else {
+      __ bne(                  CCR0, Lretry);
+    }
+    if (RegCollision) __ subf(Rres, Rsrc, Rtmp);
+    __ fence();
+  %}
+
+  enc_class enc_GetAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    Register Rtmp   = R0;
+    Register Rres   = $res$$Register;
+    Register Rsrc   = $src$$Register;
+    Register Rptr   = $mem_ptr$$Register;
+    bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
+    Register Rold   = RegCollision ? Rtmp : Rres;
+
+    Label Lretry;
+    __ bind(Lretry);
+    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
+    __ add(Rtmp, Rsrc, Rold);
+    __ stdcx_(Rtmp, Rptr);
+    if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+      __ bne_predict_not_taken(CCR0, Lretry);
+    } else {
+      __ bne(                  CCR0, Lretry);
+    }
+    if (RegCollision) __ subf(Rres, Rsrc, Rtmp);
+    __ fence();
+  %}
+
+  enc_class enc_GetAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    Register Rtmp   = R0;
+    Register Rres   = $res$$Register;
+    Register Rsrc   = $src$$Register;
+    Register Rptr   = $mem_ptr$$Register;
+    bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
+    Register Rold   = RegCollision ? Rtmp : Rres;
+
+    Label Lretry;
+    __ bind(Lretry);
+    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
+    __ stwcx_(Rsrc, Rptr);
+    if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+      __ bne_predict_not_taken(CCR0, Lretry);
+    } else {
+      __ bne(                  CCR0, Lretry);
+    }
+    if (RegCollision) __ mr(Rres, Rtmp);
+    __ fence();
+  %}
+
+  enc_class enc_GetAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    Register Rtmp   = R0;
+    Register Rres   = $res$$Register;
+    Register Rsrc   = $src$$Register;
+    Register Rptr   = $mem_ptr$$Register;
+    bool RegCollision = (Rres == Rsrc) || (Rres == Rptr);
+    Register Rold   = RegCollision ? Rtmp : Rres;
+
+    Label Lretry;
+    __ bind(Lretry);
+    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
+    __ stdcx_(Rsrc, Rptr);
+    if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
+      __ bne_predict_not_taken(CCR0, Lretry);
+    } else {
+      __ bne(                  CCR0, Lretry);
+    }
+    if (RegCollision) __ mr(Rres, Rtmp);
+    __ fence();
+  %}
+
+  // This enc_class is needed so that the scheduler gets proper
+  // input mapping for latency computation.
+  enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andc);
+    MacroAssembler _masm(&cbuf);
+    __ andc($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+
+  enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+
+    Label done;
+    __ cmpwi($crx$$CondRegister, $src$$Register, 0);
+    __ li($dst$$Register, $zero$$constant);
+    __ beq($crx$$CondRegister, done);
+    __ li($dst$$Register, $notzero$$constant);
+    __ bind(done);
+  %}
+
+  enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+
+    Label done;
+    __ cmpdi($crx$$CondRegister, $src$$Register, 0);
+    __ li($dst$$Register, $zero$$constant);
+    __ beq($crx$$CondRegister, done);
+    __ li($dst$$Register, $notzero$$constant);
+    __ bind(done);
+  %}
+
+  enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL mem ) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+
+    MacroAssembler _masm(&cbuf);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    Label done;
+    __ bso($crx$$CondRegister, done);
+    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+
+  enc_class enc_bc(flagsReg crx, cmpOp cmp, Label lbl) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_bc);
+
+    MacroAssembler _masm(&cbuf);
+    Label d;   // dummy
+    __ bind(d);
+    Label* p = ($lbl$$label);
+    // `p' is `NULL' when this encoding class is used only to
+    // determine the size of the encoded instruction.
+    Label& l = (NULL == p)? d : *(p);
+    int cc = $cmp$$cmpcode;
+    int flags_reg = $crx$$reg;
+    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
+    int bhint = Assembler::bhintNoHint;
+
+    if (UseStaticBranchPredictionForUncommonPathsPPC64) {
+      if (_prob <= PROB_NEVER) {
+        bhint = Assembler::bhintIsNotTaken;
+      } else if (_prob >= PROB_ALWAYS) {
+        bhint = Assembler::bhintIsTaken;
+      }
+    }
+
+    __ bc(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
+          cc_to_biint(cc, flags_reg),
+          l);
+  %}
+
+  enc_class enc_bc_far(flagsReg crx, cmpOp cmp, Label lbl) %{
+    // The scheduler doesn't know about branch shortening, so we set the opcode
+    // to ppc64Opcode_bc in order to hide this detail from the scheduler.
+    // TODO: PPC port $archOpcode(ppc64Opcode_bc);
+
+    MacroAssembler _masm(&cbuf);
+    Label d;    // dummy
+    __ bind(d);
+    Label* p = ($lbl$$label);
+    // `p' is `NULL' when this encoding class is used only to
+    // determine the size of the encoded instruction.
+    Label& l = (NULL == p)? d : *(p);
+    int cc = $cmp$$cmpcode;
+    int flags_reg = $crx$$reg;
+    int bhint = Assembler::bhintNoHint;
+
+    if (UseStaticBranchPredictionForUncommonPathsPPC64) {
+      if (_prob <= PROB_NEVER) {
+        bhint = Assembler::bhintIsNotTaken;
+      } else if (_prob >= PROB_ALWAYS) {
+        bhint = Assembler::bhintIsTaken;
+      }
+    }
+
+    // Tell the conditional far branch to optimize itself when being relocated.
+    __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
+                  cc_to_biint(cc, flags_reg),
+                  l,
+                  MacroAssembler::bc_far_optimize_on_relocate);
+  %}
+
+  // Branch used with Power6 scheduling (can be shortened without changing the node).
+  enc_class enc_bc_short_far(flagsReg crx, cmpOp cmp, Label lbl) %{
+    // The scheduler doesn't know about branch shortening, so we set the opcode
+    // to ppc64Opcode_bc in order to hide this detail from the scheduler.
+    // TODO: PPC port $archOpcode(ppc64Opcode_bc);
+
+    MacroAssembler _masm(&cbuf);
+    Label d;   // dummy
+    __ bind(d);
+    Label* p = ($lbl$$label);
+    // `p' is `NULL' when this encoding class is used only to
+    // determine the size of the encoded instruction.
+    Label& l = (NULL == p)? d : *(p);
+    int cc = $cmp$$cmpcode;
+    int flags_reg = $crx$$reg;
+    int bhint = Assembler::bhintNoHint;
+
+    if (UseStaticBranchPredictionForUncommonPathsPPC64) {
+      if (_prob <= PROB_NEVER) {
+        bhint = Assembler::bhintIsNotTaken;
+      } else if (_prob >= PROB_ALWAYS) {
+        bhint = Assembler::bhintIsTaken;
+      }
+    }
+
+#if 0 // TODO: PPC port
+    if (_size == 8) {
+      // Tell the conditional far branch to optimize itself when being relocated.
+      __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
+                    cc_to_biint(cc, flags_reg),
+                    l,
+                    MacroAssembler::bc_far_optimize_on_relocate);
+    } else {
+      __ bc    (Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
+                    cc_to_biint(cc, flags_reg),
+                    l);
+    }
+#endif
+    Unimplemented();
+  %}
+
+  // Postalloc expand emitter for loading a replicateF float constant from
+  // the method's TOC.
+  // Enc_class needed as constanttablebase is not supported by postalloc
+  // expand.
+  enc_class postalloc_expand_load_replF_constant(iRegLdst dst, immF src, iRegLdst toc) %{
+    // Create new nodes.
+
+    // Make an operand with the bit pattern to load as float.
+    immLOper *op_repl = new (C) immLOper((jlong)replicate_immF(op_src->constantF()));
+
+    loadConLNodesTuple loadConLNodes =
+      loadConLNodesTuple_create(C, ra_, n_toc, op_repl,
+                                ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    // Push new nodes.
+    if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
+    if (loadConLNodes._last)     nodes->push(loadConLNodes._last);
+
+    assert(nodes->length() >= 1, "must have created at least 1 node");
+    assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
+  %}
+
+  // This enc_class is needed so that the scheduler gets proper
+  // input mapping for latency computation.
+  enc_class enc_poll(immI dst, iRegLdst poll) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+    // Fake operand dst needed for PPC scheduler.
+    assert($dst$$constant == 0x0, "dst must be 0x0");
+
+    MacroAssembler _masm(&cbuf);
+    // Mark the code position where the load from the safepoint
+    // polling page was emitted as relocInfo::poll_type.
+    __ relocate(relocInfo::poll_type);
+    __ load_from_polling_page($poll$$Register);
+  %}
+
+  // A Java static call or a runtime call.
+  //
+  // Branch-and-link relative to a trampoline.
+  // The trampoline loads the target address and does a long branch to there.
+  // In case we call java, the trampoline branches to an interpreter_stub
+  // which loads the inline cache and the real call target from the constant pool.
+  //
+  // This basically looks like this:
+  //
+  // >>>> consts      -+  -+
+  //                   |   |- offset1
+  // [call target1]    | <-+
+  // [IC cache]        |- offset2
+  // [call target2] <--+
+  //
+  // <<<< consts
+  // >>>> insts
+  //
+  // bl offset16               -+  -+             ??? // How many bits available?
+  //                            |   |
+  // <<<< insts                 |   |
+  // >>>> stubs                 |   |
+  //                            |   |- trampoline_stub_Reloc
+  // trampoline stub:           | <-+
+  //   r2 = toc                 |
+  //   r2 = [r2 + offset1]      |       // Load call target1 from const section
+  //   mtctr r2                 |
+  //   bctr                     |- static_stub_Reloc
+  // comp_to_interp_stub:   <---+
+  //   r1 = toc
+  //   ICreg = [r1 + IC_offset]         // Load IC from const section
+  //   r1    = [r1 + offset2]           // Load call target2 from const section
+  //   mtctr r1
+  //   bctr
+  //
+  // <<<< stubs
+  //
+  // The call instruction in the code either
+  // - Branches directly to a compiled method if the offset is encodable in the instruction.
+  // - Branches to the trampoline stub if the offset to the compiled method is not encodable.
+  // - Branches to the compiled_to_interp stub if the target is interpreted.
+  //
+  // Further there are three relocations from the loads to the constants in
+  // the constant section.
+  //
+  // The use of r1 and r2 in the stubs makes it possible to distinguish them.
+  enc_class enc_java_static_call(method meth) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_bl);
+
+    MacroAssembler _masm(&cbuf);
+    address entry_point = (address)$meth$$method;
+
+    if (!_method) {
+      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
+      emit_call_with_trampoline_stub(_masm, entry_point, relocInfo::runtime_call_type);
+    } else {
+      // Remember the offset not the address.
+      const int start_offset = __ offset();
+      // The trampoline stub.
+      if (!Compile::current()->in_scratch_emit_size()) {
+        // No entry point given, use the current pc; this guarantees that
+        // the branch-and-link below fits.
+        if (entry_point == 0) entry_point = __ pc();
+
+        // Put the entry point as a constant into the constant pool.
+        const address entry_point_toc_addr   = __ address_constant(entry_point, RelocationHolder::none);
+        const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
+
+        // Emit the trampoline stub which will be related to the branch-and-link below.
+        emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+        __ relocate(_optimized_virtual ?
+                    relocInfo::opt_virtual_call_type : relocInfo::static_call_type);
+      }
+
+      // The real call.
+      // Note: At this point we do not have the address of the trampoline
+      // stub, and the entry point might be too far away for bl, so __ pc()
+      // serves as dummy and the bl will be patched later.
+      cbuf.set_insts_mark();
+      __ bl(__ pc());  // Emits a relocation.
+
+      // The stub for call to interpreter.
+      CompiledStaticCall::emit_to_interp_stub(cbuf);
+    }
+  %}
+
+  // Emit a method handle call.
+  //
+  // Method handle calls from compiled to compiled code go through a
+  // c2i -> i2c adapter, which extends the frame for their arguments. The
+  // callee, however, returns directly to the compiled caller, which has
+  // to cope with the extended frame. We restore the original frame by
+  // loading the caller's sp and adding the calculated framesize.
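+  //
+  // In effect: new R1_SP = caller_sp - align(framesize), where caller_sp
+  // is loaded from the back link at [R1_SP + 0] (see the code below).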
+  enc_class enc_java_handle_call(method meth) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    address entry_point = (address)$meth$$method;
+
+    // Remember the offset not the address.
+    const int start_offset = __ offset();
+    // The trampoline stub.
+    if (!ra_->C->in_scratch_emit_size()) {
+      // No entry point given, use the current pc; this guarantees that
+      // the branch-and-link below fits.
+      if (entry_point == 0) entry_point = __ pc();
+
+      // Put the entry point as a constant into the constant pool.
+      const address entry_point_toc_addr   = __ address_constant(entry_point, RelocationHolder::none);
+      const int     entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
+
+      // Emit the trampoline stub which will be related to the branch-and-link below.
+      emit_trampoline_stub(_masm, entry_point_toc_offset, start_offset);
+      assert(_optimized_virtual, "methodHandle call should be a virtual call");
+      __ relocate(relocInfo::opt_virtual_call_type);
+    }
+
+    // The real call.
+    // Note: At this point we do not have the address of the trampoline
+    // stub, and the entry point might be too far away for bl, so __ pc()
+    // serves as dummy and the bl will be patched later.
+    cbuf.set_insts_mark();
+    __ bl(__ pc());  // Emits a relocation.
+
+    assert(_method, "execute next statement conditionally");
+    // The stub for call to interpreter.
+    CompiledStaticCall::emit_to_interp_stub(cbuf);
+
+    // Restore original sp.
+    __ ld(R11_scratch1, 0, R1_SP); // Load caller sp.
+    const long framesize = ra_->C->frame_slots() << LogBytesPerInt;
+    unsigned int bytes = (unsigned int)framesize;
+    long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
+    if (Assembler::is_simm(-offset, 16)) {
+      __ addi(R1_SP, R11_scratch1, -offset);
+    } else {
+      __ load_const_optimized(R12_scratch2, -offset);
+      __ add(R1_SP, R11_scratch1, R12_scratch2);
+    }
+#ifdef ASSERT
+    __ ld(R12_scratch2, 0, R1_SP); // Load from unextended_sp.
+    __ cmpd(CCR0, R11_scratch1, R12_scratch2);
+    __ asm_assert_eq("backlink changed", 0x8000);
+#endif
+    // If this assert fails, the backlink should be stored before unextending.
+
+    if (ra_->C->env()->failing()) {
+      return;
+    }
+  %}
+
+  // Second node of expanded dynamic call - the call.
+  enc_class enc_java_dynamic_call_sched(method meth) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_bl);
+
+    MacroAssembler _masm(&cbuf);
+
+    if (!ra_->C->in_scratch_emit_size()) {
+      // Create a call trampoline stub for the given method.
+      const address entry_point = !($meth$$method) ? 0 : (address)$meth$$method;
+      const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
+      const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
+      emit_trampoline_stub(_masm, entry_point_const_toc_offset, __ offset());
+
+      if (ra_->C->env()->failing())
+        return;
+
+      // Build relocation at call site with ic position as data.
+      assert((_load_ic_hi_node != NULL && _load_ic_node == NULL) ||
+             (_load_ic_hi_node == NULL && _load_ic_node != NULL),
+             "must have one, but can't have both");
+      assert((_load_ic_hi_node != NULL && _load_ic_hi_node->_cbuf_insts_offset != -1) ||
+             (_load_ic_node != NULL    && _load_ic_node->_cbuf_insts_offset != -1),
+             "must contain instruction offset");
+      const int virtual_call_oop_addr_offset = _load_ic_hi_node != NULL
+        ? _load_ic_hi_node->_cbuf_insts_offset
+        : _load_ic_node->_cbuf_insts_offset;
+      const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
+      assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
+             "should be load from TOC");
+
+      __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr));
+    }
+
+    // At this point I do not have the address of the trampoline stub,
+    // and the entry point might be too far away for bl. Pc() serves
+    // as dummy and bl will be patched later.
+    __ bl((address) __ pc());
+  %}
+
+  // Postalloc expand emitter for virtual calls.
+  enc_class postalloc_expand_java_dynamic_call_sched(method meth, iRegLdst toc) %{
+
+    // Create the nodes for loading the IC from the TOC.
+    loadConLNodesTuple loadConLNodes_IC =
+      loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong)Universe::non_oop_word()),
+                                OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
+
+    // Create the call node.
+    CallDynamicJavaDirectSchedNode *call = new (C) CallDynamicJavaDirectSchedNode();
+    call->_method_handle_invoke = _method_handle_invoke;
+    call->_vtable_index      = _vtable_index;
+    call->_method            = _method;
+    call->_bci               = _bci;
+    call->_optimized_virtual = _optimized_virtual;
+    call->_tf                = _tf;
+    call->_entry_point       = _entry_point;
+    call->_cnt               = _cnt;
+    call->_argsize           = _argsize;
+    call->_oop_map           = _oop_map;
+    call->_jvms              = _jvms;
+    call->_jvmadj            = _jvmadj;
+    call->_in_rms            = _in_rms;
+    call->_nesting           = _nesting;
+
+    // New call needs all inputs of old call.
+    // Req...
+    for (uint i = 0; i < req(); ++i) {
+      // The expanded node does not need toc any more.
+      // Add the inline cache constant here instead. This expresses that
+      // the register of the inline cache must be live at the call.
+      // Otherwise we would have to adapt JVMState by -1.
+      if (i == mach_constant_base_node_input()) {
+        call->add_req(loadConLNodes_IC._last);        
+      } else {
+        call->add_req(in(i));
+      }
+    }
+    // ...as well as prec
+    for (uint i = req(); i < len(); ++i) {
+      call->add_prec(in(i));
+    }
+
+    // Remember nodes loading the inline cache into r19.
+    call->_load_ic_hi_node = loadConLNodes_IC._large_hi;
+    call->_load_ic_node    = loadConLNodes_IC._small;
+
+    // Operands for new nodes.
+    call->_opnds[0] = _opnds[0];
+    call->_opnds[1] = _opnds[1];
+
+    // Only the inline cache is associated with a register.
+    assert(Matcher::inline_cache_reg() == OptoReg::Name(R19_num), "ic reg should be R19");
+
+    // Push new nodes.
+    if (loadConLNodes_IC._large_hi) nodes->push(loadConLNodes_IC._large_hi);
+    if (loadConLNodes_IC._last)     nodes->push(loadConLNodes_IC._last);
+    nodes->push(call);
+  %}
+
+  // Compound version of call dynamic
+  enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    MacroAssembler _masm(&cbuf);
+    int start_offset = __ offset();
+
+    Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;
+#if 0
+    if (_vtable_index < 0) {
+      // Must be invalid_vtable_index, not nonvirtual_vtable_index.
+      assert(_vtable_index == Method::invalid_vtable_index, "correct sentinel value");
+      Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());
+      AddressLiteral meta = __ allocate_metadata_address((Metadata *)Universe::non_oop_word());
+
+      address virtual_call_meta_addr = __ pc();
+      __ load_const_from_method_toc(ic_reg, meta, Rtoc);
+      // CALL to fixup routine.  Fixup routine uses ScopeDesc info
+      // to determine who we intended to call.
+      __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
+      emit_call_with_trampoline_stub(_masm, (address)$meth$$method, relocInfo::none);
+      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
+             "Fix constant in ret_addr_offset()");
+    } else {
+      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
+      // Go thru the vtable. Get receiver klass. Receiver already
+      // checked for non-null. If we'll go thru a C2I adapter, the
+      // interpreter expects method in R19_method.
+
+      __ load_klass(R11_scratch1, R3);
+
+      int entry_offset = InstanceKlass::vtable_start_offset() + _vtable_index * vtableEntry::size();
+      int v_off = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
+      __ li(R19_method, v_off);
+      __ ldx(R19_method/*method oop*/, R19_method/*method offset*/, R11_scratch1/*class*/);
+      // NOTE: for vtable dispatches, the vtable entry will never be
+      // null. However it may very well end up in handle_wrong_method
+      // if the method is abstract for the particular class.
+      __ ld(R11_scratch1, in_bytes(Method::from_compiled_offset()), R19_method);
+      // Call target. Either compiled code or C2I adapter.
+      __ mtctr(R11_scratch1);
+      __ bctrl();
+      if (((MachCallDynamicJavaNode*)this)->ret_addr_offset() != __ offset() - start_offset) {
+        tty->print(" %d, %d\n", ((MachCallDynamicJavaNode*)this)->ret_addr_offset(),__ offset() - start_offset);
+      }
+      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
+             "Fix constant in ret_addr_offset()");
+    }
+#endif
+    guarantee(0, "Fix handling of toc edge: messes up derived/base pairs.");
+    Unimplemented();  // ret_addr_offset not yet fixed. Depends on compressed oops (load klass!).
+  %}
+
+  // a runtime call
+  enc_class enc_java_to_runtime_call (method meth) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+
+    MacroAssembler _masm(&cbuf);
+    const address start_pc = __ pc();
+
+    // The function we're going to call.
+    FunctionDescriptor fdtemp;
+    const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;
+
+    Register Rtoc = R12_scratch2;
+    // Calculate the method's TOC.
+    __ calculate_address_from_global_toc(Rtoc, __ method_toc());
+    // Put entry, env, toc into the constant pool, this needs up to 3 constant
+    // pool entries; call_c_using_toc will optimize the call.
+    __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
+
+    // Check the ret_addr_offset.
+    assert(((MachCallRuntimeNode*)this)->ret_addr_offset() ==  __ last_calls_return_pc() - start_pc,
+           "Fix constant in ret_addr_offset()");
+  %}
+
+  // Move to ctr for leaf call.
+  // This enc_class is needed so that the scheduler gets proper
+  // input mapping for latency computation.
+  enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mtctr);
+    MacroAssembler _masm(&cbuf);
+    __ mtctr($src$$Register);
+  %}
+
+  // Postalloc expand emitter for runtime leaf calls.
+  enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
+    // Get the struct that describes the function we are about to call.
+    FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
+    assert(fd, "need fd here");
+    // new nodes
+    loadConLNodesTuple loadConLNodes_Entry;
+    loadConLNodesTuple loadConLNodes_Env;
+    loadConLNodesTuple loadConLNodes_Toc;
+    MachNode         *mtctr = NULL;
+    MachCallLeafNode *call  = NULL;
+
+    // Create nodes and operands for loading the entry point.
+    loadConLNodes_Entry = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->entry()),
+                                                    OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
+
+    // Create nodes and operands for loading the env pointer.
+    if (fd->env() != NULL) {
+      loadConLNodes_Env = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->env()),
+                                                    OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
+    } else {
+      loadConLNodes_Env._large_hi = NULL;
+      loadConLNodes_Env._large_lo = NULL;
+      loadConLNodes_Env._small    = NULL;
+      loadConLNodes_Env._last = new (C) loadConL16Node();
+      loadConLNodes_Env._last->_opnds[0] = new (C) iRegLdstOper();
+      loadConLNodes_Env._last->_opnds[1] = new (C) immL16Oper(0);
+      ra_->set_pair(loadConLNodes_Env._last->_idx, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
+    }
+
+    // Create nodes and operands for loading the TOC pointer.
+    loadConLNodes_Toc = loadConLNodesTuple_create(C, ra_, n_toc, new (C) immLOper((jlong) fd->toc()),
+                                                  OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
+    // mtctr node
+    mtctr = new (C) CallLeafDirect_mtctrNode();
+
+    assert(loadConLNodes_Entry._last != NULL, "entry must exist");
+    mtctr->add_req(0, loadConLNodes_Entry._last);
+
+    mtctr->_opnds[0] = new (C) iRegLdstOper();
+    mtctr->_opnds[1] = new (C) iRegLdstOper();
+
+    // call node
+    call = new (C) CallLeafDirectNode();
+
+    call->_opnds[0] = _opnds[0];
+    call->_opnds[1] = new (C) methodOper((intptr_t) fd->entry()); // may get set later
+
+    // Make the new call node look like the old one.
+    call->_name        = _name;
+    call->_tf          = _tf;
+    call->_entry_point = _entry_point;
+    call->_cnt         = _cnt;
+    call->_argsize     = _argsize;
+    call->_oop_map     = _oop_map;
+    guarantee(!_jvms, "You must clone the jvms and adapt the offsets by fix_jvms().");
+    call->_jvms        = NULL;
+    call->_jvmadj      = _jvmadj;
+    call->_in_rms      = _in_rms;
+    call->_nesting     = _nesting;
+
+    // New call needs all inputs of old call.
+    // Req...
+    for (uint i = 0; i < req(); ++i) {
+      if (i != mach_constant_base_node_input()) {
+        call->add_req(in(i));
+      }
+    }
+
+    // These must be required edges, as the registers are live up to
+    // the call; otherwise the constants would be handled as kills.
+    call->add_req(mtctr);
+    call->add_req(loadConLNodes_Env._last);
+    call->add_req(loadConLNodes_Toc._last);
+
+    // ...as well as prec
+    for (uint i = req(); i < len(); ++i) {
+      call->add_prec(in(i));
+    }
+
+    // registers
+    ra_->set1(mtctr->_idx, OptoReg::Name(SR_CTR_num));
+
+    // Insert the new nodes.
+    if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
+    if (loadConLNodes_Entry._last)     nodes->push(loadConLNodes_Entry._last);
+    if (loadConLNodes_Env._large_hi)   nodes->push(loadConLNodes_Env._large_hi);
+    if (loadConLNodes_Env._last)       nodes->push(loadConLNodes_Env._last);
+    if (loadConLNodes_Toc._large_hi)   nodes->push(loadConLNodes_Toc._large_hi);
+    if (loadConLNodes_Toc._last)       nodes->push(loadConLNodes_Toc._last);
+    nodes->push(mtctr);
+    nodes->push(call);
+  %}
+%}
+
+//----------FRAME--------------------------------------------------------------
+// Definition of frame structure and management information.
+
+frame %{
+  // What direction does stack grow in (assumed to be same for native & Java).
+  stack_direction(TOWARDS_LOW);
+
+  // These two registers define part of the calling convention between
+  // compiled code and the interpreter.
+
+  // Inline Cache Register or method for I2C.
+  inline_cache_reg(R19); // R19_method
+
+  // Method Oop Register when calling interpreter.
+  interpreter_method_oop_reg(R19); // R19_method
+
+  // Optional: name the operand used by cisc-spilling to access
+  // [stack_pointer + offset].
+  cisc_spilling_operand_name(indOffset);
+
+  // Number of stack slots consumed by a Monitor enter.
+  sync_stack_slots((frame::jit_monitor_size / VMRegImpl::stack_slot_size));
+
+  // Compiled code's Frame Pointer.
+  frame_pointer(R1); // R1_SP
+
+  // Interpreter stores its frame pointer in a register which is
+  // stored to the stack by I2CAdaptors. I2CAdaptors convert from
+  // interpreted java to compiled java.
+  //
+  // R14_state holds pointer to caller's cInterpreter.
+  interpreter_frame_pointer(R14); // R14_state
+
+  stack_alignment(frame::alignment_in_bytes);
+
+  in_preserve_stack_slots((frame::jit_in_preserve_size / VMRegImpl::stack_slot_size));
+
+  // Number of outgoing stack slots killed above the
+  // out_preserve_stack_slots for calls to C. Supports the var-args
+  // backing area for register parms.
+  //
+  varargs_C_out_slots_killed(((frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));
+
+  // The after-PROLOG location of the return address. Location of
+  // return address specifies a type (REG or STACK) and a number
+  // representing the register number (i.e., use a register name) or
+  // stack slot.
+  //
+  // A: Link register is stored in stack slot ...
+  // M:  ... but it's in the caller's frame according to PPC-64 ABI.
+  // J: Therefore, we make sure that the link register is also in R11_scratch1
+  //    at the end of the prolog.
+  // B: We use R20, now.
+  //return_addr(REG R20);
+
+  // G: After reading the comments made by all the luminaries on their
+  //    failure to tell the compiler where the return address really is,
+  //    I hardly dare to try myself.  However, I'm convinced it's in slot
+  //    4, which apparently works and saves us some spills.
+  return_addr(STACK 4);
+
+  // This is the body of the function
+  //
+  // void Matcher::calling_convention(OptoRegPair* sig, // array of ideal regs
+  //                                  uint length,      // length of array
+  //                                  bool is_outgoing)
+  //
+  // The `sig' array is to be updated. sig[j] represents the location
+  // of the j-th argument, either a register or a stack slot.
+
+  // Comment taken from i486.ad:
+  // Body of function which returns an integer array locating
+  // arguments either in registers or in stack slots. Passed an array
+  // of ideal registers called "sig" and a "length" count. Stack-slot
+  // offsets are based on outgoing arguments, i.e. a CALLER setting up
+  // arguments for a CALLEE. Incoming stack arguments are
+  // automatically biased by the preserve_stack_slots field above.
+  calling_convention %{
+    // No difference between incoming/outgoing. Just pass false.
+    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
+  %}
+
+  // Comment taken from i486.ad:
+  // Body of function which returns an integer array locating
+  // arguments either in registers or in stack slots. Passed an array
+  // of ideal registers called "sig" and a "length" count. Stack-slot
+  // offsets are based on outgoing arguments, i.e. a CALLER setting up
+  // arguments for a CALLEE. Incoming stack arguments are
+  // automatically biased by the preserve_stack_slots field above.
+  c_calling_convention %{
+    // This is obviously always outgoing.
+    // C argument in register AND stack slot.
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
+  %}
+
+  // Location of native (C/C++) and interpreter return values. This
+  // is specified to be the same as Java. In the 32-bit VM, long
+  // values are actually returned from native calls in O0:O1 and
+  // returned to the interpreter in I0:I1. The copying to and from
+  // the register pairs is done by the appropriate call and epilog
+  // opcodes. This simplifies the register allocator.
+  c_return_value %{
+    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
+            (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
+            "only return normal values");
+    // enum names from opcodes.hpp:       Op_Node Op_Set Op_RegN       Op_RegI   Op_RegP   Op_RegF       Op_RegD   Op_RegL
+    static int typeToRegLo[Op_RegL+1] = { 0,      0,     R3_num,       R3_num,   R3_num,   F1_num,       F1_num,   R3_num   };
+    static int typeToRegHi[Op_RegL+1] = { 0,      0,     OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
+    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
+  %}
+
+  // Location of compiled Java return values. Same as C.
+  return_value %{
+    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
+            (ideal_reg == Op_RegN && Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0),
+            "only return normal values");
+    // enum names from opcodes.hpp:       Op_Node Op_Set Op_RegN       Op_RegI   Op_RegP   Op_RegF       Op_RegD   Op_RegL
+    static int typeToRegLo[Op_RegL+1] = { 0,      0,     R3_num,       R3_num,   R3_num,   F1_num,       F1_num,   R3_num   };
+    static int typeToRegHi[Op_RegL+1] = { 0,      0,     OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
+    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
+  %}
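+
+  // Example (illustrative, derived from the tables above): an int or
+  // long result is returned in R3 (reg pair R3_H_num:R3_num), a float
+  // in F1 (no hi half: OptoReg::Bad), and a double in the pair
+  // F1_H_num:F1_num.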
+%}
+
+
+//----------ATTRIBUTES---------------------------------------------------------
+
+//----------Operand Attributes-------------------------------------------------
+op_attrib op_cost(1);          // Required cost attribute.
+
+//----------Instruction Attributes---------------------------------------------
+
+// Cost attribute. Required.
+ins_attrib ins_cost(DEFAULT_COST);
+
+// Is this instruction a non-matching short branch variant of some
+// long branch? Not required.
+ins_attrib ins_short_branch(0);
+
+ins_attrib ins_is_TrapBasedCheckNode(true);
+
+// Number of constants.
+// This instruction uses the given number of constants
+// (optional attribute).
+// This is needed to determine early whether the constant pool will
+// exceed 4000 entries: the overall number of constants is determined
+// before postalloc_expand. It's also used to compute the constant pool
+// size in Output().
+ins_attrib ins_num_consts(0);
+
+// Required alignment attribute (must be a power of 2) specifies the
+// alignment that some part of the instruction (not necessarily the
+// start) requires. If > 1, a compute_padding() function must be
+// provided for the instruction.
+ins_attrib ins_alignment(1);
+
+// Enforce/prohibit rematerializations.
+// - If an instruction is attributed with 'ins_cannot_rematerialize(true)'
+//   then rematerialization of that instruction is prohibited and the
+//   instruction's value will be spilled if necessary.
+//   This causes MachNode::rematerialize() to return false.
+// - If an instruction is attributed with 'ins_should_rematerialize(true)'
+//   then rematerialization should be enforced and a copy of the instruction
+//   should be inserted if possible; rematerialization is not guaranteed.
+//   Note: this may result in rematerializations in front of every use.
+//   This allows MachNode::rematerialize() to return true.
+// (optional attribute)
+ins_attrib ins_cannot_rematerialize(false);
+ins_attrib ins_should_rematerialize(false);
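+
+// Illustrative usage (hypothetical instruct, not part of this file):
+// a cheap constant load might prefer re-execution over spilling:
+//   instruct loadConExample(iRegIdst dst, immI16 src) %{
+//     ...
+//     ins_should_rematerialize(true);
+//   %}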
+
+// Instruction has variable size depending on alignment.
+ins_attrib ins_variable_size_depending_on_alignment(false);
+
+// Instruction is a nop.
+ins_attrib ins_is_nop(false);
+
+// Instruction is mapped to a MachIfFastLock node (instead of MachFastLock).
+ins_attrib ins_use_mach_if_fast_lock_node(false);
+
+// Field for the toc offset of a constant.
+//
+// This is needed if the toc offset is not encodable as an immediate in
+// the PPC load instruction. If so, the upper (hi) bits of the offset are
+// added to the toc, and from this a load with immediate is performed.
+// With postalloc expand, we get two nodes that require the same offset
+// but which don't know about each other. The offset is only known
+// when the constant is added to the constant pool during emitting.
+// It is generated in the 'hi'-node adding the upper bits, and saved
+// in this node.  The 'lo'-node has a link to the 'hi'-node and reads
+// the offset from there when it gets encoded.
+ins_attrib ins_field_const_toc_offset(0);
+ins_attrib ins_field_const_toc_offset_hi_node(0);
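+
+// Illustrative sketch of the resulting code for a toc offset that does
+// not fit a 16-bit immediate (see loadConL_hi/loadConL_lo below):
+//   ADDIS Rtmp, Rtoc, offset.hi  // 'hi'-node: adds upper bits, records offset
+//   LD    Rdst, offset.lo(Rtmp)  // 'lo'-node: reads offset via its hi-node link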
+
+// A field that can hold the instruction's offset in the code buffer.
+// Set in the nodes emitter.
+ins_attrib ins_field_cbuf_insts_offset(-1);
+
+// Fields for referencing a call's load-IC-node.
+// If the toc offset cannot be encoded as an immediate in a load, we
+// use two nodes.
+ins_attrib ins_field_load_ic_hi_node(0);
+ins_attrib ins_field_load_ic_node(0);
+
+//----------OPERANDS-----------------------------------------------------------
+// Operand definitions must precede instruction definitions for correct
+// parsing in the ADLC because operands constitute user defined types
+// which are used in instruction definitions.
+//
+// Formats are generated automatically for constants and base registers.
+
+//----------Simple Operands----------------------------------------------------
+// Immediate Operands
+
+// Integer Immediate: 32-bit
+operand immI() %{
+  match(ConI);
+  op_cost(40);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immI8() %{
+  predicate(Assembler::is_simm(n->get_int(), 8));
+  op_cost(0);
+  match(ConI);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: 16-bit
+operand immI16() %{
+  predicate(Assembler::is_simm(n->get_int(), 16));
+  op_cost(0);
+  match(ConI);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Immediate: 32-bit, where lowest 16 bits are 0x0000.
+operand immIhi16() %{
+  predicate(((n->get_int() & 0xffff0000) != 0) && ((n->get_int() & 0xffff) == 0));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
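+
+// Example (illustrative): 0x12340000 matches immIhi16 (low 16 bits are
+// zero, high 16 bits non-zero); 0x12345678 and 0x0 do not.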
+
+operand immInegpow2() %{
+  predicate(is_power_of_2_long((jlong) (julong) (juint) (-(n->get_int()))));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immIpow2minus1() %{
+  predicate(is_power_of_2_long((((jlong) (n->get_int()))+1)));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immIpowerOf2() %{
+  predicate(is_power_of_2_long((((jlong) (julong) (juint) (n->get_int())))));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
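+
+// Illustrative values for the three power-of-2 operands above:
+// immInegpow2 matches -8 (8 == 2^3), immIpow2minus1 matches 7
+// (7 + 1 == 2^3), and immIpowerOf2 matches 8.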
+
+// Unsigned Integer Immediate: the values 0-31
+operand uimmI5() %{
+  predicate(Assembler::is_uimm(n->get_int(), 5));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Unsigned Integer Immediate: 6-bit
+operand uimmI6() %{
+  predicate(Assembler::is_uimm(n->get_int(), 6));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Unsigned Integer Immediate: 6-bit int, greater than or equal to 32.
+operand uimmI6_ge32() %{
+  predicate(Assembler::is_uimm(n->get_int(), 6) && n->get_int() >= 32);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Unsigned Integer Immediate: 15-bit
+operand uimmI15() %{
+  predicate(Assembler::is_uimm(n->get_int(), 15));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Unsigned Integer Immediate: 16-bit
+operand uimmI16() %{
+  predicate(Assembler::is_uimm(n->get_int(), 16));
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'int 0'.
+operand immI_0() %{
+  predicate(n->get_int() == 0);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'int 1'.
+operand immI_1() %{
+  predicate(n->get_int() == 1);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'int -1'.
+operand immI_minus1() %{
+  predicate(n->get_int() == -1);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// int value 16.
+operand immI_16() %{
+  predicate(n->get_int() == 16);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// int value 24.
+operand immI_24() %{
+  predicate(n->get_int() == 24);
+  match(ConI);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Compressed oops constants
+// Pointer Immediate
+operand immN() %{
+  match(ConN);
+
+  op_cost(10);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// NULL Pointer Immediate
+operand immN_0() %{
+  predicate(n->get_narrowcon() == 0);
+  match(ConN);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Compressed klass constants
+operand immNKlass() %{
+  match(ConNKlass);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// This operand can be used to prevent an instruct from being matched
+// via a chain rule.
+operand immNKlass_NM() %{
+  match(ConNKlass);
+  predicate(false);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Pointer Immediate: 64-bit
+operand immP() %{
+  match(ConP);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Operand to avoid match of loadConP.
+// This operand can be used to prevent an instruct from being matched
+// via a chain rule.
+operand immP_NM() %{
+  match(ConP);
+  predicate(false);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'pointer 0'.
+operand immP_0() %{
+  predicate(n->get_ptr() == 0);
+  match(ConP);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// pointer 0x0 or 0x1
+operand immP_0or1() %{
+  predicate((n->get_ptr() == 0) || (n->get_ptr() == 1));
+  match(ConP);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL() %{
+  match(ConL);
+  op_cost(40);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 16-bit
+operand immL16() %{
+  predicate(Assembler::is_simm(n->get_long(), 16));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 16-bit, 4-aligned
+operand immL16Alg4() %{
+  predicate(Assembler::is_simm(n->get_long(), 16) && ((n->get_long() & 0x3) == 0));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 32-bit, where lowest 16 bits are 0x0000.
+operand immL32hi16() %{
+  predicate(Assembler::is_simm(n->get_long(), 32) && ((n->get_long() & 0xffffL) == 0L));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 32-bit
+operand immL32() %{
+  predicate(Assembler::is_simm(n->get_long(), 32));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: 64-bit, where highest 16 bits are not 0x0000.
+operand immLhighest16() %{
+  predicate((n->get_long() & 0xffff000000000000L) != 0L && (n->get_long() & 0x0000ffffffffffffL) == 0L);
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
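+
+// Example (illustrative): 0x1234000000000000 matches immLhighest16;
+// 0x0000123400000000 does not, as its highest 16 bits are zero.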
+
+operand immLnegpow2() %{
+  predicate(is_power_of_2_long((jlong)-(n->get_long())));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immLpow2minus1() %{
+  predicate(is_power_of_2_long((((jlong) (n->get_long()))+1)) &&
+            (n->get_long() != (jlong)0xffffffffffffffffL));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'long 0'.
+operand immL_0() %{
+  predicate(n->get_long() == 0L);
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'long -1'.
+operand immL_minus1() %{
+  predicate(n->get_long() == -1L);
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Long Immediate: low 32-bit mask
+operand immL_32bits() %{
+  predicate(n->get_long() == 0xFFFFFFFFL);
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Unsigned Long Immediate: 16-bit
+operand uimmL16() %{
+  predicate(Assembler::is_uimm(n->get_long(), 16));
+  match(ConL);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Float Immediate
+operand immF() %{
+  match(ConF);
+  op_cost(40);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// constant 'float +0.0'.
+operand immF_0() %{
+  predicate((n->getf() == 0) &&
+            (fpclassify(n->getf()) == FP_ZERO) && (signbit(n->getf()) == 0));
+  match(ConF);
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Double Immediate
+operand immD() %{
+  match(ConD);
+  op_cost(40);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+// Integer Register Operands
+// Integer Destination Register
+// See definition of reg_class bits32_reg_rw.
+operand iRegIdst() %{
+  constraint(ALLOC_IN_RC(bits32_reg_rw));
+  match(RegI);
+  match(rscratch1RegI);
+  match(rscratch2RegI);
+  match(rarg1RegI);
+  match(rarg2RegI);
+  match(rarg3RegI);
+  match(rarg4RegI);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Integer Source Register
+// See definition of reg_class bits32_reg_ro.
+operand iRegIsrc() %{
+  constraint(ALLOC_IN_RC(bits32_reg_ro));
+  match(RegI);
+  match(rscratch1RegI);
+  match(rscratch2RegI);
+  match(rarg1RegI);
+  match(rarg2RegI);
+  match(rarg3RegI);
+  match(rarg4RegI);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rscratch1RegI() %{
+  constraint(ALLOC_IN_RC(rscratch1_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rscratch2RegI() %{
+  constraint(ALLOC_IN_RC(rscratch2_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg1RegI() %{
+  constraint(ALLOC_IN_RC(rarg1_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg2RegI() %{
+  constraint(ALLOC_IN_RC(rarg2_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg3RegI() %{
+  constraint(ALLOC_IN_RC(rarg3_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg4RegI() %{
+  constraint(ALLOC_IN_RC(rarg4_bits32_reg));
+  match(iRegIdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg1RegL() %{
+  constraint(ALLOC_IN_RC(rarg1_bits64_reg));
+  match(iRegLdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg2RegL() %{
+  constraint(ALLOC_IN_RC(rarg2_bits64_reg));
+  match(iRegLdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg3RegL() %{
+  constraint(ALLOC_IN_RC(rarg3_bits64_reg));
+  match(iRegLdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg4RegL() %{
+  constraint(ALLOC_IN_RC(rarg4_bits64_reg));
+  match(iRegLdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer Destination Register
+// See definition of reg_class bits64_reg_rw.
+operand iRegPdst() %{
+  constraint(ALLOC_IN_RC(bits64_reg_rw));
+  match(RegP);
+  match(rscratch1RegP);
+  match(rscratch2RegP);
+  match(rarg1RegP);
+  match(rarg2RegP);
+  match(rarg3RegP);
+  match(rarg4RegP);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer Destination Register
+// Operand not using r11 and r12 (killed in epilog).
+operand iRegPdstNoScratch() %{
+  constraint(ALLOC_IN_RC(bits64_reg_leaf_call));
+  match(RegP);
+  match(rarg1RegP);
+  match(rarg2RegP);
+  match(rarg3RegP);
+  match(rarg4RegP);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Pointer Source Register
+// See definition of reg_class bits64_reg_ro.
+operand iRegPsrc() %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(RegP);
+  match(iRegPdst);
+  match(rscratch1RegP);
+  match(rscratch2RegP);
+  match(rarg1RegP);
+  match(rarg2RegP);
+  match(rarg3RegP);
+  match(rarg4RegP);
+  match(threadRegP);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Thread operand.
+operand threadRegP() %{
+  constraint(ALLOC_IN_RC(thread_bits64_reg));
+  match(iRegPdst);
+  format %{ "R16" %}
+  interface(REG_INTER);
+%}
+
+operand rscratch1RegP() %{
+  constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
+  match(iRegPdst);
+  format %{ "R11" %}
+  interface(REG_INTER);
+%}
+
+operand rscratch2RegP() %{
+  constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
+  match(iRegPdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg1RegP() %{
+  constraint(ALLOC_IN_RC(rarg1_bits64_reg));
+  match(iRegPdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg2RegP() %{
+  constraint(ALLOC_IN_RC(rarg2_bits64_reg));
+  match(iRegPdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg3RegP() %{
+  constraint(ALLOC_IN_RC(rarg3_bits64_reg));
+  match(iRegPdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rarg4RegP() %{
+  constraint(ALLOC_IN_RC(rarg4_bits64_reg));
+  match(iRegPdst);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand iRegNsrc() %{
+  constraint(ALLOC_IN_RC(bits32_reg_ro));
+  match(RegN);
+  match(iRegNdst);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand iRegNdst() %{
+  constraint(ALLOC_IN_RC(bits32_reg_rw));
+  match(RegN);
+
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Long Destination Register
+// See definition of reg_class bits64_reg_rw.
+operand iRegLdst() %{
+  constraint(ALLOC_IN_RC(bits64_reg_rw));
+  match(RegL);
+  match(rscratch1RegL);
+  match(rscratch2RegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Long Source Register
+// See definition of reg_class bits64_reg_ro.
+operand iRegLsrc() %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(RegL);
+  match(iRegLdst);
+  match(rscratch1RegL);
+  match(rscratch2RegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Special operand for ConvL2I.
+operand iRegL2Isrc(iRegLsrc reg) %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(ConvL2I reg);
+  format %{ "ConvL2I($reg)" %}
+  interface(REG_INTER)
+%}
+
+operand rscratch1RegL() %{
+  constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
+  match(RegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand rscratch2RegL() %{
+  constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
+  match(RegL);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Condition Code Flag Registers
+operand flagsReg() %{
+  constraint(ALLOC_IN_RC(int_flags));
+  match(RegFlags);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Condition Code Flag Register CR0
+operand flagsRegCR0() %{
+  constraint(ALLOC_IN_RC(int_flags_CR0));
+  match(RegFlags);
+  format %{ "CR0" %}
+  interface(REG_INTER);
+%}
+
+operand flagsRegCR1() %{
+  constraint(ALLOC_IN_RC(int_flags_CR1));
+  match(RegFlags);
+  format %{ "CR1" %}
+  interface(REG_INTER);
+%}
+
+operand flagsRegCR6() %{
+  constraint(ALLOC_IN_RC(int_flags_CR6));
+  match(RegFlags);
+  format %{ "CR6" %}
+  interface(REG_INTER);
+%}
+
+operand regCTR() %{
+  constraint(ALLOC_IN_RC(ctr_reg));
+  // RegFlags should work. Introducing a RegSpecial type would cause a
+  // lot of changes.
+  match(RegFlags);
+  format %{"SR_CTR" %}
+  interface(REG_INTER);
+%}
+
+operand regD() %{
+  constraint(ALLOC_IN_RC(dbl_reg));
+  match(RegD);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand regF() %{
+  constraint(ALLOC_IN_RC(flt_reg));
+  match(RegF);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Special Registers
+
+// Method Register
+operand inline_cache_regP(iRegPdst reg) %{
+  constraint(ALLOC_IN_RC(r19_bits64_reg)); // inline_cache_reg
+  match(reg);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand compiler_method_oop_regP(iRegPdst reg) %{
+  constraint(ALLOC_IN_RC(rscratch1_bits64_reg)); // compiler_method_oop_reg
+  match(reg);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+operand interpreter_method_oop_regP(iRegPdst reg) %{
+  constraint(ALLOC_IN_RC(r19_bits64_reg)); // interpreter_method_oop_reg
+  match(reg);
+  format %{ %}
+  interface(REG_INTER);
+%}
+
+// Operands to remove register moves in unscaled mode.
+// Match read/write registers with an EncodeP node if neither shift nor add is required.
+operand iRegP2N(iRegPsrc reg) %{
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/&& Universe::narrow_oop_shift() == 0);
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(EncodeP reg);
+  format %{ "$reg" %}
+  interface(REG_INTER)
+%}
+
+operand iRegN2P(iRegNsrc reg) %{
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
+  constraint(ALLOC_IN_RC(bits32_reg_ro));
+  match(DecodeN reg);
+  match(DecodeNKlass reg);
+  format %{ "$reg" %}
+  interface(REG_INTER)
+%}
+
+//----------Complex Operands---------------------------------------------------
+// Indirect Memory Reference
+operand indirect(iRegPsrc reg) %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(reg);
+  op_cost(100);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+// Indirect with Offset
+operand indOffset16(iRegPsrc reg, immL16 offset) %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(AddP reg offset);
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with 4-aligned Offset
+operand indOffset16Alg4(iRegPsrc reg, immL16Alg4 offset) %{
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(AddP reg offset);
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+//----------Complex Operands for Compressed OOPs-------------------------------
+// Compressed OOPs with narrow_oop_shift == 0.
+
+// Indirect Memory Reference, compressed OOP
+operand indirectNarrow(iRegNsrc reg) %{
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(DecodeN reg);
+  match(DecodeNKlass reg);
+  op_cost(100);
+  format %{ "[$reg]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp(0x0);
+  %}
+%}
+
+// Indirect with Offset, compressed OOP
+operand indOffset16Narrow(iRegNsrc reg, immL16 offset) %{
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(AddP (DecodeN reg) offset);
+  match(AddP (DecodeNKlass reg) offset);
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+// Indirect with 4-aligned Offset, compressed OOP
+operand indOffset16NarrowAlg4(iRegNsrc reg, immL16Alg4 offset) %{
+  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
+  constraint(ALLOC_IN_RC(bits64_reg_ro));
+  match(AddP (DecodeN reg) offset);
+  match(AddP (DecodeNKlass reg) offset);
+  op_cost(100);
+  format %{ "[$reg + $offset]" %}
+  interface(MEMORY_INTER) %{
+    base($reg);
+    index(0x0);
+    scale(0x0);
+    disp($offset);
+  %}
+%}
+
+//----------Special Memory Operands--------------------------------------------
+// Stack Slot Operand
+//
+// This operand is used for loading and storing temporary values on
+// the stack where a match requires a value to flow through memory.
+operand stackSlotI(sRegI reg) %{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  //match(RegI);
+  format %{ "[sp+$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1);   // R1_SP
+    index(0x0);
+    scale(0x0);
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotL(sRegL reg) %{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  //match(RegL);
+  format %{ "[sp+$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1);   // R1_SP
+    index(0x0);
+    scale(0x0);
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotP(sRegP reg) %{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  //match(RegP);
+  format %{ "[sp+$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1);   // R1_SP
+    index(0x0);
+    scale(0x0);
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotF(sRegF reg) %{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  //match(RegF);
+  format %{ "[sp+$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1);   // R1_SP
+    index(0x0);
+    scale(0x0);
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+operand stackSlotD(sRegD reg) %{
+  constraint(ALLOC_IN_RC(stack_slots));
+  op_cost(100);
+  //match(RegD);
+  format %{ "[sp+$reg]" %}
+  interface(MEMORY_INTER) %{
+    base(0x1);   // R1_SP
+    index(0x0);
+    scale(0x0);
+    disp($reg);  // Stack Offset
+  %}
+%}
+
+// Operands for expressing Control Flow
+// NOTE: Label is a predefined operand which should not be redefined in
+//       the AD file. It is generically handled within the ADLC.
+
+//----------Conditional Branch Operands----------------------------------------
+// Comparison Op
+//
+// This is the operation of the comparison, and is limited to the
+// following set of codes: L (<), LE (<=), G (>), GE (>=), E (==), NE
+// (!=).
+//
+// Other attributes of the comparison, such as unsignedness, are specified
+// by the comparison instruction that sets a condition code flags register.
+// That result is represented by a flags operand whose subtype is appropriate
+// to the unsignedness (etc.) of the comparison.
+//
+// Later, the instruction which matches both the Comparison Op (a Bool) and
+// the flags (produced by the Cmp) specifies the coding of the comparison op
+// by matching a specific subtype of Bool operand below.
+
+// When used for floating point comparisons: unordered same as less.
+operand cmpOp() %{
+  match(Bool);
+  format %{ "" %}
+  interface(COND_INTER) %{
+                           // BO only encodes bit 4 of bcondCRbiIsX, as bits 1-3 are always '100'.
+                           //           BO          &  BI
+    equal(0xA);            // 10 10:   bcondCRbiIs1 & Condition::equal
+    not_equal(0x2);        // 00 10:   bcondCRbiIs0 & Condition::equal
+    less(0x8);             // 10 00:   bcondCRbiIs1 & Condition::less
+    greater_equal(0x0);    // 00 00:   bcondCRbiIs0 & Condition::less
+    less_equal(0x1);       // 00 01:   bcondCRbiIs0 & Condition::greater
+    greater(0x9);          // 10 01:   bcondCRbiIs1 & Condition::greater
+    overflow(0xB);         // 10 11:   bcondCRbiIs1 & Condition::summary_overflow
+    no_overflow(0x3);      // 00 11:   bcondCRbiIs0 & Condition::summary_overflow
+  %}
+%}
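+
+// Worked example (derived from the table above): 'equal' encodes as
+// 0xA, i.e. binary '10 10': the leading '10' selects bcondCRbiIs1
+// (branch if the CR bit is set), and the trailing '10' selects
+// Condition::equal, the EQ bit of the CR field.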
+
+//----------OPERAND CLASSES----------------------------------------------------
+// Operand Classes are groups of operands that are used to simplify
+// instruction definitions by not requiring the AD writer to specify
+// separate instructions for every form of operand when the
+// instruction accepts multiple operand types with the same basic
+// encoding and format. The classic case of this is memory operands.
+// Indirect is not included since its use is limited to Compare & Swap.
+
+opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indOffset16Narrow);
+// Memory operand where offsets are 4-aligned. Required for ld, std.
+opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4);
+opclass indirectMemory(indirect, indirectNarrow);
+
+// Special opclass for I and ConvL2I.
+opclass iRegIsrc_iRegL2Isrc(iRegIsrc, iRegL2Isrc);
+
+// Operand classes to match encode and decode. iRegN_P2N is only used
+// for storeN. I have never seen an encode node elsewhere.
+opclass iRegN_P2N(iRegNsrc, iRegP2N);
+opclass iRegP_N2P(iRegPsrc, iRegN2P);
+
+//----------PIPELINE-----------------------------------------------------------
+
+pipeline %{
+
+// See J. M. Tendler et al., "POWER4 system microarchitecture", IBM
+// J. Res. & Dev., Vol. 46, No. 1, Jan. 2002.
+
+//----------ATTRIBUTES---------------------------------------------------------
+attributes %{
+
+  // Power4 instructions are of fixed length.
+  fixed_size_instructions;
+
+  // TODO: if `bundle' means the number of instructions fetched
+  // per cycle, this is 8. If `bundle' means a Power4 `group', i.e. the
+  // max instructions issued per cycle, this is 5.
+  max_instructions_per_bundle = 8;
+
+  // A Power4 instruction is 4 bytes long.
+  instruction_unit_size = 4;
+
+  // The Power4 processor fetches 64 bytes...
+  instruction_fetch_unit_size = 64;
+
+  // ...in one line
+  instruction_fetch_units = 1
+
+  // Unused; list one so that the array generated by adlc is not empty.
+  // Aix compiler chokes if _nop_count = 0.
+  nops(fxNop);
+%}
+
+//----------RESOURCES----------------------------------------------------------
+// Resources are the functional units available to the machine
+resources(
+   PPC_BR,         // branch unit
+   PPC_CR,         // condition unit
+   PPC_FX1,        // integer arithmetic unit 1
+   PPC_FX2,        // integer arithmetic unit 2
+   PPC_LDST1,      // load/store unit 1
+   PPC_LDST2,      // load/store unit 2
+   PPC_FP1,        // float arithmetic unit 1
+   PPC_FP2,        // float arithmetic unit 2
+   PPC_LDST = PPC_LDST1 | PPC_LDST2,
+   PPC_FX = PPC_FX1 | PPC_FX2,
+   PPC_FP = PPC_FP1 | PPC_FP2
+ );
+
+//----------PIPELINE DESCRIPTION-----------------------------------------------
+// Pipeline Description specifies the stages in the machine's pipeline
+pipe_desc(
+   // Power4 longest pipeline path
+   PPC_IF,   // instruction fetch
+   PPC_IC,   // instruction cache access
+   //PPC_BP, // branch prediction
+   PPC_D0,   // decode
+   PPC_D1,   // decode
+   PPC_D2,   // decode
+   PPC_D3,   // decode
+   PPC_Xfer1,
+   PPC_GD,   // group definition
+   PPC_MP,   // map
+   PPC_ISS,  // issue
+   PPC_RF,   // resource fetch
+   PPC_EX1,  // execute (all units)
+   PPC_EX2,  // execute (FP, LDST)
+   PPC_EX3,  // execute (FP, LDST)
+   PPC_EX4,  // execute (FP)
+   PPC_EX5,  // execute (FP)
+   PPC_EX6,  // execute (FP)
+   PPC_WB,   // write back
+   PPC_Xfer2,
+   PPC_CP    // completion
+ );
+
+//----------PIPELINE CLASSES---------------------------------------------------
+// Pipeline Classes describe the stages in which input and output are
+// referenced by the hardware pipeline.
+
+// Simple pipeline classes.
+
+// Default pipeline class.
+pipe_class pipe_class_default() %{
+  single_instruction;
+  fixed_latency(2);
+%}
+
+// Pipeline class for empty instructions.
+pipe_class pipe_class_empty() %{
+  single_instruction;
+  fixed_latency(0);
+%}
+
+// Pipeline class for compares.
+pipe_class pipe_class_compare() %{
+  single_instruction;
+  fixed_latency(16);
+%}
+
+// Pipeline class for traps.
+pipe_class pipe_class_trap() %{
+  single_instruction;
+  fixed_latency(100);
+%}
+
+// Pipeline class for memory operations.
+pipe_class pipe_class_memory() %{
+  single_instruction;
+  fixed_latency(16);
+%}
+
+// Pipeline class for call.
+pipe_class pipe_class_call() %{
+  single_instruction;
+  fixed_latency(100);
+%}
+
+// Define the class for the Nop node.
+define %{
+   MachNop = pipe_class_default;
+%}
+
+%}
+
+//----------INSTRUCTIONS-------------------------------------------------------
+
+// Naming of instructions:
+//   opA_operB / opA_operB_operC:
+//     Operation 'op' with one or two source operands 'oper'. Result
+//     type is A, source operand types are B and C.
+//     Iff A == B == C, B and C are left out.
+//
+// The instructions are ordered according to the following scheme:
+//  - loads
+//  - load constants
+//  - prefetch
+//  - store
+//  - encode/decode
+//  - membar
+//  - conditional moves
+//  - compare & swap
+//  - arithmetic and logic operations
+//    * int: Add, Sub, Mul, Div, Mod
+//    * int: lShift, arShift, urShift, rot
+//    * float: Add, Sub, Mul, Div
+//    * and, or, xor ...
+//  - register moves: float <-> int, reg <-> stack, repl
+//  - cast (high level type cast: XtoP, castPP, castII, not_null etc.)
+//  - conv (low level type cast requiring bit changes (sign extend etc.))
+//  - compares, range & zero checks.
+//  - branches
+//  - complex operations, intrinsics, min, max, replicate
+//  - lock
+//  - Calls
+//
+// If there are similar instructions with different types they are sorted:
+// int before float
+// small before big
+// signed before unsigned
+// e.g., loadS before loadUS before loadI before loadF.
+
+
+//----------Load/Store Instructions--------------------------------------------
+
+//----------Load Instructions--------------------------------------------------
+
+// Converts byte to int.
+// As convB2I_reg, but without match rule.  The match rule of convB2I_reg
+// reuses the 'amount' operand, but adlc expects that operand specification
+// and operands in match rule are equivalent.
+instruct convB2I_reg_2(iRegIdst dst, iRegIsrc src) %{
+  effect(DEF dst, USE src);
+  format %{ "EXTSB   $dst, $src \t// byte->int" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsb);
+    __ extsb($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct loadUB_indirect(iRegIdst dst, indirectMemory mem) %{
+  // match-rule, false predicate
+  match(Set dst (LoadB mem));
+  predicate(false);
+
+  format %{ "LBZ     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lbz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct loadUB_indirect_ac(iRegIdst dst, indirectMemory mem) %{
+  // match-rule, false predicate
+  match(Set dst (LoadB mem));
+  predicate(false);
+
+  format %{ "LBZ     $dst, $mem\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lbz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
+instruct loadB_indirect_Ex(iRegIdst dst, indirectMemory mem) %{
+  match(Set dst (LoadB mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+  expand %{
+    iRegIdst tmp;
+    loadUB_indirect(tmp, mem);
+    convB2I_reg_2(dst, tmp);
+  %}
+%}
+
+instruct loadB_indirect_ac_Ex(iRegIdst dst, indirectMemory mem) %{
+  match(Set dst (LoadB mem));
+  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
+  expand %{
+    iRegIdst tmp;
+    loadUB_indirect_ac(tmp, mem);
+    convB2I_reg_2(dst, tmp);
+  %}
+%}
+
+instruct loadUB_indOffset16(iRegIdst dst, indOffset16 mem) %{
+  // match-rule, false predicate
+  match(Set dst (LoadB mem));
+  predicate(false);
+
+  format %{ "LBZ     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lbz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct loadUB_indOffset16_ac(iRegIdst dst, indOffset16 mem) %{
+  // match-rule, false predicate
+  match(Set dst (LoadB mem));
+  predicate(false);
+
+  format %{ "LBZ     $dst, $mem\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lbz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
+instruct loadB_indOffset16_Ex(iRegIdst dst, indOffset16 mem) %{
+  match(Set dst (LoadB mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
+
+  expand %{
+    iRegIdst tmp;
+    loadUB_indOffset16(tmp, mem);
+    convB2I_reg_2(dst, tmp);
+  %}
+%}
+
+instruct loadB_indOffset16_ac_Ex(iRegIdst dst, indOffset16 mem) %{
+  match(Set dst (LoadB mem));
+  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
+
+  expand %{
+    iRegIdst tmp;
+    loadUB_indOffset16_ac(tmp, mem);
+    convB2I_reg_2(dst, tmp);
+  %}
+%}
+
+// Load Unsigned Byte (8bit UNsigned) into an int reg.
+instruct loadUB(iRegIdst dst, memory mem) %{
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  match(Set dst (LoadUB mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LBZ     $dst, $mem \t// byte, zero-extend to int" %}
+  size(4);
+  ins_encode( enc_lbz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Unsigned Byte (8bit UNsigned) acquire.
+instruct loadUB_ac(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadUB mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LBZ     $dst, $mem \t// byte, zero-extend to int, acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lbz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Unsigned Byte (8bit UNsigned) into a Long Register.
+instruct loadUB2L(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadUB mem)));
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LBZ     $dst, $mem \t// byte, zero-extend to long" %}
+  size(4);
+  ins_encode( enc_lbz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct loadUB2L_ac(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadUB mem)));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LBZ     $dst, $mem \t// byte, zero-extend to long, acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lbz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Short (16bit signed)
+instruct loadS(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadS mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LHA     $dst, $mem" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lha);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lha($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Short (16bit signed) acquire.
+instruct loadS_ac(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadS mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LHA     $dst, $mem\t acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lha($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Char (16bit unsigned)
+instruct loadUS(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadUS mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LHZ     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lhz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Char (16bit unsigned) acquire.
+instruct loadUS_ac(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadUS mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LHZ     $dst, $mem \t// acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lhz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned) into a Long Register.
+instruct loadUS2L(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadUS mem)));
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LHZ     $dst, $mem \t// short, zero-extend to long" %}
+  size(4);
+  ins_encode( enc_lhz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Unsigned Short/Char (16bit UNsigned) into a Long Register acquire.
+instruct loadUS2L_ac(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadUS mem)));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LHZ     $dst, $mem \t// short, zero-extend to long, acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lhz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Integer.
+instruct loadI(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadI mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Integer acquire.
+instruct loadI_ac(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadI mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// load acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lwz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Match loading an integer and casting it to an unsigned int in a
+// long register.
+// LoadI + ConvI2L + AndL 0xffffffff.
+instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
+  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
+  predicate(_kids[0]->_kids[0]->_leaf->as_Load()->is_unordered());
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// zero-extend to long" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Match loading integer and casting it to long.
+instruct loadI2L(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadI mem)));
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWA     $dst, $mem \t// loadI2L" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwa);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Match loading integer and casting it to long - acquire.
+instruct loadI2L_ac(iRegLdst dst, memory mem) %{
+  match(Set dst (ConvI2L (LoadI mem)));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LWA     $dst, $mem \t// loadI2L acquire"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwa);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
+    __ twi_0($dst$$Register);
+    __ isync();
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Long - aligned
+instruct loadL(iRegLdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadL mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// long" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Long - aligned acquire.
+instruct loadL_ac(iRegLdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadL mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// long acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_ld_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Long - UNaligned
+instruct loadL_unaligned(iRegLdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadL_unaligned mem));
+  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// unaligned long" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load nodes for superwords
+
+// Load Aligned Packed Byte
+instruct loadV8(iRegLdst dst, memoryAlg4 mem) %{
+  predicate(n->as_LoadVector()->memory_size() == 8);
+  match(Set dst (LoadVector mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// load 8-byte Vector" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Range, range = array length (=jint)
+instruct loadRange(iRegIdst dst, memory mem) %{
+  match(Set dst (LoadRange mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// range" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Compressed Pointer
+instruct loadN(iRegNdst dst, memory mem) %{
+  match(Set dst (LoadN mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// load compressed ptr" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Compressed Pointer acquire.
+instruct loadN_ac(iRegNdst dst, memory mem) %{
+  match(Set dst (LoadN mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// load acquire compressed ptr\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_lwz_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Compressed Pointer and decode it if narrow_oop_shift == 0.
+instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
+  match(Set dst (DecodeN (LoadN mem)));
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && Universe::narrow_oop_shift() == 0);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// DecodeN (unscaled)" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Pointer
+instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadP mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// ptr" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Pointer acquire.
+instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadP mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// ptr acquire\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_ld_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// LoadP + CastP2L
+instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{
+  match(Set dst (CastP2X (LoadP mem)));
+  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// ptr + p2x" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load compressed klass pointer.
+instruct loadNKlass(iRegNdst dst, memory mem) %{
+  match(Set dst (LoadNKlass mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $mem \t// compressed klass ptr" %}
+  size(4);
+  ins_encode( enc_lwz(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//// Load compressed klass and decode it if narrow_klass_shift == 0.
+//// TODO: will narrow_klass_shift ever be 0?
+//instruct decodeNKlass2Klass(iRegPdst dst, memory mem) %{
+//  match(Set dst (DecodeNKlass (LoadNKlass mem)));
+//  predicate(false /* TODO: PPC port Universe::narrow_klass_shift() == 0*);
+//  ins_cost(MEMORY_REF_COST);
+//
+//  format %{ "LWZ     $dst, $mem \t// DecodeNKlass (unscaled)" %}
+//  size(4);
+//  ins_encode( enc_lwz(dst, mem) );
+//  ins_pipe(pipe_class_memory);
+//%}
+
+// Load Klass Pointer
+instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
+  match(Set dst (LoadKlass mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// klass ptr" %}
+  size(4);
+  ins_encode( enc_ld(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Float
+instruct loadF(regF dst, memory mem) %{
+  match(Set dst (LoadF mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LFS     $dst, $mem" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Float acquire.
+instruct loadF_ac(regF dst, memory mem) %{
+  match(Set dst (LoadF mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LFS     $dst, $mem \t// acquire\n\t"
+            "FCMPU   cr0, $dst, $dst\n\t"
+            "BNE     cr0, next\n"
+            "next:\n\t"
+            "ISYNC" %}
+  size(16);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    Label next;
+    __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
+    __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
+    __ bne(CCR0, next);
+    __ bind(next);
+    __ isync();
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Double - aligned
+instruct loadD(regD dst, memory mem) %{
+  match(Set dst (LoadD mem));
+  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LFD     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lfd(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Double - aligned acquire.
+instruct loadD_ac(regD dst, memory mem) %{
+  match(Set dst (LoadD mem));
+  ins_cost(3*MEMORY_REF_COST);
+
+  format %{ "LFD     $dst, $mem \t// acquire\n\t"
+            "FCMPU   cr0, $dst, $dst\n\t"
+            "BNE     cr0, next\n"
+            "next:\n\t"
+            "ISYNC" %}
+  size(16);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    Label next;
+    __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
+    __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
+    __ bne(CCR0, next);
+    __ bind(next);
+    __ isync();
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load Double - UNaligned
+instruct loadD_unaligned(regD dst, memory mem) %{
+  match(Set dst (LoadD_unaligned mem));
+  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LFD     $dst, $mem" %}
+  size(4);
+  ins_encode( enc_lfd(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Constants--------------------------------------------------------
+
+// Load MachConstantTableBase: add hi offset to global toc.
+// TODO: Handle hidden register r29 in bundler!
+instruct loadToc_hi(iRegLdst dst) %{
+  effect(DEF dst);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ADDIS   $dst, R29, DISP.hi \t// load TOC hi" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ calculate_address_from_global_toc_hi16only($dst$$Register, __ method_toc());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load MachConstantTableBase: add lo offset to global toc.
+instruct loadToc_lo(iRegLdst dst, iRegLdst src) %{
+  effect(DEF dst, USE src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ADDI    $dst, $src, DISP.lo \t// load TOC lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
+    __ calculate_address_from_global_toc_lo16only($dst$$Register, __ method_toc());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load 16-bit integer constant 0xssss????
+instruct loadConI16(iRegIdst dst, immI16 src) %{
+  match(Set dst src);
+
+  format %{ "LI      $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load integer constant 0x????0000
+instruct loadConIhi16(iRegIdst dst, immIhi16 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "LIS     $dst, $src.hi" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    // Lis sign extends the 16-bit src, then shifts it 16 bits to the left.
+    __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Part 2 of loading a 32 bit constant: hi16 is in src1 (properly shifted
+// and sign extended); this adds the low 16 bits.
+instruct loadConI32_lo16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "ORI     $dst, $src1.hi, $src2.lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
+    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct loadConI_Ex(iRegIdst dst, immI src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST*2);
+
+  expand %{
+    // Would like to use $src$$constant.
+    immI16 srcLo %{ _opnds[1]->constant() %}
+    // srcHi can be 0000 if srcLo sign-extends to a negative number.
+    immIhi16 srcHi %{ _opnds[1]->constant() %}
+    iRegIdst tmpI;
+    loadConIhi16(tmpI, srcHi);
+    loadConI32_lo16(dst, tmpI, srcLo);
+  %}
+%}
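+
+// Worked example (illustrative): for src == 0x12345678 this expands to
+//   LIS tmpI, 0x1234       // tmpI = 0x12340000
+//   ORI dst,  tmpI, 0x5678 // dst  = 0x12345678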
+
+// No constant pool entries required.
+instruct loadConL16(iRegLdst dst, immL16 src) %{
+  match(Set dst src);
+
+  format %{ "LI      $dst, $src \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short) ($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load long constant 0xssssssss????0000
+instruct loadConL32hi16(iRegLdst dst, immL32hi16 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "LIS     $dst, $src.hi \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// To load a 32 bit constant: merge the lower 16 bits into the already
+// loaded high 16 bits.
+instruct loadConL32_lo16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "ORI     $dst, $src1, $src2.lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
+    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load 32-bit long constant
+instruct loadConL32_Ex(iRegLdst dst, immL32 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST*2);
+
+  expand %{
+    // Would like to use $src$$constant.
+    immL16     srcLo %{ _opnds[1]->constant() /*& 0x0000FFFFL */%}
+    // srcHi can be 0000 if srcLo sign-extends to a negative number.
+    immL32hi16 srcHi %{ _opnds[1]->constant() /*& 0xFFFF0000L */%}
+    iRegLdst tmpL;
+    loadConL32hi16(tmpL, srcHi);
+    loadConL32_lo16(dst, tmpL, srcLo);
+  %}
+%}
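+
+// Worked example (illustrative): the same LIS/ORI split as loadConI_Ex
+// above, only into a 64-bit register; e.g. 0x12345678 becomes
+//   LIS tmpL, 0x1234; ORI dst, tmpL, 0x5678.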
+
+// Load long constant 0x????000000000000.
+instruct loadConLhighest16_Ex(iRegLdst dst, immLhighest16 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immL32hi16 srcHi %{ _opnds[1]->constant() >> 32 /*& 0xFFFF0000L */%}
+    immI shift32 %{ 32 %}
+    iRegLdst tmpL;
+    loadConL32hi16(tmpL, srcHi);
+    lshiftL_regL_immI(dst, tmpL, shift32);
+  %}
+%}
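+
+// Editorial sketch of the rule above: to load 0x4321000000000000, first
+//   LIS  tmp, 0x4321     // tmp = 0x0000000043210000
+// then shift left by 32:
+//   SLDI dst, tmp, 32    // dst = 0x4321000000000000
+// The shift also discards any sign-extension bits LIS may have produced
+// (e.g. for a hi16 of 0x8321).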
+
+// Expand node for constant pool load: small offset.
+instruct loadConL(iRegLdst dst, immL src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_num_consts(1);
+  // Needed so that CallDynamicJavaDirect can compute the address of this
+  // instruction for relocation.
+  ins_field_cbuf_insts_offset(int);
+
+  format %{ "LD      $dst, offset, $toc \t// load long $src from TOC" %}
+  size(4);
+  ins_encode( enc_load_long_constL(dst, src, toc) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Expand node for constant pool load: large offset.
+instruct loadConL_hi(iRegLdst dst, immL src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  predicate(false);
+
+  ins_num_consts(1);
+  ins_field_const_toc_offset(int);
+  // Needed so that CallDynamicJavaDirect can compute the address of this
+  // instruction for relocation.
+  ins_field_cbuf_insts_offset(int);
+
+  format %{ "ADDIS   $dst, $toc, offset \t// load long $src from TOC (hi)" %}
+  size(4);
+  ins_encode( enc_load_long_constL_hi(dst, toc, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand node for constant pool load: large offset.
+// No constant pool entries required.
+instruct loadConL_lo(iRegLdst dst, immL src, iRegLdst base) %{
+  effect(DEF dst, USE src, USE base);
+  predicate(false);
+
+  ins_field_const_toc_offset_hi_node(loadConL_hiNode*);
+
+  format %{ "LD      $dst, offset, $base \t// load long $src from TOC (lo)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+    int offset = ra_->C->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
+    __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load long constant from constant table. Expand in case an
+// offset > 16 bit is needed.
+// Adlc adds toc node MachConstantTableBase.
+instruct loadConL_Ex(iRegLdst dst, immL src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, offset, $constanttablebase\t// load long $src from table, postalloc expanded" %}
+  // We can not inline the enc_class for the expand as that does not support constanttablebase.
+  postalloc_expand( postalloc_expand_load_long_constant(dst, src, constanttablebase) );
+%}
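+
+// Editorial note on the TOC forms above (a sketch): if the constant's
+// offset in the TOC fits a signed 16-bit displacement, the postalloc
+// expansion emits a single
+//   LD    dst, offset(toc)
+// otherwise it splits the offset (loadConL_hi/loadConL_lo) and emits
+//   ADDIS dst, toc, offset_hi
+//   LD    dst, offset_lo(dst)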
+
+// Load NULL as compressed oop.
+instruct loadConN0(iRegNdst dst, immN_0 src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "LI      $dst, $src \t// compressed ptr" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, 0);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Load hi part of compressed oop constant.
+instruct loadConN_hi(iRegNdst dst, immN src) %{
+  effect(DEF dst, USE src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ lis($dst$$Register, (int)(short)(($src$$constant >> 16) & 0xffff));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add lo part of compressed oop constant to already loaded hi part.
+instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
+  effect(DEF dst, USE src1, USE src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ORI     $dst, $src1, $src2 \t// narrow oop lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
+    int oop_index = __ oop_recorder()->find_index((jobject)$src2$$constant);
+    RelocationHolder rspec = oop_Relocation::spec(oop_index);
+    __ relocate(rspec, 1);
+    __ ori($dst$$Register, $src1$$Register, $src2$$constant & 0xffff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Needed to postalloc expand loadConN: ConN is loaded as ConI
+// leaving the upper 32 bits with sign-extension bits.
+// This clears these bits: dst = src & 0xFFFFFFFF.
+// TODO: Eventually call this maskN_regN_FFFFFFFF.
+instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "MASK    $dst, $src, 0xFFFFFFFF" %} // mask
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src$$Register, 0x20);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Loading ConN must be postalloc expanded so that edges between
+// the nodes are safe. They may not interfere with a safepoint.
+// GL TODO: This needs three instructions: better put this into the constant pool.
+instruct loadConN_Ex(iRegNdst dst, immN src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST*2);
+
+  format %{ "LoadN   $dst, $src \t// postalloc expanded" %} // mask
+  postalloc_expand %{
+    MachNode *m1 = new (C) loadConN_hiNode();
+    MachNode *m2 = new (C) loadConN_loNode();
+    MachNode *m3 = new (C) clearMs32bNode();
+    m1->add_req(NULL);
+    m2->add_req(NULL, m1);
+    m3->add_req(NULL, m2);
+    m1->_opnds[0] = op_dst;
+    m1->_opnds[1] = op_src;
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_dst;
+    m2->_opnds[2] = op_src;
+    m3->_opnds[0] = op_dst;
+    m3->_opnds[1] = op_dst;
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    nodes->push(m1);
+    nodes->push(m2);
+    nodes->push(m3);
+  %}
+%}
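+
+// Editorial sketch of the sequence built above for a narrow oop constant,
+// e.g. 0x89ABCDEF:
+//   LIS    dst, 0x89AB       // sign extends: dst = 0xFFFFFFFF89AB0000
+//   ORI    dst, dst, 0xCDEF  // dst = 0xFFFFFFFF89ABCDEF (relocated lo16)
+//   CLRLDI dst, dst, 32      // dst = 0x0000000089ABCDEF (drop sign bits)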
+
+instruct loadConNKlass_hi(iRegNdst dst, immNKlass src) %{
+  effect(DEF dst, USE src);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "LIS     $dst, $src \t// narrow oop hi" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    intptr_t Csrc = Klass::encode_klass((Klass *)$src$$constant);
+    __ lis($dst$$Register, (int)(short)((Csrc >> 16) & 0xffff));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// This needs a match rule so that build_oop_map knows this is 
+// not a narrow oop.
+instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
+  match(Set dst src1);
+  effect(TEMP src2);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ADDI    $dst, $src1, $src2 \t// narrow oop lo" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    intptr_t Csrc = Klass::encode_klass((Klass *)$src1$$constant);
+    assert(__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
+    int klass_index = __ oop_recorder()->find_index((Klass *)$src1$$constant);
+    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
+
+    __ relocate(rspec, 1);
+    __ ori($dst$$Register, $src2$$Register, Csrc & 0xffff);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Loading ConNKlass must be postalloc expanded so that edges between
+// the nodes are safe. They may not interfere with a safepoint.
+instruct loadConNKlass_Ex(iRegNdst dst, immNKlass src) %{
+  match(Set dst src);
+  ins_cost(DEFAULT_COST*2);
+
+  format %{ "LoadN   $dst, $src \t// postalloc expanded" %} // mask
+  postalloc_expand %{
+    // Load high bits into register. Sign extended.
+    MachNode *m1 = new (C) loadConNKlass_hiNode();
+    m1->add_req(NULL);
+    m1->_opnds[0] = op_dst;
+    m1->_opnds[1] = op_src;
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    nodes->push(m1);
+
+    MachNode *m2 = m1;
+    if (!Assembler::is_uimm((jlong)Klass::encode_klass((Klass *)op_src->constant()), 31)) {
+      // Value might be 1-extended. Mask out these bits.
+      m2 = new (C) clearMs32bNode();
+      m2->add_req(NULL, m1);
+      m2->_opnds[0] = op_dst;
+      m2->_opnds[1] = op_dst;
+      ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+      nodes->push(m2);
+    }
+
+    MachNode *m3 = new (C) loadConNKlass_loNode();
+    m3->add_req(NULL, m2);
+    m3->_opnds[0] = op_dst;
+    m3->_opnds[1] = op_src;
+    m3->_opnds[2] = op_dst;
+    ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    nodes->push(m3);
+  %}
+%}
+
+// 0x1 is used in object initialization (initial object header).
+// No constant pool entries required.
+instruct loadConP0or1(iRegPdst dst, immP_0or1 src) %{
+  match(Set dst src);
+
+  format %{ "LI      $dst, $src \t// ptr" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand node for constant pool load: small offset.
+// The match rule is needed to generate the correct bottom_type(),
+// however this node should never match. The use of predicate is not
+// possible since ADLC forbids predicates for chain rules. The higher
+// costs do not prevent matching in this case. For that reason the
+// operand immP_NM with predicate(false) is used.
+instruct loadConP(iRegPdst dst, immP_NM src, iRegLdst toc) %{
+  match(Set dst src);
+  effect(TEMP toc);
+
+  ins_num_consts(1);
+
+  format %{ "LD      $dst, offset, $toc \t// load ptr $src from TOC" %}
+  size(4);
+  ins_encode( enc_load_long_constP(dst, src, toc) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Expand node for constant pool load: large offset.
+instruct loadConP_hi(iRegPdst dst, immP_NM src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  predicate(false);
+
+  ins_num_consts(1);
+  ins_field_const_toc_offset(int);
+
+  format %{ "ADDIS   $dst, $toc, offset \t// load ptr $src from TOC (hi)" %}
+  size(4);
+  ins_encode( enc_load_long_constP_hi(dst, src, toc) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand node for constant pool load: large offset.
+instruct loadConP_lo(iRegPdst dst, immP_NM src, iRegLdst base) %{
+  match(Set dst src);
+  effect(TEMP base);
+
+  ins_field_const_toc_offset_hi_node(loadConP_hiNode*);
+
+  format %{ "LD      $dst, offset, $base \t// load ptr $src from TOC (lo)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ld);
+    int offset = ra_->C->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
+    __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load pointer constant from constant table. Expand in case an
+// offset > 16 bit is needed.
+// Adlc adds toc node MachConstantTableBase.
+instruct loadConP_Ex(iRegPdst dst, immP src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  // This rule does not use "expand" because then
+  // the result type is not known to be an Oop.  An ADLC
+  // enhancement will be needed to make that work - not worth it!
+
+  // If this instruction rematerializes, it prolongs the live range
+  // of the toc node, causing illegal graphs.
+  // assert(edge_from_to(_reg_node[reg_lo],def)) fails in verify_good_schedule().
+  ins_cannot_rematerialize(true);
+
+  format %{ "LD    $dst, offset, $constanttablebase \t//  load ptr $src from table, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_load_ptr_constant(dst, src, constanttablebase) );
+%}
+
+// Expand node for constant pool load: small offset.
+instruct loadConF(regF dst, immF src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_num_consts(1);
+
+  format %{ "LFS     $dst, offset, $toc \t// load float $src from TOC" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
+    address float_address = __ float_constant($src$$constant);
+    __ lfs($dst$$FloatRegister, __ offset_to_method_toc(float_address), $toc$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Expand node for constant pool load: large offset.
+instruct loadConFComp(regF dst, immF src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_num_consts(1);
+
+  format %{ "ADDIS   $toc, $toc, offset_hi\n\t"
+            "LFS     $dst, offset_lo, $toc \t// load float $src from TOC (hi/lo)\n\t"
+            "ADDIS   $toc, $toc, -offset_hi"%}
+  size(12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    FloatRegister Rdst    = $dst$$FloatRegister;
+    Register Rtoc         = $toc$$Register;
+    address float_address = __ float_constant($src$$constant);
+    int offset            = __ offset_to_method_toc(float_address);
+    int hi = (offset + (1<<15))>>16;
+    int lo = offset - hi * (1<<16);
+
+    __ addis(Rtoc, Rtoc, hi);
+    __ lfs(Rdst, lo, Rtoc);
+    __ addis(Rtoc, Rtoc, -hi);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
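+
+// Editorial note on the hi/lo split above: rounding the high part ensures
+// the low part fits a signed 16-bit displacement. E.g. offset = 0x19876:
+//   hi = (0x19876 + 0x8000) >> 16 = 2,  lo = 0x19876 - 2 * 0x10000 = -0x678A
+// ADDIS adds hi << 16 to the TOC, LFS uses lo as displacement, and the
+// final ADDIS with -hi restores the TOC register.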
+
+// Adlc adds toc node MachConstantTableBase.
+instruct loadConF_Ex(regF dst, immF src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  // See loadConP.
+  ins_cannot_rematerialize(true);
+
+  format %{ "LFS     $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_load_float_constant(dst, src, constanttablebase) );
+%}
+
+// Expand node for constant pool load: small offset.
+instruct loadConD(regD dst, immD src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_num_consts(1);
+
+  format %{ "LFD     $dst, offset, $toc \t// load double $src from TOC" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lfd);
+    int offset =  __ offset_to_method_toc(__ double_constant($src$$constant));
+    __ lfd($dst$$FloatRegister, offset, $toc$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Expand node for constant pool load: large offset.
+instruct loadConDComp(regD dst, immD src, iRegLdst toc) %{
+  effect(DEF dst, USE src, USE toc);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_num_consts(1);
+
+  format %{ "ADDIS   $toc, $toc, offset_hi\n\t"
+            "LFD     $dst, offset_lo, $toc \t// load double $src from TOC (hi/lo)\n\t"
+            "ADDIS   $toc, $toc, -offset_hi" %}
+  size(12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    FloatRegister Rdst    = $dst$$FloatRegister;
+    Register      Rtoc    = $toc$$Register;
+    address double_address = __ double_constant($src$$constant);
+    int offset             = __ offset_to_method_toc(double_address);
+    int hi = (offset + (1<<15))>>16;
+    int lo = offset - hi * (1<<16);
+
+    __ addis(Rtoc, Rtoc, hi);
+    __ lfd(Rdst, lo, Rtoc);
+    __ addis(Rtoc, Rtoc, -hi);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Adlc adds toc node MachConstantTableBase.
+instruct loadConD_Ex(regD dst, immD src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  // See loadConP.
+  ins_cannot_rematerialize(true);
+
+  format %{ "ConD    $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_load_double_constant(dst, src, constanttablebase) );
+%}
+
+// Prefetch instructions.
+// Must be safe to execute with invalid address (cannot fault).
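+// Editorial note: dcbt (data cache block touch) hints a load and dcbtst
+// hints a store; both are advisory and do not fault on invalid addresses.
+// The dcbz used by the AllocatePrefetchStyle == 3 rules below instead
+// zeroes the whole cache block; it behaves like a store, which is safe
+// there because the prefetched allocation memory is about to be written.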
+
+instruct prefetchr(indirectMemory mem, iRegLsrc src) %{
+  match(PrefetchRead (AddP mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 0, $src \t// Prefetch read-many" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbt);
+    __ dcbt($src$$Register, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetchr_no_offset(indirectMemory mem) %{
+  match(PrefetchRead mem);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbt);
+    __ dcbt($mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetchw(indirectMemory mem, iRegLsrc src) %{
+  match(PrefetchWrite (AddP mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many (and read)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbtst($src$$Register, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetchw_no_offset(indirectMemory mem) %{
+  match(PrefetchWrite mem);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbtst($mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Special prefetch versions which use the dcbz instruction.
+instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
+  match(PrefetchAllocation (AddP mem src));
+  predicate(AllocatePrefetchStyle == 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbz($src$$Register, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
+  match(PrefetchAllocation mem);
+  predicate(AllocatePrefetchStyle == 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbz($mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
+  match(PrefetchAllocation (AddP mem src));
+  predicate(AllocatePrefetchStyle != 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbtst($src$$Register, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+instruct prefetch_alloc_no_offset(indirectMemory mem) %{
+  match(PrefetchAllocation mem);
+  predicate(AllocatePrefetchStyle != 3);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_dcbtst);
+    __ dcbtst($mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Store Instructions-------------------------------------------------
+
+// Store Byte
+instruct storeB(memory mem, iRegIsrc src) %{
+  match(Set mem (StoreB mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STB     $src, $mem \t// byte" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_stb);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ stb($src$$Register, Idisp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Char/Short
+instruct storeC(memory mem, iRegIsrc src) %{
+  match(Set mem (StoreC mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STH     $src, $mem \t// short" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sth);
+    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
+    __ sth($src$$Register, Idisp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Integer
+instruct storeI(memory mem, iRegIsrc src) %{
+  match(Set mem (StoreI mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     $src, $mem" %}
+  size(4);
+  ins_encode( enc_stw(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// ConvL2I + StoreI.
+instruct storeI_convL2I(memory mem, iRegLsrc src) %{
+  match(Set mem (StoreI mem (ConvL2I src)));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     l2i($src), $mem" %}
+  size(4);
+  ins_encode( enc_stw(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Long
+instruct storeL(memoryAlg4 mem, iRegLsrc src) %{
+  match(Set mem (StoreL mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STD     $src, $mem \t// long" %}
+  size(4);
+  ins_encode( enc_std(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store super word nodes.
+
+// Store Aligned Packed Byte long register to memory
+instruct storeA8B(memoryAlg4 mem, iRegLsrc src) %{
+  predicate(n->as_StoreVector()->memory_size() == 8);
+  match(Set mem (StoreVector mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STD     $mem, $src \t// packed8B" %}
+  size(4);
+  ins_encode( enc_std(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Compressed Oop
+instruct storeN(memory dst, iRegN_P2N src) %{
+  match(Set dst (StoreN dst src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     $src, $dst \t// compressed oop" %}
+  size(4);
+  ins_encode( enc_stw(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Compressed Klass
+instruct storeNKlass(memory dst, iRegN_P2N src) %{
+  match(Set dst (StoreNKlass dst src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     $src, $dst \t// compressed klass" %}
+  size(4);
+  ins_encode( enc_stw(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Pointer
+instruct storeP(memoryAlg4 dst, iRegPsrc src) %{
+  match(Set dst (StoreP dst src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STD     $src, $dst \t// ptr" %}
+  size(4);
+  ins_encode( enc_std(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Float
+instruct storeF(memory mem, regF src) %{
+  match(Set mem (StoreF mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STFS    $src, $mem" %}
+  size(4);
+  ins_encode( enc_stfs(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store Double
+instruct storeD(memory mem, regD src) %{
+  match(Set mem (StoreD mem src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STFD    $src, $mem" %}
+  size(4);
+  ins_encode( enc_stfd(src, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Store Instructions With Zeros--------------------------------------
+
+// Card-mark for CMS garbage collection.
+// This cardmark does an optimization so that it need not always
+// do a releasing store. For this, it gets the address of
+// CMSCollectorCardTableModRefBSExt::_requires_release as input.
+// (Using releaseFieldAddr in the match rule is a hack.)
+instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr) %{
+  match(Set mem (StoreCM mem releaseFieldAddr));
+  predicate(false);
+  ins_cost(MEMORY_REF_COST);
+
+  // See loadConP.
+  ins_cannot_rematerialize(true);
+
+  format %{ "STB     #0, $mem \t// CMS card-mark byte (must be 0!), checking requires_release in [$releaseFieldAddr]" %}
+  ins_encode( enc_cms_card_mark(mem, releaseFieldAddr) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Card-mark for CMS garbage collection.
+// This cardmark does an optimization so that it need not always
+// do a releasing store. For this, it needs the constant address of
+// CMSCollectorCardTableModRefBSExt::_requires_release.
+// This constant address is split off here by expand so we can use
+// adlc / matcher functionality to load it from the constant section.
+instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
+  match(Set mem (StoreCM mem zero));
+  predicate(UseConcMarkSweepGC);
+
+  expand %{
+    immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %}
+    iRegLdst releaseFieldAddress;
+    loadConL_Ex(releaseFieldAddress, baseImm);
+    storeCM_CMS(mem, releaseFieldAddress);
+  %}
+%}
+
+instruct storeCM_G1(memory mem, immI_0 zero) %{
+  match(Set mem (StoreCM mem zero));
+  predicate(UseG1GC);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_cannot_rematerialize(true);
+
+  format %{ "STB     #0, $mem \t// CMS card-mark byte store (G1)" %}
+  size(8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ li(R0, 0);
+    //__ release(); // G1: oops are allowed to get visible after dirty marking
+    guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
+    __ stb(R0, $mem$$disp, $mem$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Convert oop pointer into compressed form.
+
+// Nodes for postalloc expand.
+
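+// Editorial sketch of the arithmetic these nodes implement (R30 holds the
+// narrow oop base): with base != 0,
+//   encode: narrow = (oop == NULL) ? 0 : (oop - base) >> shift
+//   decode: oop    = (narrow == 0) ? NULL : (narrow << shift) + base
+// NULL preservation is what the conditional sub/add and isel_0 nodes below
+// provide; with base == 0 a single shift suffices, and with shift == 0 a
+// plain register move does.
+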
+// Shift node for expand.
+instruct encodeP_shift(iRegNdst dst, iRegNsrc src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodeP src));
+  predicate(false);
+
+  format %{ "SRDI    $dst, $src, 3 \t// encode" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src$$Register, Universe::narrow_oop_shift() & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add node for expand.
+instruct encodeP_sub(iRegPdst dst, iRegPdst src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodeP src));
+  predicate(false);
+
+  format %{ "SUB     $dst, $src, oop_base \t// encode" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, R30, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional sub base.
+instruct cond_sub_base(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodeP (Binary crx src1)));
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "BEQ     $crx, done\n\t"
+            "SUB     $dst, $src1, R30 \t// encode: subtract base if != NULL\n"
+            "done:" %}
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+    Label done;
+    __ beq($crx$$CondRegister, done);
+    __ subf($dst$$Register, R30, $src1$$Register);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Power7 can use the isel instruction.
+instruct cond_set_0_oop(iRegNdst dst, flagsReg crx, iRegPsrc src1) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodeP (Binary crx src1)));
+  predicate(false);
+
+  format %{ "CMOVE   $dst, $crx eq, 0, $src1 \t// encode: preserve 0" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description exists.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// base != 0
+// 32G aligned narrow oop base.
+instruct encodeP_32GAligned(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodeP src));
+  predicate(false /* TODO: PPC port Universe::narrow_oop_base_disjoint()*/);
+
+  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_oop_shift(), 32);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// shift != 0, base != 0
+instruct encodeP_Ex(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
+  match(Set dst (EncodeP src));
+  effect(TEMP crx);
+  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
+            Universe::narrow_oop_shift() != 0 &&
+            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
+
+  format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
+  postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
+%}
+
+// shift != 0, base != 0
+instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodeP src));
+  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
+            Universe::narrow_oop_shift() != 0 &&
+            true /* TODO: PPC port Universe::narrow_oop_base_overlaps()*/);
+
+  format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
+%}
+
+// shift != 0, base == 0
+// TODO: This is the same as encodeP_shift. Merge!
+instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodeP src));
+  predicate(Universe::narrow_oop_shift() != 0 &&
+            Universe::narrow_oop_base() == 0);
+
+  format %{ "SRDI    $dst, $src, #3 \t// encodeP, $src != NULL" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src$$Register, Universe::narrow_oop_shift() & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Compressed OOPs with narrow_oop_shift == 0.
+// shift == 0, base == 0
+instruct encodeP_narrow_oop_shift_0(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodeP src));
+  predicate(Universe::narrow_oop_shift() == 0);
+
+  format %{ "MR      $dst, $src \t// Ptr->Narrow" %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Decode nodes.
+
+// Shift node for expand.
+instruct decodeN_shift(iRegPdst dst, iRegPsrc src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (DecodeN src));
+  predicate(false);
+
+  format %{ "SLDI    $dst, $src, #3 \t// DecodeN" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ sldi($dst$$Register, $src$$Register, Universe::narrow_oop_shift());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add node for expand.
+instruct decodeN_add(iRegPdst dst, iRegPdst src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (DecodeN src));
+  predicate(false);
+
+  format %{ "ADD     $dst, $src, R30 \t// DecodeN, add oop base" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src$$Register, R30);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional add base for expand.
+instruct cond_add_base(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  // NOTICE that the rule is nonsense - we just have to make sure that:
+  //  - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
+  //  - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
+  match(Set dst (DecodeN (Binary crx src1)));
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "BEQ     $crx, done\n\t"
+            "ADD     $dst, $src1, R30 \t// DecodeN: add oop base if $src1 != NULL\n"
+            "done:" %}
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling()) */? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+    Label done;
+    __ beq($crx$$CondRegister, done);
+    __ add($dst$$Register, $src1$$Register, R30);
+    // TODO PPC port  __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cond_set_0_ptr(iRegPdst dst, flagsReg crx, iRegPsrc src1) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  // NOTICE that the rule is nonsense - we just have to make sure that:
+  //  - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
+  //  - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
+  match(Set dst (DecodeN (Binary crx src1)));
+  predicate(false);
+
+  format %{ "CMOVE   $dst, $crx eq, 0, $src1 \t// decode: preserve 0" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description exists.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound); 
+    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//  shift != 0, base != 0
+instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
+  match(Set dst (DecodeN src));
+  predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
+             n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
+            Universe::narrow_oop_shift() != 0 &&
+            Universe::narrow_oop_base() != 0);
+  effect(TEMP crx);
+
+  format %{ "DecodeN $dst, $src \t// Kills $crx, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_decode_oop(dst, src, crx) );
+%}
+
+// shift != 0, base == 0
+instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
+  match(Set dst (DecodeN src));
+  predicate(Universe::narrow_oop_shift() != 0 &&
+            Universe::narrow_oop_base() == 0);
+
+  format %{ "SLDI    $dst, $src, #3 \t// DecodeN (zerobased)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ sldi($dst$$Register, $src$$Register, Universe::narrow_oop_shift());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// src != 0, shift != 0, base != 0
+instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
+  match(Set dst (DecodeN src));
+  predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
+             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
+            Universe::narrow_oop_shift() != 0 &&
+            Universe::narrow_oop_base() != 0);
+
+  format %{ "DecodeN $dst, $src \t// $src != NULL, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_decode_oop_not_null(dst, src));
+%}
+
+// Compressed OOPs with narrow_oop_shift == 0.
+instruct decodeN_unscaled(iRegPdst dst, iRegNsrc src) %{
+  match(Set dst (DecodeN src));
+  predicate(Universe::narrow_oop_shift() == 0);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MR      $dst, $src \t// DecodeN (unscaled)" %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Convert compressed oop into int for vectors alignment masking.
+instruct decodeN2I_unscaled(iRegIdst dst, iRegNsrc src) %{
+  match(Set dst (ConvL2I (CastP2X (DecodeN src))));
+  predicate(Universe::narrow_oop_shift() == 0);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MR      $dst, $src \t// (int)DecodeN (unscaled)" %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Convert klass pointer into compressed form.
+
+// Nodes for postalloc expand.
+
+// Shift node for expand.
+instruct encodePKlass_shift(iRegNdst dst, iRegNsrc src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodePKlass src));
+  predicate(false);
+
+  format %{ "SRDI    $dst, $src, 3 \t// encode" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src$$Register, Universe::narrow_klass_shift());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add node for expand.
+instruct encodePKlass_sub_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (EncodePKlass (Binary base src)));
+  predicate(false);
+
+  format %{ "SUB     $dst, $base, $src \t// encode" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, $base$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// base != 0
+// 32G aligned narrow klass base.
+instruct encodePKlass_32GAligned(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodePKlass src));
+  predicate(false /* TODO: PPC port Universe::narrow_klass_base_disjoint()*/);
+
+  format %{ "EXTRDI  $dst, $src, #32, #3 \t// encode with 32G aligned base" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ rldicl($dst$$Register, $src$$Register, 64-Universe::narrow_klass_shift(), 32);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// shift != 0, base != 0
+instruct encodePKlass_not_null_Ex(iRegNdst dst, iRegLsrc base, iRegPsrc src) %{
+  match(Set dst (EncodePKlass (Binary base src)));
+  predicate(false);
+
+  format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
+  postalloc_expand %{
+    encodePKlass_sub_baseNode *n1 = new (C) encodePKlass_sub_baseNode();
+    n1->add_req(n_region, n_base, n_src);
+    n1->_opnds[0] = op_dst;
+    n1->_opnds[1] = op_base;
+    n1->_opnds[2] = op_src;
+    n1->_bottom_type = _bottom_type;
+
+    encodePKlass_shiftNode *n2 = new (C) encodePKlass_shiftNode();
+    n2->add_req(n_region, n1);
+    n2->_opnds[0] = op_dst;
+    n2->_opnds[1] = op_dst;
+    n2->_bottom_type = _bottom_type;
+    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    nodes->push(n1);
+    nodes->push(n2);
+  %}
+%}
+
+// shift != 0, base != 0
+instruct encodePKlass_not_null_ExEx(iRegNdst dst, iRegPsrc src) %{
+  match(Set dst (EncodePKlass src));
+  //predicate(Universe::narrow_klass_shift() != 0 &&
+  //          true /* TODO: PPC port Universe::narrow_klass_base_overlaps()*/);
+
+  //format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
+  ins_cost(DEFAULT_COST*2);  // Don't count constant.
+  expand %{
+    immL baseImm %{ (jlong)(intptr_t)Universe::narrow_klass_base() %}
+    iRegLdst base;
+    loadConL_Ex(base, baseImm);
+    encodePKlass_not_null_Ex(dst, base, src);
+  %}
+%}
+
+// Decode nodes.
+
+// Shift node for expand.
+instruct decodeNKlass_shift(iRegPdst dst, iRegPsrc src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (DecodeNKlass src));
+  predicate(false);
+
+  format %{ "SLDI    $dst, $src, #3 \t// DecodeNKlass" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ sldi($dst$$Register, $src$$Register, Universe::narrow_klass_shift());
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add node for expand.
+
+instruct decodeNKlass_add_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
+  // The match rule is needed to make it a 'MachTypeNode'!
+  match(Set dst (DecodeNKlass (Binary base src)));
+  predicate(false);
+
+  format %{ "ADD     $dst, $base, $src \t// DecodeNKlass, add klass base" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $base$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// src != 0, shift != 0, base != 0
+instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc src) %{
+  match(Set dst (DecodeNKlass (Binary base src)));
+  //effect(kill src); // We need a register for the immediate result after shifting.
+  predicate(false);
+
+  format %{ "DecodeNKlass $dst =  $base + ($src << 3) \t// $src != NULL, postalloc expanded" %}
+  postalloc_expand %{
+    decodeNKlass_add_baseNode *n1 = new (C) decodeNKlass_add_baseNode();
+    n1->add_req(n_region, n_base, n_src);
+    n1->_opnds[0] = op_dst;
+    n1->_opnds[1] = op_base;
+    n1->_opnds[2] = op_src;
+    n1->_bottom_type = _bottom_type;
+
+    decodeNKlass_shiftNode *n2 = new (C) decodeNKlass_shiftNode();
+    n2->add_req(n_region, n2);
+    n2->_opnds[0] = op_dst;
+    n2->_opnds[1] = op_dst;
+    n2->_bottom_type = _bottom_type;
+
+    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
+
+    nodes->push(n1);
+    nodes->push(n2);
+  %}
+%}
+
+// src != 0, shift != 0, base != 0
+instruct decodeNKlass_notNull_addBase_ExEx(iRegPdst dst, iRegNsrc src) %{
+  match(Set dst (DecodeNKlass src));
+  // predicate(Universe::narrow_klass_shift() != 0 &&
+  //           Universe::narrow_klass_base() != 0);
+
+  //format %{ "DecodeNKlass $dst, $src \t// $src != NULL, expanded" %}
+
+  ins_cost(DEFAULT_COST*2);  // Don't count constant.
+  expand %{
+    // We add first, then we shift. Like this, we can get along with one register less.
+    // But we have to load the base pre-shifted.
+    immL baseImm %{ (jlong)((intptr_t)Universe::narrow_klass_base() >> Universe::narrow_klass_shift()) %}
+    iRegLdst base;
+    loadConL_Ex(base, baseImm);
+    decodeNKlass_notNull_addBase_Ex(dst, base, src);
+  %}
+%}
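+
+// Editorial note on the add-before-shift trick above: with shift == 3,
+//   klass = base + (narrow << 3)
+// is computed as
+//   tmp = (base >> 3) + narrow;  klass = tmp << 3
+// which is equivalent as long as the low 3 bits of base are zero, i.e. the
+// klass base is 8-byte aligned (an assumption of this sketch).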
+
+//----------MemBar Instructions-----------------------------------------------
+// Memory barrier flavors
+
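+// Editorial note: on PPC64, acquire and release both map to lwsync, while
+// the full MemBarVolatile fence maps to sync (hwsync). The zero-size rules
+// below cover barriers that are redundant here, e.g. an acquire already
+// performed by the CAS of a preceding FastLock, or MemBarCPUOrder, which
+// is empty because PPC64 processors are self-consistent.
+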
+instruct membar_acquire() %{
+  match(LoadFence);
+  ins_cost(4*MEMORY_REF_COST);
+
+  format %{ "MEMBAR-acquire" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwsync);
+    __ acquire();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct unnecessary_membar_acquire() %{
+  match(MemBarAcquire);
+  ins_cost(0);
+
+  format %{ " -- \t// redundant MEMBAR-acquire - empty" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct membar_acquire_lock() %{
+  match(MemBarAcquireLock);
+  ins_cost(0);
+
+  format %{ " -- \t// redundant MEMBAR-acquire - empty (acquire as part of CAS in prior FastLock)" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct membar_release() %{
+  match(MemBarRelease);
+  match(StoreFence);
+  ins_cost(4*MEMORY_REF_COST);
+
+  format %{ "MEMBAR-release" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwsync);
+    __ release();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct membar_storestore() %{
+  match(MemBarStoreStore);
+  ins_cost(4*MEMORY_REF_COST);
+
+  format %{ "MEMBAR-store-store" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lwsync);
+    __ membar(Assembler::StoreStore);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct membar_release_lock() %{
+  match(MemBarReleaseLock);
+  ins_cost(0);
+
+  format %{ " -- \t// redundant MEMBAR-release - empty (release in FastUnlock)" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct membar_volatile() %{
+  match(MemBarVolatile);
+  ins_cost(4*MEMORY_REF_COST);
+
+  format %{ "MEMBAR-volatile" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sync);
+    __ fence();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// This optimization is wrong on PPC. The following pattern is not supported:
+//  MemBarVolatile
+//   ^        ^
+//   |        |
+//  CtrlProj MemProj
+//   ^        ^
+//   |        |
+//   |       Load
+//   |
+//  MemBarVolatile
+//
+//  The first MemBarVolatile could get optimized out! According to
+//  Vladimir, this pattern cannot occur on Oracle platforms.
+//  However, it does occur on PPC64 (because of membars in
+//  inline_unsafe_load_store).
+//
+// Add this node again if we found a good solution for inline_unsafe_load_store().
+// Don't forget to look at the implementation of post_store_load_barrier again, 
+// we did other fixes in that method.
+//instruct unnecessary_membar_volatile() %{
+//  match(MemBarVolatile);
+//  predicate(Matcher::post_store_load_barrier(n));
+//  ins_cost(0);
+//
+//  format %{ " -- \t// redundant MEMBAR-volatile - empty" %}
+//  size(0);
+//  ins_encode( /*empty*/ );
+//  ins_pipe(pipe_class_default);
+//%}
+
+instruct membar_CPUOrder() %{
+  match(MemBarCPUOrder);
+  ins_cost(0);
+
+  format %{ " -- \t// MEMBAR-CPUOrder - empty: PPC64 processors are self-consistent." %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Conditional Move---------------------------------------------------
+
+// Cmove using isel.
+instruct cmovI_reg_isel(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description
+    // exists. Anyway, the scheduler should be off on Power7.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int cc        = $cmp$$cmpcode;
+    __ isel($dst$$Register, $crx$$CondRegister, 
+            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovI_reg(cmpOp cmp, flagsReg crx, iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
+  predicate(!VM_Version::has_isel());
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovI_imm(cmpOp cmp, flagsReg crx, iRegIdst dst, immI16 src) %{
+  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Cmove using isel.
+instruct cmovL_reg_isel(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
+  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description
+    // exists. Anyway, the scheduler should be off on Power7.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int cc        = $cmp$$cmpcode;
+    __ isel($dst$$Register, $crx$$CondRegister, 
+            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovL_reg(cmpOp cmp, flagsReg crx, iRegLdst dst, iRegLsrc src) %{
+  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
+  predicate(!VM_Version::has_isel());
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovL_imm(cmpOp cmp, flagsReg crx, iRegLdst dst, immL16 src) %{
+  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Cmove using isel.
+instruct cmovN_reg_isel(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
+  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description
+    // exists. Anyway, the scheduler should be off on Power7.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int cc        = $cmp$$cmpcode;
+    __ isel($dst$$Register, $crx$$CondRegister, 
+            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional move for RegN. Only cmov(reg, reg).
+instruct cmovN_reg(cmpOp cmp, flagsReg crx, iRegNdst dst, iRegNsrc src) %{
+  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
+  predicate(!VM_Version::has_isel());
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovN_imm(cmpOp cmp, flagsReg crx, iRegNdst dst, immN_0 src) %{
+  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Cmove using isel.
+instruct cmovP_reg_isel(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegPsrc src) %{
+  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
+  predicate(VM_Version::has_isel());
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  size(4);
+  ins_encode %{
+    // This is a Power7 instruction for which no machine description
+    // exists. Anyway, the scheduler should be off on Power7.
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    int cc        = $cmp$$cmpcode;
+    __ isel($dst$$Register, $crx$$CondRegister, 
+            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovP_reg(cmpOp cmp, flagsReg crx, iRegPdst dst, iRegP_N2P src) %{
+  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
+  predicate(!VM_Version::has_isel());
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_reg(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovP_imm(cmpOp cmp, flagsReg crx, iRegPdst dst, immP_0 src) %{
+  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_imm(dst, crx, src, cmp) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovF_reg(cmpOp cmp, flagsReg crx, regF dst, regF src) %{
+  match(Set dst (CMoveF (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVEF  $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmovef);
+    Label done;
+    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
+    // Branch if not (cmp crx).
+    __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
+    __ fmr($dst$$FloatRegister, $src$$FloatRegister);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovD_reg(cmpOp cmp, flagsReg crx, regD dst, regD src) %{
+  match(Set dst (CMoveD (Binary cmp crx) (Binary dst src)));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVEF  $cmp, $crx, $dst, $src\n\t" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmovef);
+    Label done;
+    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
+    // Branch if not (cmp crx).
+    __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
+    __ fmr($dst$$FloatRegister, $src$$FloatRegister);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Conditional_store--------------------------------------------------
+// Conditional-store of the updated heap-top.
+// Used during allocation of the shared heap.
+// Sets flags (EQ) on success. Implemented with a CASA on Sparc.
+
+// As compareAndSwapL, but return flag register instead of boolean value in
+// int register.
+// Used by sun/misc/AtomicLongCSImpl.java.
+// Mem_ptr must be a memory operand, else this node does not get
+// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
+// can be rematerialized which leads to errors.
+instruct storeLConditional_regP_regL_regL(flagsReg crx, indirect mem_ptr, iRegLsrc oldVal, iRegLsrc newVal) %{
+  match(Set crx (StoreLConditional mem_ptr (Binary oldVal newVal)));
+  format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ cmpxchgd($crx$$CondRegister, R0, $oldVal$$Register, $newVal$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
+                noreg, NULL, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// As compareAndSwapP, but return flag register instead of boolean value in
+// int register.
+// This instruction is matched if UseTLAB is off.
+// Mem_ptr must be a memory operand, else this node does not get
+// Flag_needs_anti_dependence_check set by adlc. If this is not set this node
+// can be rematerialized which leads to errors.
+instruct storePConditional_regP_regP_regP(flagsReg crx, indirect mem_ptr, iRegPsrc oldVal, iRegPsrc newVal) %{
+  match(Set crx (StorePConditional mem_ptr (Binary oldVal newVal)));
+  format %{ "CMPXCHGD if ($crx = ($oldVal == *$mem_ptr)) *mem_ptr = $newVal; as bool" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ cmpxchgd($crx$$CondRegister, R0, $oldVal$$Register, $newVal$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
+                noreg, NULL, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Implement LoadPLocked. Must be ordered against changes of the memory location
+// by storePConditional.
+// Don't know whether this is ever used.
+instruct loadPLocked(iRegPdst dst, memory mem) %{
+  match(Set dst (LoadPLocked mem));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $mem \t// loadPLocked\n\t"
+            "TWI     $dst\n\t"
+            "ISYNC" %}
+  size(12);
+  ins_encode( enc_ld_ac(dst, mem) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Compare-And-Swap---------------------------------------------------
+
+// CompareAndSwap{P,I,L,N} have more than one output, therefore "CmpI
+// (CompareAndSwap ...)" or "If (CmpI (CompareAndSwap ...))" cannot be
+// matched.
+
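+// Editorial sketch: the cmpxchgw/cmpxchgd calls below expand to the usual
+// PPC64 load-reserve/store-conditional loop, roughly
+//   loop: lwarx  tmp, 0, mem_ptr
+//         cmpw   tmp, src1
+//         bne    fail
+//         stwcx. src2, 0, mem_ptr
+//         bne    loop
+// plus the requested memory barriers; the getAndAdd/getAndSet encodings
+// use the same reservation loop with an add or move instead of the compare.
+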
+instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set res (CompareAndSwapI mem_ptr (Binary src1 src2)));
+  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, 
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(), 
+                $res$$Register, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2) %{
+  match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
+  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
+                $res$$Register, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set res (CompareAndSwapL mem_ptr (Binary src1 src2)));
+  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
+                $res$$Register, NULL, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2) %{
+  match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
+  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+                MacroAssembler::MemBarFenceAfter, MacroAssembler::cmpxchgx_hint_atomic_update(),
+                $res$$Register, NULL, true);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+  match(Set res (GetAndAddI mem_ptr src));
+  format %{ "GetAndAddI $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndAddI(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+  match(Set res (GetAndAddL mem_ptr src));
+  format %{ "GetAndAddL $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndAddL(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src) %{
+  match(Set res (GetAndSetI mem_ptr src));
+  format %{ "GetAndSetI $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src) %{
+  match(Set res (GetAndSetL mem_ptr src));
+  format %{ "GetAndSetL $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src) %{
+  match(Set res (GetAndSetP mem_ptr src));
+  format %{ "GetAndSetP $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndSetL(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src) %{
+  match(Set res (GetAndSetN mem_ptr src));
+  format %{ "GetAndSetN $res, $mem_ptr, $src" %}
+  // Variable size: instruction count smaller if regs are disjoint.
+  ins_encode( enc_GetAndSetI(res, mem_ptr, src) );
+  ins_pipe(pipe_class_default);
+%}
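+
+// These rules back the Unsafe getAndAdd/getAndSet intrinsics. Note the
+// encoding reuse above: getAndSetP shares enc_GetAndSetL (pointers are
+// 8 bytes) and getAndSetN shares enc_GetAndSetI (narrow oops are 4).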
+
+//----------Arithmetic Instructions--------------------------------------------
+// Addition Instructions
+
+// Register Addition
+instruct addI_reg_reg(iRegIdst dst, iRegIsrc_iRegL2Isrc src1, iRegIsrc_iRegL2Isrc src2) %{
+  match(Set dst (AddI src1 src2));
+  format %{ "ADD     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand does not work with the instruct above (reason unclear).
+instruct addI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule
+  effect(DEF dst, USE src1, USE src2);
+  format %{ "ADD     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct tree_addI_addI_addI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
+  match(Set dst (AddI (AddI (AddI src1 src2) src3) src4));
+  ins_cost(DEFAULT_COST*3);
+
+  expand %{
+    // FIXME: we should do this transformation on the ideal graph.
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    addI_reg_reg(tmp1, src1, src2);
+    addI_reg_reg_2(tmp2, src3, src4); // Adlc complains about addI_reg_reg.
+    addI_reg_reg(dst, tmp1, tmp2);
+  %}
+%}
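+
+// Rebalancing (((s1 + s2) + s3) + s4) into (s1 + s2) + (s3 + s4) cuts the
+// dependency chain from three additions to two; the first two adds can
+// then issue in parallel.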
+
+// Immediate Addition
+instruct addI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
+  match(Set dst (AddI src1 src2));
+  format %{ "ADDI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Addition with 16-bit shifted operand
+instruct addI_reg_immhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2) %{
+  match(Set dst (AddI src1 src2));
+  format %{ "ADDIS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
+  %}
+  ins_pipe(pipe_class_default);
+%}
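+
+// The immIhi16 operand carries the full 32-bit constant with its low 16
+// bits zero; since ADDIS adds (imm16 << 16), the encoding shifts the
+// constant right by 16. The long and pointer variants below (immL32hi16)
+// encode the same way.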
+
+// Long Addition
+instruct addL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (AddL src1 src2));
+  format %{ "ADD     $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand does not work with the instruct above (reason unclear).
+instruct addL_reg_reg_2(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  // no match-rule
+  effect(DEF dst, USE src1, USE src2);
+  format %{ "ADD     $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct tree_addL_addL_addL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, iRegLsrc src3, iRegLsrc src4) %{
+  match(Set dst (AddL (AddL (AddL src1 src2) src3) src4));
+  ins_cost(DEFAULT_COST*3);
+
+  expand %{
+    // FIXME: we should do this transformation on the ideal graph.
+    iRegLdst tmp1;
+    iRegLdst tmp2;
+    addL_reg_reg(tmp1, src1, src2);
+    addL_reg_reg_2(tmp2, src3, src4); // Adlc complains about addL_reg_reg.
+    addL_reg_reg(dst, tmp1, tmp2);
+  %}
+%}
+
+// AddL + ConvL2I.
+instruct addI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (ConvL2I (AddL src1 src2)));
+
+  format %{ "ADD     $dst, $src1, $src2 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// No constant pool entries required.
+instruct addL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
+  match(Set dst (AddL src1 src2));
+
+  format %{ "ADDI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Long Immediate Addition with 16-bit shifted operand.
+// No constant pool entries required.
+instruct addL_reg_immhi16(iRegLdst dst, iRegLsrc src1, immL32hi16 src2) %{
+  match(Set dst (AddL src1 src2));
+
+  format %{ "ADDIS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Pointer Register Addition
+instruct addP_reg_reg(iRegPdst dst, iRegP_N2P src1, iRegLsrc src2) %{
+  match(Set dst (AddP src1 src2));
+  format %{ "ADD     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_add);
+    __ add($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Pointer Immediate Addition
+// No constant pool entries required.
+instruct addP_reg_imm16(iRegPdst dst, iRegP_N2P src1, immL16 src2) %{
+  match(Set dst (AddP src1 src2));
+
+  format %{ "ADDI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Pointer Immediate Addition with 16-bit shifted operand.
+// No constant pool entries required.
+instruct addP_reg_immhi16(iRegPdst dst, iRegP_N2P src1, immL32hi16 src2) %{
+  match(Set dst (AddP src1 src2));
+
+  format %{ "ADDIS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addis);
+    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//---------------------
+// Subtraction Instructions
+
+// Register Subtraction
+instruct subI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (SubI src1 src2));
+  format %{ "SUBF    $dst, $src2, $src1" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Subtraction
+// The compiler converts "x-c0" into "x+ -c0" (see SubINode::Ideal),
+// so this rule seems to be unused.
+instruct subI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
+  match(Set dst (SubI src1 src2));
+  format %{ "SUBI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ addi($dst$$Register, $src1$$Register, ($src2$$constant) * (-1));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// SubI from constant (using subfic).
+instruct subI_imm16_reg(iRegIdst dst, immI16 src1, iRegIsrc src2) %{
+  match(Set dst (SubI src1 src2));
+  format %{ "SUBI    $dst, $src1, $src2" %}
+
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subfic);
+    __ subfic($dst$$Register, $src2$$Register, $src1$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of an integer into a 32-bit mask, 0x0...0 for
+// positive integers and 0xF...F for negative ones.
+instruct signmask32I_regI(iRegIdst dst, iRegIsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "SRAWI   $dst, $src, #31" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srawi);
+    __ srawi($dst$$Register, $src$$Register, 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct absI_reg_Ex(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (AbsI src));
+  ins_cost(DEFAULT_COST*3);
+
+  expand %{
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    signmask32I_regI(tmp1, src);
+    xorI_reg_reg(tmp2, tmp1, src);
+    subI_reg_reg(dst, tmp2, tmp1);
+  %}
+%}
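+
+// The expand computes abs() branch-free via the sign-mask identity
+// (a sketch in C, assuming 32-bit two's-complement int):
+//   int m = x >> 31;         // arithmetic shift: 0 if x >= 0, else -1
+//   int abs = (x ^ m) - m;   // x unchanged if m == 0, else ~x + 1 == -x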
+
+instruct negI_regI(iRegIdst dst, immI_0 zero, iRegIsrc src2) %{
+  match(Set dst (SubI zero src2));
+  format %{ "NEG     $dst, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_neg);
+    __ neg($dst$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Long subtraction
+instruct subL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (SubL src1 src2));
+  format %{ "SUBF    $dst, $src2, $src1 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// SubL + convL2I.
+instruct subI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (ConvL2I (SubL src1 src2)));
+
+  format %{ "SUBF    $dst, $src2, $src1 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Subtraction
+// The compiler converts "x-c0" into "x+ -c0" (see SubLNode::Ideal),
+// so this rule seems to be unused.
+// No constant pool entries required.
+instruct subL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
+  match(Set dst (SubL src1 src2));
+
+  format %{ "SUBI    $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ addi($dst$$Register, $src1$$Register, ($src2$$constant) * (-1));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
+// positive longs and 0xF...F for negative ones.
+instruct signmask64I_regI(iRegIdst dst, iRegIsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "SRADI   $dst, $src, #63" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src$$Register, 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Long negation
+instruct negL_reg_reg(iRegLdst dst, immL_0 zero, iRegLsrc src2) %{
+  match(Set dst (SubL zero src2));
+  format %{ "NEG     $dst, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_neg);
+    __ neg($dst$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// NegL + ConvL2I.
+instruct negI_con0_regL(iRegIdst dst, immL_0 zero, iRegLsrc src2) %{
+  match(Set dst (ConvL2I (SubL zero src2)));
+
+  format %{ "NEG     $dst, $src2 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_neg);
+    __ neg($dst$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Multiplication Instructions
+// Integer Multiplication
+
+// Register Multiplication
+instruct mulI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (MulI src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MULLW   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mullw);
+    __ mullw($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Multiplication
+instruct mulI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
+  match(Set dst (MulI src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MULLI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mulli);
+    __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct mulL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (MulL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MULLD   $dst $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mulld);
+    __ mulld($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Multiply high for optimized long division by constant.
+instruct mulHighL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (MulHiL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MULHD   $dst $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mulhd);
+    __ mulhd($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
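+
+// C2 replaces a long division by a known constant with a multiply by a
+// precomputed "magic" reciprocal plus shifts; MULHD supplies the high
+// 64 bits of the 128-bit product that this needs (cf. the MulHiL
+// handling in opto/divnode.cpp).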
+
+// Immediate Multiplication
+instruct mulL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
+  match(Set dst (MulL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "MULLI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mulli);
+    __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Integer Division with Immediate -1: Negate.
+instruct divI_reg_immIvalueMinus1(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
+  match(Set dst (DivI src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "NEG     $dst, $src1 \t// /-1" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_neg);
+    __ neg($dst$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Integer Division with constant, but not -1.
+// We should be able to improve this by checking the type of src2.
+// It might well be that src2 is known to be positive.
+instruct divI_reg_regnotMinus1(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (DivI src1 src2));
+  predicate(n->in(2)->find_int_con(-1) != -1); // src2 is a constant, but not -1
+  ins_cost(2*DEFAULT_COST);
+
+  format %{ "DIVW    $dst, $src1, $src2 \t// /not-1" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_divw);
+    __ divw($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovI_bne_negI_reg(iRegIdst dst, flagsReg crx, iRegIsrc src1) %{
+  effect(USE_DEF dst, USE src1, USE crx);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $dst, neg($src1), $crx" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+    Label done;
+    __ bne($crx$$CondRegister, done);
+    __ neg($dst$$Register, $src1$$Register);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Integer Division with Registers not containing constants.
+instruct divI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (DivI src1 src2));
+  ins_cost(10*DEFAULT_COST);
+
+  expand %{
+    immI16 imm %{ (int)-1 %}
+    flagsReg tmp1;
+    cmpI_reg_imm16(tmp1, src2, imm);          // check src2 == -1
+    divI_reg_regnotMinus1(dst, src1, src2);   // dst = src1 / src2
+    cmovI_bne_negI_reg(dst, tmp1, src1);      // cmove dst = neg(src1) if src2 == -1
+  %}
+%}
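+
+// The -1 guard is needed because Java defines min_int / -1 == min_int
+// (JLS 15.17.2), while DIVW leaves the result undefined on that overflow.
+// Negation is the correct quotient for any src1 once src2 == -1 (and
+// neg(min_int) == min_int in two's complement), so the cmove overwrites
+// the divide result. divL_reg_reg_Ex below mirrors this for longs.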
+
+// Long Division with Immediate -1: Negate.
+instruct divL_reg_immLvalueMinus1(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
+  match(Set dst (DivL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "NEG     $dst, $src1 \t// /-1, long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_neg);
+    __ neg($dst$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Long Division with constant, but not -1.
+instruct divL_reg_regnotMinus1(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (DivL src1 src2));
+  predicate(n->in(2)->find_long_con(-1L) != -1L); // Src2 is a constant, but not -1.
+  ins_cost(2*DEFAULT_COST);
+
+  format %{ "DIVD    $dst, $src1, $src2 \t// /not-1, long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_divd);
+    __ divd($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovL_bne_negL_reg(iRegLdst dst, flagsReg crx, iRegLsrc src1) %{
+  effect(USE_DEF dst, USE src1, USE crx);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "CMOVE   $dst, neg($src1), $crx" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+    Label done;
+    __ bne($crx$$CondRegister, done);
+    __ neg($dst$$Register, $src1$$Register);
+    // TODO PPC port __ endgroup_if_needed(_size == 12);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Long Division with Registers not containing constants.
+instruct divL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (DivL src1 src2));
+  ins_cost(10*DEFAULT_COST);
+
+  expand %{
+    immL16 imm %{ (int)-1 %}
+    flagsReg tmp1;
+    cmpL_reg_imm16(tmp1, src2, imm);          // check src2 == -1
+    divL_reg_regnotMinus1(dst, src1, src2);   // dst = src1 / src2
+    cmovL_bne_negL_reg(dst, tmp1, src1);      // cmove dst = neg(src1) if src2 == -1
+  %}
+%}
+
+// Integer Remainder with registers.
+instruct modI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (ModI src1 src2));
+  ins_cost(10*DEFAULT_COST);
+
+  expand %{
+    immI16 imm %{ (int)-1 %}
+    flagsReg tmp1;
+    iRegIdst tmp2;
+    iRegIdst tmp3;
+    cmpI_reg_imm16(tmp1, src2, imm);           // check src2 == -1
+    divI_reg_regnotMinus1(tmp2, src1, src2);   // tmp2 = src1 / src2
+    cmovI_bne_negI_reg(tmp2, tmp1, src1);      // cmove tmp2 = neg(src1) if src2 == -1
+    mulI_reg_reg(tmp3, src2, tmp2);            // tmp3 = src2 * tmp2
+    subI_reg_reg(dst, src1, tmp3);             // dst = src1 - tmp3
+  %}
+%}
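+
+// The remainder falls out of the quotient via the identity Java's %
+// must satisfy, a == (a / b) * b + a % b, hence the div/mul/sub
+// sequence, with the same -1 cmove guarding the overflow case.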
+
+// Long Remainder with registers
+instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
+  match(Set dst (ModL src1 src2));
+  ins_cost(10*DEFAULT_COST);
+
+  expand %{
+    immL16 imm %{ (int)-1 %}
+    flagsReg tmp1;
+    iRegLdst tmp2;
+    iRegLdst tmp3;
+    cmpL_reg_imm16(tmp1, src2, imm);             // check src2 == -1
+    divL_reg_regnotMinus1(tmp2, src1, src2);     // tmp2 = src1 / src2
+    cmovL_bne_negL_reg(tmp2, tmp1, src1);        // cmove tmp2 = neg(src1) if src2 == -1
+    mulL_reg_reg(tmp3, src2, tmp2);              // tmp3 = src2 * tmp2
+    subL_reg_reg(dst, src1, tmp3);               // dst = src1 - tmp3
+  %}
+%}
+
+// Integer Shift Instructions
+
+// Register Shift Left
+
+// Clear all but the lowest #mask bits.
+// Used to normalize shift amounts in registers.
+instruct maskI_reg_imm(iRegIdst dst, iRegIsrc src, uimmI6 mask) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src, USE mask);
+  predicate(false);
+
+  format %{ "MASK    $dst, $src, $mask \t// clear $mask upper bits" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src$$Register, $mask$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct lShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SLW     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_slw);
+    __ slw($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct lShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (LShiftI src1 src2));
+  ins_cost(DEFAULT_COST*2);
+  expand %{
+    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    lShiftI_reg_reg(dst, src1, tmpI);
+  %}
+%}
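+
+// Java masks int shift counts to 5 bits (n & 0x1f), but SLW reads six
+// shift bits and yields 0 for counts 32..63, so the count is first
+// normalized with maskI_reg_imm. The 64-bit shifts below keep 6 bits
+// (mask 0x3a) for the same reason, as SLD/SRD/SRAD read seven bits.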
+
+// Register Shift Left Immediate
+instruct lShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
+  match(Set dst (LShiftI src1 src2));
+
+  format %{ "SLWI    $dst, $src1, ($src2 & 0x1f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ slwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// AndI with negpow2-constant + LShiftI
+instruct lShiftI_andI_immInegpow2_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
+  match(Set dst (LShiftI (AndI src1 src2) src3));
+  predicate(UseRotateAndMaskInstructionsPPC64);
+
+  format %{ "RLWINM  $dst, lShiftI(AndI($src1, $src2), $src3)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm); // FIXME: assert that rlwinm is equal to addi
+    long src2      = $src2$$constant;
+    long src3      = $src3$$constant;
+    long maskbits  = src3 + log2_long((jlong) (julong) (juint) -src2);
+    if (maskbits >= 32) {
+      __ li($dst$$Register, 0); // addi
+    } else {
+      __ rlwinm($dst$$Register, $src1$$Register, src3 & 0x1f, 0, (31-maskbits) & 0x1f);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// RShiftI + AndI with negpow2-constant + LShiftI
+instruct lShiftI_andI_immInegpow2_rShiftI_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
+  match(Set dst (LShiftI (AndI (RShiftI src1 src3) src2) src3));
+  predicate(UseRotateAndMaskInstructionsPPC64);
+
+  format %{ "RLWINM  $dst, lShiftI(AndI(RShiftI($src1, $src3), $src2), $src3)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm); // FIXME: assert that rlwinm is equal to addi
+    long src2      = $src2$$constant;
+    long src3      = $src3$$constant;
+    long maskbits  = src3 + log2_long((jlong) (julong) (juint) -src2);
+    if (maskbits >= 32) {
+      __ li($dst$$Register, 0); // addi
+    } else {
+      __ rlwinm($dst$$Register, $src1$$Register, 0, 0, (31-maskbits) & 0x1f);
+    }
+  %}
+  ins_pipe(pipe_class_default);
+%}
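+
+// Both rules fold the AND with -(1 << k) and the shifts into a single
+// RLWINM whose mask clears the low src3 + k bits (maskbits above, with
+// k = log2(-src2)). Once maskbits >= 32 no bit survives the mask, so the
+// result is the constant 0 and li(dst, 0) is emitted instead.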
+
+instruct lShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SLD     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sld);
+    __ sld($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Shift Left
+instruct lShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  match(Set dst (LShiftL src1 src2));
+  ins_cost(DEFAULT_COST*2);
+  expand %{
+    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    lShiftL_regL_regI(dst, src1, tmpI);
+  %}
+%}
+
+// Register Shift Left Immediate
+instruct lshiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
+  match(Set dst (LShiftL src1 src2));
+  format %{ "SLDI    $dst, $src1, ($src2 & 0x3f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// If we shift more than 32 bits, we need not convert I2L.
+instruct lShiftL_regI_immGE32(iRegLdst dst, iRegIsrc src1, uimmI6_ge32 src2) %{
+  match(Set dst (LShiftL (ConvI2L src1) src2));
+  ins_cost(DEFAULT_COST);
+
+  size(4);
+  format %{ "SLDI    $dst, i2l($src1), $src2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Shift a positive int to the left.
+// Clrlsldi clears the upper 32 bits and shifts.
+instruct scaledPositiveI2L_lShiftL_convI2L_reg_imm6(iRegLdst dst, iRegIsrc src1, uimmI6 src2) %{
+  match(Set dst (LShiftL (ConvI2L src1) src2));
+  predicate(((ConvI2LNode*)(_kids[0]->_leaf))->type()->is_long()->is_positive_int());
+
+  format %{ "SLDI    $dst, i2l(positive_int($src1)), $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldic);
+    __ clrlsldi($dst$$Register, $src1$$Register, 0x20, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct arShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SRAW    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sraw);
+    __ sraw($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Arithmetic Shift Right
+instruct arShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (RShiftI src1 src2));
+  ins_cost(DEFAULT_COST*2);
+  expand %{
+    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    arShiftI_reg_reg(dst, src1, tmpI);
+  %}
+%}
+
+// Register Arithmetic Shift Right Immediate
+instruct arShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
+  match(Set dst (RShiftI src1 src2));
+
+  format %{ "SRAWI   $dst, $src1, ($src2 & 0x1f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srawi);
+    __ srawi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct arShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SRAD    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srad);
+    __ srad($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Shift Right Arithmetic Long
+instruct arShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  match(Set dst (RShiftL src1 src2));
+  ins_cost(DEFAULT_COST*2);
+
+  expand %{
+    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    arShiftL_regL_regI(dst, src1, tmpI);
+  %}
+%}
+
+// Register Shift Right Immediate
+instruct arShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
+  match(Set dst (RShiftL src1 src2));
+
+  format %{ "SRADI   $dst, $src1, ($src2 & 0x3f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// RShiftL + ConvL2I
+instruct convL2I_arShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
+  match(Set dst (ConvL2I (RShiftL src1 src2)));
+
+  format %{ "SRADI   $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_sradi);
+    __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct urShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SRW     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srw);
+    __ srw($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Shift Right
+instruct urShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (URShiftI src1 src2));
+  ins_cost(DEFAULT_COST*2);
+
+  expand %{
+    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    urShiftI_reg_reg(dst, src1, tmpI);
+  %}
+%}
+
+// Register Shift Right Immediate
+instruct urShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
+  match(Set dst (URShiftI src1 src2));
+
+  format %{ "SRWI    $dst, $src1, ($src2 & 0x1f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ srwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct urShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "SRD     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srd);
+    __ srd($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Shift Right
+instruct urShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
+  match(Set dst (URShiftL src1 src2));
+  ins_cost(DEFAULT_COST*2);
+
+  expand %{
+    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
+    iRegIdst tmpI;
+    maskI_reg_imm(tmpI, src2, mask);
+    urShiftL_regL_regI(dst, src1, tmpI);
+  %}
+%}
+
+// Register Shift Right Immediate
+instruct urShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
+  match(Set dst (URShiftL src1 src2));
+
+  format %{ "SRDI    $dst, $src1, ($src2 & 0x3f)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// URShiftL + ConvL2I.
+instruct convL2I_urShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
+  match(Set dst (ConvL2I (URShiftL src1 src2)));
+
+  format %{ "SRDI    $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Shift Right Immediate with a CastP2X
+instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
+  match(Set dst (URShiftL (CastP2X src1) src2));
+
+  format %{ "SRDI    $dst, $src1, $src2 \t// Cast ptr $src1 to long and shift" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (ConvL2I (ConvI2L src)));
+
+  format %{ "EXTSW   $dst, $src \t// int->int" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);
+    __ extsw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Rotate Instructions------------------------------------------------
+
+// Rotate Left by 8-bit immediate
+instruct rotlI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 lshift, immI8 rshift) %{
+  match(Set dst (OrI (LShiftI src lshift) (URShiftI src rshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
+
+  format %{ "ROTLWI  $dst, $src, $lshift" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ rotlwi($dst$$Register, $src$$Register, $lshift$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Rotate Right by 8-bit immediate
+instruct rotrI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 rshift, immI8 lshift) %{
+  match(Set dst (OrI (URShiftI src rshift) (LShiftI src lshift)));
+  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
+
+  format %{ "ROTRWI  $dst, $rshift" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ rotrwi($dst$$Register, $src$$Register, $rshift$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
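+
+// The predicates accept a shift pair only when the two counts sum to 0
+// mod 32, i.e. when the OR really forms a rotate:
+//   rotl(x, n) == (x << n) | (x >>> (32 - n))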
+
+//----------Floating Point Arithmetic Instructions-----------------------------
+
+// Add float single precision
+instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (AddF src1 src2));
+
+  format %{ "FADDS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fadds);
+    __ fadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Add float double precision
+instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (AddD src1 src2));
+
+  format %{ "FADD    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fadd);
+    __ fadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Sub float single precision
+instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (SubF src1 src2));
+
+  format %{ "FSUBS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fsubs);
+    __ fsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Sub float double precision
+instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (SubD src1 src2));
+  format %{ "FSUB    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fsub);
+    __ fsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Mul float single precision
+instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (MulF src1 src2));
+  format %{ "FMULS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmuls);
+    __ fmuls($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Mul float double precision
+instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (MulD src1 src2));
+  format %{ "FMUL    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmul);
+    __ fmul($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Div float single precision
+instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
+  match(Set dst (DivF src1 src2));
+  format %{ "FDIVS   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fdivs);
+    __ fdivs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Div float double precision
+instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
+  match(Set dst (DivD src1 src2));
+  format %{ "FDIV    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fdiv);
+    __ fdiv($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Absolute float single precision
+instruct absF_reg(regF dst, regF src) %{
+  match(Set dst (AbsF src));
+  format %{ "FABS    $dst, $src \t// float" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fabs);
+    __ fabs($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Absolute float double precision
+instruct absD_reg(regD dst, regD src) %{
+  match(Set dst (AbsD src));
+  format %{ "FABS    $dst, $src \t// double" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fabs);
+    __ fabs($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct negF_reg(regF dst, regF src) %{
+  match(Set dst (NegF src));
+  format %{ "FNEG    $dst, $src \t// float" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fneg);
+    __ fneg($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct negD_reg(regD dst, regD src) %{
+  match(Set dst (NegD src));
+  format %{ "FNEG    $dst, $src \t// double" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fneg);
+    __ fneg($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// AbsF + NegF.
+instruct negF_absF_reg(regF dst, regF src) %{
+  match(Set dst (NegF (AbsF src)));
+  format %{ "FNABS   $dst, $src \t// float" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fnabs);
+    __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// AbsD + NegD.
+instruct negD_absD_reg(regD dst, regD src) %{
+  match(Set dst (NegD (AbsD src)));
+  format %{ "FNABS   $dst, $src \t// double" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fnabs);
+    __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// VM_Version::has_fsqrt() decides if this node will be used.
+// Sqrt float double precision
+instruct sqrtD_reg(regD dst, regD src) %{
+  match(Set dst (SqrtD src));
+  format %{ "FSQRT   $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fsqrt);
+    __ fsqrt($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Single-precision sqrt.
+instruct sqrtF_reg(regF dst, regF src) %{
+  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "FSQRTS  $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fsqrts);
+    __ fsqrts($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
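+
+// Java exposes only Math.sqrt(double), so a single-precision sqrt reaches
+// the matcher as ConvD2F(SqrtD(ConvF2D src)); FSQRTS computes the same
+// correctly rounded float result in one instruction.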
+
+instruct roundDouble_nop(regD dst) %{
+  match(Set dst (RoundDouble dst));
+  ins_cost(0);
+
+  format %{ " -- \t// RoundDouble not needed - empty" %}
+  size(0);
+  // PPC results are already "rounded" (i.e., normal-format IEEE).
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct roundFloat_nop(regF dst) %{
+  match(Set dst (RoundFloat dst));
+  ins_cost(0);
+
+  format %{ " -- \t// RoundFloat not needed - empty" %}
+  size(0);
+  // PPC results are already "rounded" (i.e., normal-format IEEE).
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Logical Instructions-----------------------------------------------
+
+// And Instructions
+
+// Register And
+instruct andI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (AndI src1 src2));
+  format %{ "AND     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_and);
+    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate And
+instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{
+  match(Set dst (AndI src1 src2));
+  effect(KILL cr0);
+
+  format %{ "ANDI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andi_);
+    // FIXME: avoid andi_ ?
+    __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate And where the immediate is a negative power of 2.
+instruct andI_reg_immInegpow2(iRegIdst dst, iRegIsrc src1, immInegpow2 src2) %{
+  match(Set dst (AndI src1 src2));
+  format %{ "ANDWI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ clrrdi($dst$$Register, $src1$$Register, log2_long((jlong)(julong)(juint)-($src2$$constant)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct andI_reg_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immIpow2minus1 src2) %{
+  match(Set dst (AndI src1 src2));
+  format %{ "ANDWI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src1$$Register, 64-log2_long((((jlong) $src2$$constant)+1)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
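+
+// Neither rule needs a constant load: AND with -(1 << k) is CLRRDI
+// (clear the k rightmost bits), and AND with (1 << k) - 1 is CLRLDI
+// (clear everything but the k rightmost bits).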
+
+instruct andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
+  match(Set dst (AndI src1 src2));
+  predicate(UseRotateAndMaskInstructionsPPC64);
+  format %{ "ANDWI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ rlwinm($dst$$Register, $src1$$Register, 0, 
+              (31-log2_long((jlong) $src2$$constant)) & 0x1f, (31-log2_long((jlong) $src2$$constant)) & 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register And Long
+instruct andL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (AndL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "AND     $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_and);
+    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate And long
+instruct andL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2, flagsRegCR0 cr0) %{
+  match(Set dst (AndL src1 src2));
+  effect(KILL cr0);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ANDI    $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andi_);
+    // FIXME: avoid andi_ ?
+    __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate And Long where the immediate is a negative power of 2.
+instruct andL_reg_immLnegpow2(iRegLdst dst, iRegLsrc src1, immLnegpow2 src2) %{
+  match(Set dst (AndL src1 src2));
+  format %{ "ANDDI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ clrrdi($dst$$Register, $src1$$Register, log2_long((jlong)-$src2$$constant));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct andL_reg_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
+  match(Set dst (AndL src1 src2));
+  format %{ "ANDDI   $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src1$$Register, 64-log2_long((((jlong) $src2$$constant)+1)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// AndL + ConvL2I.
+instruct convL2I_andL_reg_immLpow2minus1(iRegIdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
+  match(Set dst (ConvL2I (AndL src1 src2)));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ANDDI   $dst, $src1, $src2 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src1$$Register, 64-log2_long((((jlong) $src2$$constant)+1)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Or Instructions
+
+// Register Or
+instruct orI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (OrI src1 src2));
+  format %{ "OR      $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand does not work with the instruct above (reason unclear).
+instruct orI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule
+  effect(DEF dst, USE src1, USE src2);
+  format %{ "OR      $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct tree_orI_orI_orI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
+  match(Set dst (OrI (OrI (OrI src1 src2) src3) src4));
+  ins_cost(DEFAULT_COST*3);
+
+  expand %{
+    // FIXME: we should do this transformation on the ideal graph.
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    orI_reg_reg(tmp1, src1, src2);
+    orI_reg_reg_2(tmp2, src3, src4); // Adlc complains about orI_reg_reg.
+    orI_reg_reg(dst, tmp1, tmp2);
+  %}
+%}
+
+// Immediate Or
+instruct orI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
+  match(Set dst (OrI src1 src2));
+  format %{ "ORI     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
+    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Or Long
+instruct orL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (OrL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  size(4);
+  format %{ "OR      $dst, $src1, $src2 \t// long" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// OrL + ConvL2I.
+instruct orI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (ConvL2I (OrL src1 src2)));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "OR      $dst, $src1, $src2 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Or long
+instruct orL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 con) %{
+  match(Set dst (OrL src1 con));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ORI     $dst, $src1, $con \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_ori);
+    __ ori($dst$$Register, $src1$$Register, ($con$$constant) & 0xFFFF);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Xor Instructions
+
+// Register Xor
+instruct xorI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (XorI src1 src2));
+  format %{ "XOR     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xor);
+    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Expand does not work with the instruct above (reason unclear).
+instruct xorI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  // no match-rule
+  effect(DEF dst, USE src1, USE src2);
+  format %{ "XOR     $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xor);
+    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct tree_xorI_xorI_xorI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
+  match(Set dst (XorI (XorI (XorI src1 src2) src3) src4));
+  ins_cost(DEFAULT_COST*3);
+
+  expand %{
+    // FIXME: we should do this transformation on the ideal graph.
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    xorI_reg_reg(tmp1, src1, src2);
+    xorI_reg_reg_2(tmp2, src3, src4); // Adlc complains about xorI_reg_reg.
+    xorI_reg_reg(dst, tmp1, tmp2);
+  %}
+%}
+
+// Immediate Xor
+instruct xorI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
+  match(Set dst (XorI src1 src2));
+  format %{ "XORI    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xori);
+    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Register Xor Long
+instruct xorL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (XorL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "XOR     $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xor);
+    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// XorL + ConvL2I.
+instruct xorI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (ConvL2I (XorL src1 src2)));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "XOR     $dst, $src1, $src2 \t// long + l2i" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xor);
+    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Immediate Xor Long
+instruct xorL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2) %{
+  match(Set dst (XorL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "XORI    $dst, $src1, $src2 \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_xori);
+    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct notI_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
+  match(Set dst (XorI src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "NOT     $dst, $src1 ($src2)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_nor);
+    __ nor($dst$$Register, $src1$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct notL_reg(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
+  match(Set dst (XorL src1 src2));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "NOT     $dst, $src1 ($src2) \t// long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_nor);
+    __ nor($dst$$Register, $src1$$Register, $src1$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
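+
+// XOR with the constant -1 is bitwise NOT; NOR of a register with itself
+// computes it in one instruction: nor(d, s, s) == ~(s | s) == ~s.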
+
+// And-complement
+instruct andcI_reg_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2, iRegIsrc src3) %{
+  match(Set dst (AndI (XorI src1 src2) src3));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "ANDW    $dst, xori($src1, $src2), $src3" %}
+  size(4);
+  ins_encode( enc_andc(dst, src3, src1) );
+  ins_pipe(pipe_class_default);
+%}
+
+// And-complement
+instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "ANDC    $dst, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andc);
+    __ andc($dst$$Register, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Moves between int/long and float/double----------------------------
+//
+// The following rules move values from int/long registers/stack-locations
+// to float/double registers/stack-locations and vice versa, without doing any
+// conversions. These rules are used to implement the bit-conversion methods
+// of java.lang.Float etc., e.g.
+//   int   floatToIntBits(float value)
+//   float intBitsToFloat(int bits)
+//
+// Notes on the implementation on ppc64:
+// We only provide rules which move between a register and a stack-location,
+// because we always have to go through memory when moving between a float
+// register and an integer register.
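+//
+// E.g., floatToIntBits(f) becomes a store/load pair through a stack slot
+// (a sketch; the register allocator picks the slot):
+//   STFS    f, disp(R1)     // moveF2I_reg_stack below
+//   LWZ     r, disp(R1)     // moveF2I_stack_reg below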
+
+//---------- Chain stack slots between similar types --------
+
+// These are needed so that the rules below can match.
+
+// Load integer from stack slot
+instruct stkI_to_regI(iRegIdst dst, stackSlotI src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $src" %}
+  size(4);
+  ins_encode( enc_lwz(dst, src) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store integer to stack slot
+instruct regI_to_stkI(stackSlotI dst, iRegIsrc src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     $src, $dst \t// stk" %}
+  size(4);
+  ins_encode( enc_stw(src, dst) ); // rs=rt
+  ins_pipe(pipe_class_memory);
+%}
+
+// Load long from stack slot
+instruct stkL_to_regL(iRegLdst dst, stackSlotL src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LD      $dst, $src \t// long" %}
+  size(4);
+  ins_encode( enc_ld(dst, src) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Store long to stack slot
+instruct regL_to_stkL(stackSlotL dst, iRegLsrc src) %{
+  match(Set dst src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STD     $src, $dst \t// long" %}
+  size(4);
+  ins_encode( enc_std(src, dst) ); // rs=rt
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Moves between int and float
+
+// Move float value from float stack-location to integer register.
+instruct moveF2I_stack_reg(iRegIdst dst, stackSlotF src) %{
+  match(Set dst (MoveF2I src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LWZ     $dst, $src \t// MoveF2I" %}
+  size(4);
+  ins_encode( enc_lwz(dst, src) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move float value from float register to integer stack-location.
+instruct moveF2I_reg_stack(stackSlotI dst, regF src) %{
+  match(Set dst (MoveF2I src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STFS    $src, $dst \t// MoveF2I" %}
+  size(4);
+  ins_encode( enc_stfs(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move integer value from integer stack-location to float register.
+instruct moveI2F_stack_reg(regF dst, stackSlotI src) %{
+  match(Set dst (MoveI2F src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LFS     $dst, $src \t// MoveI2F" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_lfs);
+    int Idisp = $src$$disp + frame_slots_bias($src$$base, ra_);
+    __ lfs($dst$$FloatRegister, Idisp, $src$$base$$Register);
+  %}
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move integer value from integer register to float stack-location.
+instruct moveI2F_reg_stack(stackSlotF dst, iRegIsrc src) %{
+  match(Set dst (MoveI2F src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STW     $src, $dst \t// MoveI2F" %}
+  size(4);
+  ins_encode( enc_stw(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Moves between long and float
+
+instruct moveF2L_reg_stack(stackSlotL dst, regF src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "storeD  $src, $dst \t// STACK" %}
+  size(4);
+  ins_encode( enc_stfd(src, dst) );
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Moves between long and double
+
+// Move double value from double stack-location to long register.
+instruct moveD2L_stack_reg(iRegLdst dst, stackSlotD src) %{
+  match(Set dst (MoveD2L src));
+  ins_cost(MEMORY_REF_COST);
+  size(4);
+  format %{ "LD      $dst, $src \t// MoveD2L" %}
+  ins_encode( enc_ld(dst, src) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move double value from double register to long stack-location.
+instruct moveD2L_reg_stack(stackSlotL dst, regD src) %{
+  match(Set dst (MoveD2L src));
+  effect(DEF dst, USE src);
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STFD    $src, $dst \t// MoveD2L" %}
+  size(4);
+  ins_encode( enc_stfd(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move long value from long stack-location to double register.
+instruct moveL2D_stack_reg(regD dst, stackSlotL src) %{
+  match(Set dst (MoveL2D src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "LFD     $dst, $src \t// MoveL2D" %}
+  size(4);
+  ins_encode( enc_lfd(dst, src) );
+  ins_pipe(pipe_class_memory);
+%}
+
+// Move long value from long register to double stack-location.
+instruct moveL2D_reg_stack(stackSlotD dst, iRegLsrc src) %{
+  match(Set dst (MoveL2D src));
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STD     $src, $dst \t// MoveL2D" %}
+  size(4);
+  ins_encode( enc_std(src, dst) );
+  ins_pipe(pipe_class_memory);
+%}
+
+//----------Register Move Instructions-----------------------------------------
+
+// Replicate for Superword
+
+instruct moveReg(iRegLdst dst, iRegIsrc src) %{
+  predicate(false);
+  effect(DEF dst, USE src);
+
+  format %{ "MR      $dst, $src \t// replicate " %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Cast instructions (Java-level type cast)---------------------------
+
+// Cast Long to Pointer for unsafe natives.
+instruct castX2P(iRegPdst dst, iRegLsrc src) %{
+  match(Set dst (CastX2P src));
+
+  format %{ "MR      $dst, $src \t// Long->Ptr" %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Cast Pointer to Long for unsafe natives.
+instruct castP2X(iRegLdst dst, iRegP_N2P src) %{
+  match(Set dst (CastP2X src));
+
+  format %{ "MR      $dst, $src \t// Ptr->Long" %}
+  // variable size, 0 or 4.
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct castPP(iRegPdst dst) %{
+  match(Set dst (CastPP dst));
+  format %{ " -- \t// castPP of $dst" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct castII(iRegIdst dst) %{
+  match(Set dst (CastII dst));
+  format %{ " -- \t// castII of $dst" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct checkCastPP(iRegPdst dst) %{
+  match(Set dst (CheckCastPP dst));
+  format %{ " -- \t// checkcastPP of $dst" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Convert instructions-----------------------------------------------
+
+// Convert to boolean.
+
+// int_to_bool(src) : { 1   if src != 0
+//                    { 0   else
+//
+// strategy:
+// 1) Count leading zeros of 32 bit-value src,
+//    this returns 32 (0b10.0000) iff src == 0 and <32 otherwise.
+// 2) Shift 5 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
+// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.
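+//
+// Worked example (sketch):
+//   src == 0 : cntlzw -> 32 (0b10.0000), >>5 -> 1, XORI 1 -> 0
+//   src == 13: cntlzw -> 28,             >>5 -> 0, XORI 1 -> 1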
+
+// convI2Bool
+instruct convI2Bool_reg__cntlz_Ex(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (Conv2B src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI shiftAmount %{ 0x5 %}
+    uimmI16 mask %{ 0x1 %}
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    countLeadingZerosI(tmp1, src);
+    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
+    xorI_reg_uimm16(dst, tmp2, mask);
+  %}
+%}
+
+instruct convI2Bool_reg__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx) %{
+  match(Set dst (Conv2B src));
+  effect(TEMP crx);
+  predicate(!UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMPWI   $crx, $src, #0 \t// convI2B"
+            "LI      $dst, #0\n\t"
+            "BEQ     $crx, done\n\t"
+            "LI      $dst, #1\n"
+            "done:" %}
+  size(16);
+  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x0, 0x1) );
+  ins_pipe(pipe_class_compare);
+%}
+
+// ConvI2B + XorI
+instruct xorI_convI2Bool_reg_immIvalue1__cntlz_Ex(iRegIdst dst, iRegIsrc src, immI_1 mask) %{
+  match(Set dst (XorI (Conv2B src) mask));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI shiftAmount %{ 0x5 %}
+    iRegIdst tmp1;
+    countLeadingZerosI(tmp1, src);
+    urShiftI_reg_imm(dst, tmp1, shiftAmount);
+  %}
+%}
+
+instruct xorI_convI2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI_1 mask) %{
+  match(Set dst (XorI (Conv2B src) mask));
+  effect(TEMP crx);
+  predicate(!UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMPWI   $crx, $src, #0 \t// Xor(convI2B($src), $mask)"
+            "LI      $dst, #1\n\t"
+            "BEQ     $crx, done\n\t"
+            "LI      $dst, #0\n"
+            "done:" %}
+  size(16);
+  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x1, 0x0) );
+  ins_pipe(pipe_class_compare);
+%}
+
+// AndI 0b0..010..0 + ConvI2B
+instruct convI2Bool_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src, immIpowerOf2 mask) %{
+  match(Set dst (Conv2B (AndI src mask)));
+  predicate(UseRotateAndMaskInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "RLWINM  $dst, $src, $mask \t// convI2B(AndI($src, $mask))" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwinm);
+    __ rlwinm($dst$$Register, $src$$Register, (32-log2_long((jlong)$mask$$constant)) & 0x1f, 31, 31);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Convert pointer to boolean.
+//
+// ptr_to_bool(src) : { 1   if src != 0
+//                    { 0   else
+//
+// strategy:
+// 1) Count leading zeros of 64 bit-value src,
+//    this returns 64 (0b100.0000) iff src == 0 and <64 otherwise.
+// 2) Shift 6 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
+// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.
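+//
+// Worked example (sketch):
+//   src == 0: cntlzd -> 64 (0b100.0000), >>6 -> 1, XORI 1 -> 0
+//   src != 0: cntlzd <  64,              >>6 -> 0, XORI 1 -> 1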
+
+// ConvP2B
+instruct convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src) %{
+  match(Set dst (Conv2B src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI shiftAmount %{ 0x6 %}
+    uimmI16 mask %{ 0x1 %}
+    iRegIdst tmp1;
+    iRegIdst tmp2;
+    countLeadingZerosP(tmp1, src);
+    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
+    xorI_reg_uimm16(dst, tmp2, mask);
+  %}
+%}
+
+instruct convP2Bool_reg__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx) %{
+  match(Set dst (Conv2B src));
+  effect(TEMP crx);
+  predicate(!UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMPDI   $crx, $src, #0 \t// convP2B"
+            "LI      $dst, #0\n\t"
+            "BEQ     $crx, done\n\t"
+            "LI      $dst, #1\n"
+            "done:" %}
+  size(16);
+  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x0, 0x1) );
+  ins_pipe(pipe_class_compare);
+%}
+
+// ConvP2B + XorI
+instruct xorI_convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src, immI_1 mask) %{
+  match(Set dst (XorI (Conv2B src) mask));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI shiftAmount %{ 0x6 %}
+    iRegIdst tmp1;
+    countLeadingZerosP(tmp1, src);
+    urShiftI_reg_imm(dst, tmp1, shiftAmount);
+  %}
+%}
+
+instruct xorI_convP2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx, immI_1 mask) %{
+  match(Set dst (XorI (Conv2B src) mask));
+  effect(TEMP crx);
+  predicate(!UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMPDI   $crx, $src, #0 \t// XorI(convP2B($src), $mask)"
+            "LI      $dst, #1\n\t"
+            "BEQ     $crx, done\n\t"
+            "LI      $dst, #0\n"
+            "done:" %}
+  size(16);
+  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x1, 0x0) );
+  ins_pipe(pipe_class_compare);
+%}
+
+// if src1 < src2, return -1 else return 0
+instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (CmpLTMask src1 src2));
+  ins_cost(DEFAULT_COST*4);
+
+  expand %{
+    iRegIdst src1s;
+    iRegIdst src2s;
+    iRegIdst diff;
+    sxtI_reg(src1s, src1); // ensure proper sign extension
+    sxtI_reg(src2s, src2); // ensure proper sign extension
+    subI_reg_reg(diff, src1s, src2s);
+    // Need to consider a >= 33 bit result, therefore we need the 64-bit signmask.
+    signmask64I_regI(dst, diff);
+  %}
+%}
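+
+// Why the 64-bit signmask is needed (illustrative values): for
+// src1 = min_jint and src2 = max_jint the difference is -(2^32 - 1),
+// which needs 33 bits. Bit 31 of the low word would give the wrong
+// mask; bit 63 of the sign-extended difference is always correct.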
+
+instruct cmpLTMask_reg_immI0(iRegIdst dst, iRegIsrc src1, immI_0 src2) %{
+  match(Set dst (CmpLTMask src1 src2)); // if src1 < src2, return -1 else return 0
+  format %{ "SRAWI   $dst, $src1, $src2 \t// CmpLTMask" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_srawi);
+    __ srawi($dst$$Register, $src1$$Register, 0x1f);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Arithmetic Conversion Instructions---------------------------------
+
+// Convert to Byte  -- nop
+// Convert to Short -- nop
+
+// Convert to Int
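+//
+// There is no separate ideal node for i2b/i2s: C2 encodes them as a
+// left/right shift pair, e.g. i2b as (RShiftI (LShiftI src 24) 24).
+// The rules below collapse such pairs into a single EXTSB/EXTSH.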
+
+instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
+  match(Set dst (RShiftI (LShiftI src amount) amount));
+  format %{ "EXTSB   $dst, $src \t// byte->int" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsb);
+    __ extsb($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// LShiftI 16 + RShiftI 16 converts short to int.
+instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
+  match(Set dst (RShiftI (LShiftI src amount) amount));
+  format %{ "EXTSH   $dst, $src \t// short->int" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsh);
+    __ extsh($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// ConvL2I + ConvI2L: Sign extend int in long register.
+instruct sxtI_L2L_reg(iRegLdst dst, iRegLsrc src) %{
+  match(Set dst (ConvI2L (ConvL2I src)));
+
+  format %{ "EXTSW   $dst, $src \t// long->long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);
+    __ extsw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct convL2I_reg(iRegIdst dst, iRegLsrc src) %{
+  match(Set dst (ConvL2I src));
+  format %{ "MR      $dst, $src \t// long->int" %}
+  // variable size, 0 or 4
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_or);
+    __ mr_if_needed($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct convD2IRaw_regD(regD dst, regD src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCTIWZ $dst, $src \t// convD2I, $src != NaN" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fctiwz);
+    __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsReg crx, stackSlotL src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx, USE src);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "cmovI   $crx, $dst, $src" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT(InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 12 : 8);
+  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovI_bso_stackSlotL_conLvalue0_Ex(iRegIdst dst, flagsReg crx, stackSlotL mem) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx, USE mem);
+  predicate(false);
+
+  format %{ "CmovI   $dst, $crx, $mem \t// postalloc expanded" %}
+  postalloc_expand %{
+    //
+    // replaces
+    //
+    //   region  dst  crx  mem
+    //    \       |    |   /
+    //     dst=cmovI_bso_stackSlotL_conLvalue0
+    //
+    // with
+    //
+    //   region  dst
+    //    \       /
+    //     dst=loadConI16(0)
+    //      |
+    //      ^  region  dst  crx  mem
+    //      |   \       |    |    /
+    //      dst=cmovI_bso_stackSlotL
+    //
+
+    // Create new nodes.
+    MachNode *m1 = new (C) loadConI16Node();
+    MachNode *m2 = new (C) cmovI_bso_stackSlotLNode();
+
+    // inputs for new nodes
+    m1->add_req(n_region);
+    m2->add_req(n_region, n_crx, n_mem);
+
+    // precedences for new nodes
+    m2->add_prec(m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = op_dst;
+    m1->_opnds[1] = new (C) immI16Oper(0);
+
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_crx;
+    m2->_opnds[2] = op_mem;
+
+    // registers for new nodes
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+
+    // Insert new nodes.
+    nodes->push(m1);
+    nodes->push(m2);
+  %}
+%}
+
+// Double to Int conversion, NaN is mapped to 0.
+instruct convD2I_reg_ExEx(iRegIdst dst, regD src) %{
+  match(Set dst (ConvD2I src));
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    regD tmpD;
+    stackSlotL tmpS;
+    flagsReg crx;
+    cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
+    convD2IRaw_regD(tmpD, src);                         // Convert double to int (speculated).
+    moveD2L_reg_stack(tmpS, tmpD);                      // Store double to stack (speculated).
+    cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
+  %}
+%}
+
+instruct convF2IRaw_regF(regF dst, regF src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCTIWZ $dst, $src \t// convF2I, $src != NaN" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fctiwz);
+    __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Float to Int conversion, NaN is mapped to 0.
+instruct convF2I_regF_ExEx(iRegIdst dst, regF src) %{
+  match(Set dst (ConvF2I src));
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    regF tmpF;
+    stackSlotL tmpS;
+    flagsReg crx;
+    cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
+    convF2IRaw_regF(tmpF, src);                         // Convert float to int (speculated).
+    moveF2L_reg_stack(tmpS, tmpF);                      // Store float to stack (speculated).
+    cmovI_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
+  %}
+%}
+
+// Convert to Long
+
+instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
+  match(Set dst (ConvI2L src));
+  format %{ "EXTSW   $dst, $src \t// int->long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_extsw);
+    __ extsw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Zero-extend: convert unsigned int to long (convUI2L).
+instruct zeroExtendL_regI(iRegLdst dst, iRegIsrc src, immL_32bits mask) %{
+  match(Set dst (AndL (ConvI2L src) mask));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CLRLDI  $dst, $src, #32 \t// zero-extend int to long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src$$Register, 32);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Zero-extend: convert unsigned int to long in long register.
+instruct zeroExtendL_regL(iRegLdst dst, iRegLsrc src, immL_32bits mask) %{
+  match(Set dst (AndL src mask));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CLRLDI  $dst, $src, #32 \t// zero-extend int to long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicl);
+    __ clrldi($dst$$Register, $src$$Register, 32);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct convF2LRaw_regF(regF dst, regF src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCTIDZ $dst, $src \t// convF2L, $src != NaN" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fctidz);
+    __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsReg crx, stackSlotL src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx, USE src);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "cmovL   $crx, $dst, $src" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT Compile::current()->do_hb_scheduling()*/ ? 12 : 8);
+  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmovL_bso_stackSlotL_conLvalue0_Ex(iRegLdst dst, flagsReg crx, stackSlotL mem) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx, USE mem);
+  predicate(false);
+
+  format %{ "CmovL   $dst, $crx, $mem \t// postalloc expanded" %}
+  postalloc_expand %{
+    //
+    // replaces
+    //
+    //   region  dst  crx  mem
+    //    \       |    |   /
+    //     dst=cmovL_bso_stackSlotL_conLvalue0
+    //
+    // with
+    //
+    //   region  dst
+    //    \       /
+    //     dst=loadConL16(0)
+    //      |
+    //      ^  region  dst  crx  mem
+    //      |   \       |    |    /
+    //      dst=cmovL_bso_stackSlotL
+    //
+
+    // Create new nodes.
+    MachNode *m1 = new (C) loadConL16Node();
+    MachNode *m2 = new (C) cmovL_bso_stackSlotLNode();
+
+    // inputs for new nodes
+    m1->add_req(n_region);
+    m2->add_req(n_region, n_crx, n_mem);
+    m2->add_prec(m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = op_dst;
+    m1->_opnds[1] = new (C) immL16Oper(0);
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_crx;
+    m2->_opnds[2] = op_mem;
+
+    // registers for new nodes
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+
+    // Insert new nodes.
+    nodes->push(m1);
+    nodes->push(m2);
+  %}
+%}
+
+// Float to Long conversion, NaN is mapped to 0.
+instruct convF2L_reg_ExEx(iRegLdst dst, regF src) %{
+  match(Set dst (ConvF2L src));
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    regF tmpF;
+    stackSlotL tmpS;
+    flagsReg crx;
+    cmpFUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
+    convF2LRaw_regF(tmpF, src);                         // Convert float to long (speculated).
+    moveF2L_reg_stack(tmpS, tmpF);                      // Store float to stack (speculated).
+    cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
+  %}
+%}
+
+instruct convD2LRaw_regD(regD dst, regD src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCTIDZ $dst, $src \t// convD2L $src != NaN" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fctidz);
+    __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Double to Long conversion, NaN is mapped to 0.
+instruct convD2L_reg_ExEx(iRegLdst dst, regD src) %{
+  match(Set dst (ConvD2L src));
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    regD tmpD;
+    stackSlotL tmpS;
+    flagsReg crx;
+    cmpDUnordered_reg_reg(crx, src, src);               // Check whether src is NaN.
+    convD2LRaw_regD(tmpD, src);                         // Convert double to long (speculated).
+    moveD2L_reg_stack(tmpS, tmpD);                      // Store double to stack (speculated).
+    cmovL_bso_stackSlotL_conLvalue0_Ex(dst, crx, tmpS); // Cmove based on NaN check.
+  %}
+%}
+
+// Convert to Float
+
+// Placed here as needed in expand.
+instruct convL2DRaw_regD(regD dst, regD src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCFID $dst, $src \t// convL2D" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fcfid);
+    __ fcfid($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Placed here as needed in expand.
+instruct convD2F_reg(regF dst, regD src) %{
+  match(Set dst (ConvD2F src));
+  format %{ "FRSP    $dst, $src \t// convD2F" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_frsp);
+    __ frsp($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Integer to Float conversion.
+instruct convI2F_ireg_Ex(regF dst, iRegIsrc src) %{
+  match(Set dst (ConvI2F src));
+  predicate(!VM_Version::has_fcfids());
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    iRegLdst tmpL;
+    stackSlotL tmpS;
+    regD tmpD;
+    regD tmpD2;
+    convI2L_reg(tmpL, src);              // Sign-extend int to long.
+    regL_to_stkL(tmpS, tmpL);            // Store long to stack.
+    moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
+    convL2DRaw_regD(tmpD2, tmpD);        // Convert to double.
+    convD2F_reg(dst, tmpD2);             // Convert double to float.
+  %}
+%}
+
+instruct convL2FRaw_regF(regF dst, regD src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "FCFIDS $dst, $src \t// convL2F" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fcfid);
+    __ fcfids($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Integer to Float conversion. Special version for Power7.
+instruct convI2F_ireg_fcfids_Ex(regF dst, iRegIsrc src) %{
+  match(Set dst (ConvI2F src));
+  predicate(VM_Version::has_fcfids());
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    iRegLdst tmpL;
+    stackSlotL tmpS;
+    regD tmpD;
+    convI2L_reg(tmpL, src);              // Sign-extend int to long.
+    regL_to_stkL(tmpS, tmpL);            // Store long to stack.
+    moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
+    convL2FRaw_regF(dst, tmpD);          // Convert to float.
+  %}
+%}
+
+// L2F to avoid runtime call.
+instruct convL2F_ireg_fcfids_Ex(regF dst, iRegLsrc src) %{
+  match(Set dst (ConvL2F src));
+  predicate(VM_Version::has_fcfids());
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    stackSlotL tmpS;
+    regD tmpD;
+    regL_to_stkL(tmpS, src);             // Store long to stack.
+    moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
+    convL2FRaw_regF(dst, tmpD);          // Convert to float.
+  %}
+%}
+
+// Moved up as used in expand.
+//instruct convD2F_reg(regF dst, regD src) %{%}
+
+// Convert to Double
+
+// Integer to Double conversion.
+instruct convI2D_reg_Ex(regD dst, iRegIsrc src) %{
+  match(Set dst (ConvI2D src));
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    iRegLdst tmpL;
+    stackSlotL tmpS;
+    regD tmpD;
+    convI2L_reg(tmpL, src);              // Sign-extend int to long.
+    regL_to_stkL(tmpS, tmpL);            // Store long to stack.
+    moveL2D_stack_reg(tmpD, tmpS);       // Load long into double register.
+    convL2DRaw_regD(dst, tmpD);          // Convert to double.
+  %}
+%}
+
+// Long to Double conversion
+instruct convL2D_reg_Ex(regD dst, stackSlotL src) %{
+  match(Set dst (ConvL2D src));
+  ins_cost(DEFAULT_COST + MEMORY_REF_COST);
+
+  expand %{
+    regD tmpD;
+    moveL2D_stack_reg(tmpD, src);
+    convL2DRaw_regD(dst, tmpD);
+  %}
+%}
+
+instruct convF2D_reg(regD dst, regF src) %{
+  match(Set dst (ConvF2D src));
+  format %{ "FMR     $dst, $src \t// float->double" %}
+  // variable size, 0 or 4
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
+    __ fmr_if_needed($dst$$FloatRegister, $src$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------Control Flow Instructions------------------------------------------
+// Compare Instructions
+
+// Compare Integers
+instruct cmpI_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set crx (CmpI src1 src2));
+  size(4);
+  format %{ "CMPW    $crx, $src1, $src2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmp);
+    __ cmpw($crx$$CondRegister, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpI_reg_imm16(flagsReg crx, iRegIsrc src1, immI16 src2) %{
+  match(Set crx (CmpI src1 src2));
+  format %{ "CMPWI   $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpi);
+    __ cmpwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// (src1 & src2) == 0?
+instruct testI_reg_imm(flagsRegCR0 cr0, iRegIsrc src1, uimmI16 src2, immI_0 zero) %{
+  match(Set cr0 (CmpI (AndI src1 src2) zero));
+  // r0 is killed
+  format %{ "ANDI    R0, $src1, $src2 \t// BTST int" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andi_);
+    // FIXME: avoid andi_ ?
+    __ andi_(R0, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set crx (CmpL src1 src2));
+  format %{ "CMPD    $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmp);
+    __ cmpd($crx$$CondRegister, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpL_reg_imm16(flagsReg crx, iRegLsrc src1, immL16 src2) %{
+  match(Set crx (CmpL src1 src2));
+  format %{ "CMPDI   $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpi);
+    __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct testL_reg_reg(flagsRegCR0 cr0, iRegLsrc src1, iRegLsrc src2, immL_0 zero) %{
+  match(Set cr0 (CmpL (AndL src1 src2) zero));
+  // r0 is killed
+  format %{ "AND     R0, $src1, $src2 \t// BTST long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_and_);
+    __ and_(R0, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct testL_reg_imm(flagsRegCR0 cr0, iRegLsrc src1, uimmL16 src2, immL_0 zero) %{
+  match(Set cr0 (CmpL (AndL src1 src2) zero));
+  // r0 is killed
+  format %{ "ANDI    R0, $src1, $src2 \t// BTST long" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_andi_);
+    // FIXME: avoid andi_ ?
+    __ andi_(R0, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmovI_conIvalueMinus1_conIvalue1(iRegIdst dst, flagsReg crx) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "cmovI   $crx, $dst, -1, 0, +1" %}
+  // Worst case is branch + move + branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT (InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling()) */ ? 20 : 16);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmove);
+    Label done;
+    // li(Rdst, 0);              // equal -> 0
+    __ beq($crx$$CondRegister, done);
+    __ li($dst$$Register, 1);    // greater -> +1
+    __ bgt($crx$$CondRegister, done);
+    __ li($dst$$Register, -1);   // unordered or less -> -1
+    // TODO: PPC port __ endgroup_if_needed(_size == 20);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(iRegIdst dst, flagsReg crx) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE crx);
+  predicate(false);
+
+  format %{ "CmovI    $crx, $dst, -1, 0, +1 \t// postalloc expanded" %}
+  postalloc_expand %{
+    //
+    // replaces
+    //
+    //   region  crx
+    //    \       |
+    //     dst=cmovI_conIvalueMinus1_conIvalue0_conIvalue1
+    //
+    // with
+    //
+    //   region
+    //    \
+    //     dst=loadConI16(0)
+    //      |
+    //      ^  region  crx
+    //      |   \       |
+    //      dst=cmovI_conIvalueMinus1_conIvalue1
+    //
+
+    // Create new nodes.
+    MachNode *m1 = new (C) loadConI16Node();
+    MachNode *m2 = new (C) cmovI_conIvalueMinus1_conIvalue1Node();
+
+    // inputs for new nodes
+    m1->add_req(n_region);
+    m2->add_req(n_region, n_crx);
+    m2->add_prec(m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = op_dst;
+    m1->_opnds[1] = new (C) immI16Oper(0);
+    m2->_opnds[0] = op_dst;
+    m2->_opnds[1] = op_crx;
+
+    // registers for new nodes
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
+
+    // Insert new nodes.
+    nodes->push(m1);
+    nodes->push(m2);
+  %}
+%}
+
+// Manifest a CmpL3 result in an integer register. Very painful.
+// This is the test to avoid.
+// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
+instruct cmpL3_reg_reg_ExEx(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
+  match(Set dst (CmpL3 src1 src2));
+  ins_cost(DEFAULT_COST*5+BRANCH_COST);
+
+  expand %{
+    flagsReg tmp1;
+    cmpL_reg_reg(tmp1, src1, src2);
+    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
+  %}
+%}
+
+// Implicit range checks.
+// A range check in the ideal world has one of the following shapes:
+//  - (If le (CmpU length index)), (IfTrue  throw exception)
+//  - (If lt (CmpU index length)), (IfFalse throw exception)
+//
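+// For example, a Java bounds check `index < 0 || index >= length' reduces
+// to a single unsigned compare: viewed as unsigned, any negative index is
+// larger than any valid length, so one CmpU covers both conditions
+// (a sketch of the idea, not the exact ideal subgraph).
+//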
+// Match range check 'If le (CmpU length index)'.
+instruct rangeCheck_iReg_uimm15(cmpOp cmp, iRegIsrc src_length, uimmI15 index, label labl) %{
+  match(If cmp (CmpU src_length index));
+  effect(USE labl);
+  predicate(TrapBasedRangeChecks &&
+            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le &&
+            PROB_UNLIKELY(_leaf->as_If()->_prob) >= PROB_ALWAYS &&
+            (Matcher::branches_to_uncommon_trap(_leaf)));
+
+  ins_is_TrapBasedCheckNode(true);
+
+  format %{ "TWI     $index $cmp $src_length \t// RangeCheck => trap $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_twi);
+    if ($cmp$$cmpcode == 0x1 /* less_equal */) {
+      __ trap_range_check_le($src_length$$Register, $index$$constant);
+    } else {
+      // Both successors are uncommon traps, probability is 0.
+      // Node got flipped during fixup flow.
+      assert($cmp$$cmpcode == 0x9, "must be greater");
+      __ trap_range_check_g($src_length$$Register, $index$$constant);
+    }
+  %}
+  ins_pipe(pipe_class_trap);
+%}
+
+// Match range check 'If lt (CmpU index length)'.
+instruct rangeCheck_iReg_iReg(cmpOp cmp, iRegIsrc src_index, iRegIsrc src_length, label labl) %{
+  match(If cmp (CmpU src_index src_length));
+  effect(USE labl);
+  predicate(TrapBasedRangeChecks &&
+            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
+            _leaf->as_If()->_prob >= PROB_ALWAYS &&
+            (Matcher::branches_to_uncommon_trap(_leaf)));
+
+  ins_is_TrapBasedCheckNode(true);
+
+  format %{ "TW      $src_index $cmp $src_length \t// RangeCheck => trap $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_tw);
+    if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
+      __ trap_range_check_ge($src_index$$Register, $src_length$$Register);
+    } else {
+      // Both successors are uncommon traps, probability is 0.
+      // Node got flipped during fixup flow.
+      assert($cmp$$cmpcode == 0x8, "must be less");
+      __ trap_range_check_l($src_index$$Register, $src_length$$Register);
+    }
+  %}
+  ins_pipe(pipe_class_trap);
+%}
+
+// Match range check 'If lt (CmpU index length)'.
+instruct rangeCheck_uimm15_iReg(cmpOp cmp, iRegIsrc src_index, uimmI15 length, label labl) %{
+  match(If cmp (CmpU src_index length));
+  effect(USE labl);
+  predicate(TrapBasedRangeChecks &&
+            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
+            _leaf->as_If()->_prob >= PROB_ALWAYS &&
+            (Matcher::branches_to_uncommon_trap(_leaf)));
+
+  ins_is_TrapBasedCheckNode(true);
+
+  format %{ "TWI     $src_index $cmp $length \t// RangeCheck => trap $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_twi);
+    if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
+      __ trap_range_check_ge($src_index$$Register, $length$$constant);
+    } else {
+      // Both successors are uncommon traps, probability is 0.
+      // Node got flipped during fixup flow.
+      assert($cmp$$cmpcode == 0x8, "must be less");
+      __ trap_range_check_l($src_index$$Register, $length$$constant);
+    }
+  %}
+  ins_pipe(pipe_class_trap);
+%}
+
+instruct compU_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set crx (CmpU src1 src2));
+  format %{ "CMPLW   $crx, $src1, $src2 \t// unsigned" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
+    __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct compU_reg_uimm16(flagsReg crx, iRegIsrc src1, uimmI16 src2) %{
+  match(Set crx (CmpU src1 src2));
+  size(4);
+  format %{ "CMPLWI  $crx, $src1, $src2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpli);
+    __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// Implicit zero checks (more implicit null checks).
+// No constant pool entries required.
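+//
+// Sketch of the idea: instead of `CMPDI crx, value, 0; BEQ throw_path', a
+// single trap instruction raises a trap iff value == 0, and the trap
+// handler turns it into the exception/deoptimization path. The non-null
+// fast path then costs one instruction and no branch.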
+instruct zeroCheckN_iReg_imm0(cmpOp cmp, iRegNsrc value, immN_0 zero, label labl) %{
+  match(If cmp (CmpN value zero));
+  effect(USE labl);
+  predicate(TrapBasedNullChecks &&
+            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
+            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
+            Matcher::branches_to_uncommon_trap(_leaf));
+  ins_cost(1);
+
+  ins_is_TrapBasedCheckNode(true);
+
+  format %{ "TDI     $value $cmp $zero \t// ZeroCheckN => trap $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_tdi);
+    if ($cmp$$cmpcode == 0xA) {
+      __ trap_null_check($value$$Register);
+    } else {
+      // Both successors are uncommon traps, probability is 0.
+      // Node got flipped during fixup flow.
+      assert($cmp$$cmpcode == 0x2, "must be equal(0xA) or notEqual(0x2)");
+      __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
+    }
+  %}
+  ins_pipe(pipe_class_trap);
+%}
+
+// Compare narrow oops.
+instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
+  match(Set crx (CmpN src1 src2));
+
+  size(4);
+  ins_cost(DEFAULT_COST);
+  format %{ "CMPLW   $crx, $src1, $src2 \t// compressed ptr" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
+    __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
+  match(Set crx (CmpN src1 src2));
+  // Make this more expensive than zeroCheckN_iReg_imm0.
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CMPLWI  $crx, $src1, $src2 \t// compressed ptr" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpli);
+    __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// Implicit zero checks (more implicit null checks).
+// No constant pool entries required.
+instruct zeroCheckP_reg_imm0(cmpOp cmp, iRegP_N2P value, immP_0 zero, label labl) %{
+  match(If cmp (CmpP value zero));
+  effect(USE labl);
+  predicate(TrapBasedNullChecks &&
+            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
+            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
+            Matcher::branches_to_uncommon_trap(_leaf));
+
+  ins_is_TrapBasedCheckNode(true);
+
+  format %{ "TDI     $value $cmp $zero \t// ZeroCheckP => trap $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_tdi);
+    if ($cmp$$cmpcode == 0xA) {
+      __ trap_null_check($value$$Register);
+    } else {
+      // Both successors are uncommon traps, probability is 0.
+      // Node got flipped during fixup flow.
+      assert($cmp$$cmpcode == 0x2, "must be equal(0xA) or notEqual(0x2)");
+      __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
+    }
+  %}
+  ins_pipe(pipe_class_trap);
+%}
+
+// Compare Pointers
+instruct cmpP_reg_reg(flagsReg crx, iRegP_N2P src1, iRegP_N2P src2) %{
+  match(Set crx (CmpP src1 src2));
+  format %{ "CMPLD   $crx, $src1, $src2 \t// ptr" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpl);
+    __ cmpld($crx$$CondRegister, $src1$$Register, $src2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// Used in postalloc expand.
+instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
+  // This match rule prevents reordering of the node before a safepoint.
+  // This only makes sense if this instruction is used exclusively
+  // for the expansion of EncodeP!
+  match(Set crx (CmpP src1 src2));
+  predicate(false);
+
+  format %{ "CMPDI   $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmpi);
+    __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+//----------Float Compares----------------------------------------------------
+
+instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
+  // no match-rule, false predicate
+  effect(DEF crx, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "cmpFUrd $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fcmpu);
+    __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmov_bns_less(flagsReg crx) %{
+  // no match-rule, false predicate
+  effect(DEF crx);
+  predicate(false);
+
+  ins_variable_size_depending_on_alignment(true);
+
+  format %{ "cmov    $crx" %}
+  // Worst case is branch + move + stop, no stop without scheduler.
+  size(false /* TODO: PPC PORT(InsertEndGroupPPC64 && Compile::current()->do_hb_scheduling())*/ ? 16 : 12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cmovecr);
+    Label done;
+    __ bns($crx$$CondRegister, done);        // not unordered -> keep crx
+    __ li(R0, 0);
+    __ cmpwi($crx$$CondRegister, R0, 1);     // unordered -> set crx to 'less'
+    // TODO PPC port __ endgroup_if_needed(_size == 16);
+    __ bind(done);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Compare floating, generate condition code.
+instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
+  // FIXME: should we match 'If cmp (CmpF src1 src2)' ??
+  //
+  // The following code sequence occurs a lot in mpegaudio:
+  //
+  // block BXX:
+  // 0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
+  //    cmpFUrd CCR6, F11, F9
+  // 4: instruct cmov_bns_less (cmpF_reg_reg-1):
+  //    cmov CCR6
+  // 8: instruct branchConSched:
+  //    B_FARle CCR6, B56  P=0.500000 C=-1.000000
+  match(Set crx (CmpF src1 src2));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  format %{ "CmpF    $crx, $src1, $src2 \t// postalloc expanded" %}
+  postalloc_expand %{
+    //
+    // replaces
+    //
+    //   region  src1  src2
+    //    \       |     |
+    //     crx=cmpF_reg_reg
+    //
+    // with
+    //
+    //   region  src1  src2
+    //    \       |     |
+    //     crx=cmpFUnordered_reg_reg
+    //      |
+    //      ^  region
+    //      |   \
+    //      crx=cmov_bns_less
+    //
+
+    // Create new nodes.
+    MachNode *m1 = new (C) cmpFUnordered_reg_regNode();
+    MachNode *m2 = new (C) cmov_bns_lessNode();
+
+    // inputs for new nodes
+    m1->add_req(n_region, n_src1, n_src2);
+    m2->add_req(n_region);
+    m2->add_prec(m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = op_crx;
+    m1->_opnds[1] = op_src1;
+    m1->_opnds[2] = op_src2;
+    m2->_opnds[0] = op_crx;
+
+    // registers for new nodes
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
+
+    // Insert new nodes.
+    nodes->push(m1);
+    nodes->push(m2);
+  %}
+%}
+
+// Compare float, generate -1,0,1
+instruct cmpF3_reg_reg_ExEx(iRegIdst dst, regF src1, regF src2) %{
+  match(Set dst (CmpF3 src1 src2));
+  ins_cost(DEFAULT_COST*5+BRANCH_COST);
+
+  expand %{
+    flagsReg tmp1;
+    cmpFUnordered_reg_reg(tmp1, src1, src2);
+    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
+  %}
+%}
+
+instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
+  // no match-rule, false predicate
+  effect(DEF crx, USE src1, USE src2);
+  predicate(false);
+
+  format %{ "cmpFUrd $crx, $src1, $src2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fcmpu);
+    __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct cmpD_reg_reg_Ex(flagsReg crx, regD src1, regD src2) %{
+  match(Set crx (CmpD src1 src2));
+  ins_cost(DEFAULT_COST+BRANCH_COST);
+
+  format %{ "CmpD    $crx, $src1, $src2 \t// postalloc expanded" %}
+  postalloc_expand %{
+    //
+    // replaces
+    //
+    //   region  src1  src2
+    //    \       |     |
+    //     crx=cmpD_reg_reg
+    //
+    // with
+    //
+    //   region  src1  src2
+    //    \       |     |
+    //     crx=cmpDUnordered_reg_reg
+    //      |
+    //      ^  region
+    //      |   \
+    //      crx=cmov_bns_less
+    //
+
+    // create new nodes
+    MachNode *m1 = new (C) cmpDUnordered_reg_regNode();
+    MachNode *m2 = new (C) cmov_bns_lessNode();
+
+    // inputs for new nodes
+    m1->add_req(n_region, n_src1, n_src2);
+    m2->add_req(n_region);
+    m2->add_prec(m1);
+
+    // operands for new nodes
+    m1->_opnds[0] = op_crx;
+    m1->_opnds[1] = op_src1;
+    m1->_opnds[2] = op_src2;
+    m2->_opnds[0] = op_crx;
+
+    // registers for new nodes
+    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
+    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
+
+    // Insert new nodes.
+    nodes->push(m1);
+    nodes->push(m2);
+  %}
+%}
+
+// Compare double, generate -1,0,1
+instruct cmpD3_reg_reg_ExEx(iRegIdst dst, regD src1, regD src2) %{
+  match(Set dst (CmpD3 src1 src2));
+  ins_cost(DEFAULT_COST*5+BRANCH_COST);
+
+  expand %{
+    flagsReg tmp1;
+    cmpDUnordered_reg_reg(tmp1, src1, src2);
+    cmovI_conIvalueMinus1_conIvalue0_conIvalue1_Ex(dst, tmp1);
+  %}
+%}
+
+//----------Branches---------------------------------------------------------
+// Jump
+
+// Direct Branch.
+instruct branch(label labl) %{
+  match(Goto);
+  effect(USE labl);
+  ins_cost(BRANCH_COST);
+
+  format %{ "B       $labl" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_b);
+    Label d;    // dummy
+    __ bind(d);
+    Label* p = $labl$$label;
+    // `p' is `NULL' when this encoding class is used only to
+    // determine the size of the encoded instruction.
+    Label& l = (NULL == p) ? d : *(p);
+    __ b(l);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional Near Branch
+instruct branchCon(cmpOp cmp, flagsReg crx, label lbl) %{
+  // Same match rule as `branchConFar'.
+  match(If cmp crx);
+  effect(USE lbl);
+  ins_cost(BRANCH_COST);
+
+  // If set to 1 this indicates that the current instruction is a
+  // short variant of a long branch. This avoids using this
+  // instruction in first-pass matching. It will then only be used in
+  // the `Shorten_branches' pass.
+  ins_short_branch(1);
+
+  format %{ "B$cmp     $crx, $lbl" %}
+  size(4);
+  ins_encode( enc_bc(crx, cmp, lbl) );
+  ins_pipe(pipe_class_default);
+%}
+
+// This is for cases when the ppc64 `bc' instruction does not reach far
+// enough: its 14-bit displacement covers only about +/-32 KB. So we emit
+// a far branch here, which is more expensive.
+//
+// Conditional Far Branch
+instruct branchConFar(cmpOp cmp, flagsReg crx, label lbl) %{
+  // Same match rule as `branchCon'.
+  match(If cmp crx);
+  effect(USE crx, USE lbl);
+  predicate(!false /* TODO: PPC port HB_Schedule*/);
+  // Higher cost than `branchCon'.
+  ins_cost(5*BRANCH_COST);
+
+  // This is not a short variant of a branch, but the long variant.
+  ins_short_branch(0);
+
+  format %{ "B_FAR$cmp $crx, $lbl" %}
+  size(8);
+  ins_encode( enc_bc_far(crx, cmp, lbl) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional Branch used with Power6 scheduler (can be far or short).
+instruct branchConSched(cmpOp cmp, flagsReg crx, label lbl) %{
+  // Same match rule as `branchCon'.
+  match(If cmp crx);
+  effect(USE crx, USE lbl);
+  predicate(false /* TODO: PPC port HB_Schedule*/);
+  // Higher cost than `branchCon'.
+  ins_cost(5*BRANCH_COST);
+
+  // Actually size doesn't depend on alignment but on shortening.
+  ins_variable_size_depending_on_alignment(true);
+  // long variant.
+  ins_short_branch(0);
+
+  format %{ "B_FAR$cmp $crx, $lbl" %}
+  size(8); // worst case
+  ins_encode( enc_bc_short_far(crx, cmp, lbl) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct branchLoopEnd(cmpOp cmp, flagsReg crx, label labl) %{
+  match(CountedLoopEnd cmp crx);
+  effect(USE labl);
+  ins_cost(BRANCH_COST);
+
+  // short variant.
+  ins_short_branch(1);
+
+  format %{ "B$cmp     $crx, $labl \t// counted loop end" %}
+  size(4);
+  ins_encode( enc_bc(crx, cmp, labl) );
+  ins_pipe(pipe_class_default);
+%}
+
+instruct branchLoopEndFar(cmpOp cmp, flagsReg crx, label labl) %{
+  match(CountedLoopEnd cmp crx);
+  effect(USE labl);
+  predicate(!false /* TODO: PPC port HB_Schedule */);
+  ins_cost(BRANCH_COST);
+
+  // Long variant.
+  ins_short_branch(0);
+
+  format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
+  size(8);
+  ins_encode( enc_bc_far(crx, cmp, labl) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Conditional Branch used with Power6 scheduler (can be far or short).
+instruct branchLoopEndSched(cmpOp cmp, flagsReg crx, label labl) %{
+  match(CountedLoopEnd cmp crx);
+  effect(USE labl);
+  predicate(false /* TODO: PPC port HB_Schedule */);
+  // Higher cost than `branchCon'.
+  ins_cost(5*BRANCH_COST);
+
+  // Actually size doesn't depend on alignment but on shortening.
+  ins_variable_size_depending_on_alignment(true);
+  // Long variant.
+  ins_short_branch(0);
+
+  format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
+  size(8); // worst case
+  ins_encode( enc_bc_short_far(crx, cmp, labl) );
+  ins_pipe(pipe_class_default);
+%}
+
+// ============================================================================
+// Java runtime operations, intrinsics and other complex operations.
+
+// The second, slow half of a subtype check. Scan the subklass's secondary
+// superklass array for an instance of the superklass. Set a hidden internal
+// cache on a hit (the cache is checked with exposed code in
+// gen_subtype_check()). Return non-zero for a miss, zero for a hit. The
+// encoding ALSO sets flags.
+//
+// GL TODO: Improve this.
+// - result should not be a TEMP
+// - Add match rule as on sparc avoiding additional Cmp.
+instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
+                             iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
+  match(Set result (PartialSubtypeCheck subklass superklass));
+  effect(TEMP result, TEMP tmp_klass, TEMP tmp_arrayptr);
+  ins_cost(DEFAULT_COST*10);
+
+  format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register, 
+                                     $tmp_klass$$Register, NULL, $result$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// inlined locking and unlocking
+
+instruct cmpFastLock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
+  match(Set crx (FastLock oop box));
+  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
+  // TODO PPC port predicate(!UseNewFastLockPPC64 || UseBiasedLocking);
+
+  format %{ "FASTLOCK  $oop, $box, $tmp1, $tmp2, $tmp3" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
+                                 $tmp3$$Register, $tmp1$$Register, $tmp2$$Register);
+    // If locking was successful, crx should indicate 'EQ'.
+    // The compiler generates a branch to the runtime call to
+    // _complete_monitor_locking_Java for the case where crx is 'NE'.
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+instruct cmpFastUnlock(flagsReg crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
+  match(Set crx (FastUnlock oop box));
+  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
+
+  format %{ "FASTUNLOCK  $oop, $box, $tmp1, $tmp2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
+                                   $tmp3$$Register, $tmp1$$Register, $tmp2$$Register);
+    // If unlocking was successful, crx should indicate 'EQ'.
+    // The compiler generates a branch to the runtime call to
+    // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// Align address.
+instruct align_addr(iRegPdst dst, iRegPsrc src, immLnegpow2 mask) %{
+  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
+
+  format %{ "ANDDI   $dst, $src, $mask \t// next aligned address" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldicr);
+    __ clrrdi($dst$$Register, $src$$Register, log2_long((jlong)-$mask$$constant));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Array size computation.
+instruct array_size(iRegLdst dst, iRegPsrc end, iRegPsrc start) %{
+  match(Set dst (SubL (CastP2X end) (CastP2X start)));
+
+  format %{ "SUB     $dst, $end, $start \t// array size in bytes" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_subf);
+    __ subf($dst$$Register, $start$$Register, $end$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Clear-array with dynamic array-size.
+instruct inlineCallClearArray(rarg1RegL cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
+  match(Set dummy (ClearArray cnt base));
+  effect(USE_KILL cnt, USE_KILL base, KILL ctr);
+  ins_cost(MEMORY_REF_COST);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "ClearArray $cnt, $base" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ clear_memory_doubleword($base$$Register, $cnt$$Register); // kills cnt, base, R0
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// String_IndexOf for needle of length 1.
+//
+// Match needle into immediate operands: no loadConP node needed. Saves one
+// register and two instructions over string_indexOf_imm1Node.
+//
+// Assumes register result differs from all input registers.
+//
+// Preserves registers haystack, haycnt
+// Kills     registers tmp1, tmp2
+// Defines   registers result
+//
+// Use dst register classes if a register gets killed, as is the case for the tmp registers!
+//
+// Unfortunately this does not match very often. In many situations the AddP
+// is shared by several nodes, even several StrIndexOf nodes, which breaks
+// the match tree.
+instruct string_indexOf_imm1_char(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                                  immP needleImm, immL offsetImm, immI_1 needlecntImm,
+                                  iRegIdst tmp1, iRegIdst tmp2,
+                                  flagsRegCR0 cr0, flagsRegCR1 cr1) %{
+  predicate(SpecialStringIndexOf);  // type check implicit by parameter type, see Matcher::match_rule_supported.
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
+
+  effect(TEMP result, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1);
+
+  ins_cost(150);
+  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
+            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    immPOper *needleOper = (immPOper *)$needleImm;
+    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
+    ciTypeArray* needle_values = t->const_oop()->as_type_array();  // Pointer to live char *
+
+    __ string_indexof_1($result$$Register,
+                        $haystack$$Register, $haycnt$$Register,
+                        R0, needle_values->char_at(0),
+                        $tmp1$$Register, $tmp2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// String_IndexOf for needle of length 1.
+//
+// Special case requires less registers and emits less instructions.
+//
+// Assumes register result differs from all input registers.
+//
+// Preserves registers haystack, haycnt
+// Kills     registers tmp1, tmp2, needle
+// Defines   registers result
+//
+// Use dst register classes if a register gets killed, as is the case for the tmp registers!
+instruct string_indexOf_imm1(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
+                             rscratch2RegP needle, immI_1 needlecntImm,
+                             iRegIdst tmp1, iRegIdst tmp2,
+                             flagsRegCR0 cr0, flagsRegCR1 cr1) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL needle, /* TDEF needle, */ TEMP result,
+         TEMP tmp1, TEMP tmp2);
+  // Required for EA: check if it is still a type_array.
+  predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(180);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+    guarantee(needle_values, "sanity");
+    if (needle_values != NULL) {
+      __ string_indexof_1($result$$Register,
+                          $haystack$$Register, $haycnt$$Register,
+                          R0, needle_values->char_at(0),
+                          $tmp1$$Register, $tmp2$$Register);
+    } else {
+      __ string_indexof_1($result$$Register,
+                          $haystack$$Register, $haycnt$$Register,
+                          $needle$$Register, 0,
+                          $tmp1$$Register, $tmp2$$Register);
+    }
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// String_IndexOf.
+//
+// Length of the needle as an immediate. This saves an instruction for
+// loading the constant needle length.
+// @@@ TODO Specify rules for length < 8 or so, and either unroll the
+// comparison of the needle completely or do it in a vector instruction.
+// This should save registers for needlecnt and needle.
+//
+// Assumes register result differs from all input registers.
+// Overwrites haycnt, needlecnt.
+// Use dst register classes if a register gets killed, as is the case for the tmp registers!
+instruct string_indexOf_imm(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
+                            iRegPsrc needle, uimmI15 needlecntImm,
+                            iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
+                            flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
+  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6);
+  // Required for EA: check if it is still a type_array.
+  predicate(SpecialStringIndexOf && n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
+            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
+  ins_cost(250);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
+            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    Node *ndl = in(operand_index($needle));  // The node that defines needle.
+    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
+
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// StrIndexOf node.
+//
+// Assumes register result differs from all input registers.
+// Overwrites haycnt, needlecnt.
+// Use dst register classes if a register gets killed, as is the case for tmp registers!
+instruct string_indexOf(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
+                        iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
+                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6) %{
+  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
+  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
+         TEMP result,
+         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6);
+  predicate(SpecialStringIndexOf);  // See Matcher::match_rule_supported.
+  ins_cost(300);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
+             " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_indexof($result$$Register,
+                      $haystack$$Register, $haycnt$$Register,
+                      $needle$$Register, NULL, $needlecnt$$Register, 0,  // needlecnt not constant.
+                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// String equals with immediate.
+instruct string_equals_imm(iRegPsrc str1, iRegPsrc str2, uimmI15 cntImm, iRegIdst result,
+                           iRegPdst tmp1, iRegPdst tmp2,
+                           flagsRegCR0 cr0, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrEquals (Binary str1 str2) cntImm));
+  effect(TEMP result, TEMP tmp1, TEMP tmp2,
+         KILL cr0, KILL cr6, KILL ctr);
+  predicate(SpecialStringEquals);  // See Matcher::match_rule_supported.
+  ins_cost(250);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String Equals SCL [0..$cntImm]($str1),[0..$cntImm]($str2)"
+            " -> $result \t// KILL $cr0, $cr6, $ctr, TEMP $result, $tmp1, $tmp2" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ char_arrays_equalsImm($str1$$Register, $str2$$Register, $cntImm$$constant,
+                             $result$$Register, $tmp1$$Register, $tmp2$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// String equals.
+// Use dst register classes if a register gets killed, as is the case for TEMP operands!
+instruct string_equals(iRegPsrc str1, iRegPsrc str2, iRegIsrc cnt, iRegIdst result,
+                       iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, iRegPdst tmp4, iRegPdst tmp5,
+                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
+  match(Set result (StrEquals (Binary str1 str2) cnt));
+  effect(TEMP result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
+         KILL cr0, KILL cr1, KILL cr6, KILL ctr);
+  predicate(SpecialStringEquals);  // See Matcher::match_rule_supported.
+  ins_cost(300);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String Equals [0..$cnt]($str1),[0..$cnt]($str2) -> $result"
+            " \t// KILL $cr0, $cr1, $cr6, $ctr, TEMP $result, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ char_arrays_equals($str1$$Register, $str2$$Register, $cnt$$Register, $result$$Register,
+                          $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+// String compare.
+// Char[] pointers are passed in.
+// Use dst register classes if a register gets killed, as is the case for TEMP operands!
+instruct string_compare(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
+                        iRegPdst tmp, flagsRegCR0 cr0, regCTR ctr) %{
+  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
+  effect(USE_KILL cnt1, USE_KILL cnt2, USE_KILL str1, USE_KILL str2, TEMP result, TEMP tmp, KILL cr0, KILL ctr);
+  ins_cost(300);
+
+  ins_alignment(8); // 'compute_padding()' gets called, up to this number-1 nops will get inserted.
+
+  format %{ "String Compare $str1[0..$cnt1], $str2[0..$cnt2] -> $result"
+            " \t// TEMP $tmp, $result KILLs $str1, $cnt1, $str2, $cnt2, $cr0, $ctr" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ string_compare($str1$$Register, $str2$$Register, $cnt1$$Register, $cnt2$$Register,
+                      $result$$Register, $tmp$$Register);
+  %}
+  ins_pipe(pipe_class_compare);
+%}
+
+//---------- Min/Max Instructions ---------------------------------------------
+
+instruct minI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (MinI src1 src2));
+  ins_cost(DEFAULT_COST*6);
+
+  expand %{
+    iRegIdst src1s;
+    iRegIdst src2s;
+    iRegIdst diff;
+    iRegIdst sm;
+    iRegIdst doz; // difference or zero
+    sxtI_reg(src1s, src1); // Ensure proper sign extension.
+    sxtI_reg(src2s, src2); // Ensure proper sign extension.
+    subI_reg_reg(diff, src2s, src1s);
+    // Need to consider a >=33 bit result, therefore we need signmask64I.
+    signmask64I_regI(sm, diff);
+    andI_reg_reg(doz, diff, sm); // <=0
+    addI_reg_reg(dst, doz, src1s);
+  %}
+%}
+
+instruct maxI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
+  match(Set dst (MaxI src1 src2));
+  ins_cost(DEFAULT_COST*6);
+
+  expand %{
+    immI_minus1 m1 %{ -1 %}
+    iRegIdst src1s;
+    iRegIdst src2s;
+    iRegIdst diff;
+    iRegIdst sm;
+    iRegIdst doz; // difference or zero
+    sxtI_reg(src1s, src1); // Ensure proper sign extension.
+    sxtI_reg(src2s, src2); // Ensure proper sign extension.
+    subI_reg_reg(diff, src2s, src1s);
+    // Need to consider a >=33 bit result, therefore we need signmask64I.
+    signmask64I_regI(sm, diff);
+    andcI_reg_reg(doz, sm, m1, diff); // >=0
+    addI_reg_reg(dst, doz, src1s);
+  %}
+%}
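+// Both expansions above implement the branch-free "difference or zero" (doz)
+// idiom. A minimal C sketch (an illustration, assuming 64-bit intermediate
+// arithmetic for the possible 33-bit difference):
+//
+//   int64_t diff = (int64_t)src2 - (int64_t)src1; // may not fit in 32 bits
+//   int64_t sm   = diff >> 63;                    // all ones iff src2 < src1
+//   min = src1 + (diff &  sm);                    // src2 if src2 <  src1, else src1
+//   max = src1 + (diff & ~sm);                    // src2 if src2 >= src1, else src1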
+
+//---------- Population Count Instructions ------------------------------------
+
+// Popcnt for Power7.
+instruct popCountI(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (PopCountI src));
+  predicate(UsePopCountInstruction && VM_Version::has_popcntw());
+  ins_cost(DEFAULT_COST);
+
+  format %{ "POPCNTW $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_popcntb);
+    __ popcntw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Popcnt for Power7.
+instruct popCountL(iRegIdst dst, iRegLsrc src) %{
+  predicate(UsePopCountInstruction && VM_Version::has_popcntw());
+  match(Set dst (PopCountL src));
+  ins_cost(DEFAULT_COST);
+
+  format %{ "POPCNTD $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_popcntb);
+    __ popcntd($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct countLeadingZerosI(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (CountLeadingZerosI src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CNTLZW  $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cntlzw);
+    __ cntlzw($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct countLeadingZerosL(iRegIdst dst, iRegLsrc src) %{
+  match(Set dst (CountLeadingZerosL src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);  // See Matcher::match_rule_supported.
+  ins_cost(DEFAULT_COST);
+
+  format %{ "CNTLZD  $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cntlzd);
+    __ cntlzd($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct countLeadingZerosP(iRegIdst dst, iRegPsrc src) %{
+  // no match-rule, false predicate
+  effect(DEF dst, USE src);
+  predicate(false);
+
+  format %{ "CNTLZD  $dst, $src" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_cntlzd);
+    __ cntlzd($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct countTrailingZerosI_Ex(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (CountTrailingZerosI src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI16 imm1 %{ (int)-1 %}
+    immI16 imm2 %{ (int)32 %}
+    immI_minus1 m1 %{ -1 %}
+    iRegIdst tmpI1;
+    iRegIdst tmpI2;
+    iRegIdst tmpI3;
+    addI_reg_imm16(tmpI1, src, imm1);
+    andcI_reg_reg(tmpI2, src, m1, tmpI1);
+    countLeadingZerosI(tmpI3, tmpI2);
+    subI_imm16_reg(dst, imm2, tmpI3);
+  %}
+%}
+
+instruct countTrailingZerosL_Ex(iRegIdst dst, iRegLsrc src) %{
+  match(Set dst (CountTrailingZerosL src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immL16 imm1 %{ (long)-1 %}
+    immI16 imm2 %{ (int)64 %}
+    iRegLdst tmpL1;
+    iRegLdst tmpL2;
+    iRegIdst tmpL3;
+    addL_reg_imm16(tmpL1, src, imm1);
+    andcL_reg_reg(tmpL2, tmpL1, src);
+    countLeadingZerosL(tmpL3, tmpL2);
+    subI_imm16_reg(dst, imm2, tmpL3);
+ %}
+%}
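+// Both expansions rely on the identity ntz(x) == bits - nlz((x-1) & ~x),
+// since (x-1) & ~x sets exactly the ntz(x) low-order bits. Worked 32-bit
+// example (a sketch): x = 0b1000 -> (x-1) & ~x = 0b0111, cntlzw = 29,
+// ntz = 32 - 29 = 3. For x == 0: (x-1) & ~x = 0xFFFFFFFF, cntlzw = 0, ntz = 32.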
+
+// Expand nodes for byte_reverse_int.
+instruct insrwi_a(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
+  effect(DEF dst, USE src, USE pos, USE shift);
+  predicate(false);
+
+  format %{ "INSRWI  $dst, $src, $pos, $shift" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwimi);
+    __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// As insrwi_a, but with USE_DEF.
+instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
+  effect(USE_DEF dst, USE src, USE pos, USE shift);
+  predicate(false);
+
+  format %{ "INSRWI  $dst, $src, $pos, $shift" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rlwimi);
+    __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Only slightly faster than the Java implementation.
+instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
+  match(Set dst (ReverseBytesI src));
+  predicate(UseCountLeadingZerosInstructionsPPC64);
+  ins_cost(DEFAULT_COST);
+
+  expand %{
+    immI16 imm24 %{ (int) 24 %}
+    immI16 imm16 %{ (int) 16 %}
+    immI16  imm8 %{ (int)  8 %}
+    immI16  imm4 %{ (int)  4 %}
+    immI16  imm0 %{ (int)  0 %}
+    iRegLdst tmpI1;
+    iRegLdst tmpI2;
+    iRegLdst tmpI3;
+
+    urShiftI_reg_imm(tmpI1, src, imm24);
+    insrwi_a(dst, tmpI1, imm24, imm8);
+    urShiftI_reg_imm(tmpI2, src, imm16);
+    insrwi(dst, tmpI2, imm8, imm16);
+    urShiftI_reg_imm(tmpI3, src, imm8);
+    insrwi(dst, tmpI3, imm8, imm8);
+    insrwi(dst, src, imm0, imm8);
+  %}
+%}
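+// Worked example of the expansion above for src = 0xAABBCCDD (bits numbered
+// from the MSB, as in the PowerPC rotate-and-insert instructions):
+//   tmpI1 = src >>> 24 = 0x000000AA, insert  8 bits at 24..31: dst = 0x......AA
+//   tmpI2 = src >>> 16 = 0x0000AABB, insert 16 bits at  8..23: dst = 0x..AABBAA
+//   tmpI3 = src >>>  8 = 0x00AABBCC, insert  8 bits at  8..15: dst = 0x..CCBBAA
+//   src,                 insert  8 bits at  0.. 7:             dst = 0xDDCCBBAA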
+
+//---------- Replicate Vector Instructions ------------------------------------
+
+// Insrdi does replicate if src == dst.
+instruct repl32(iRegLdst dst) %{
+  predicate(false);
+  effect(USE_DEF dst);
+
+  format %{ "INSRDI  $dst, #0, $dst, #32 \t// replicate" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
+    __ insrdi($dst$$Register, $dst$$Register, 32, 0);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Insrdi does replicate if src == dst.
+instruct repl48(iRegLdst dst) %{
+  predicate(false);
+  effect(USE_DEF dst);
+
+  format %{ "INSRDI  $dst, #0, $dst, #48 \t// replicate" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
+    __ insrdi($dst$$Register, $dst$$Register, 48, 0);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Insrdi does replicate if src == dst.
+instruct repl56(iRegLdst dst) %{
+  predicate(false);
+  effect(USE_DEF dst);
+
+  format %{ "INSRDI  $dst, #0, $dst, #56 \t// replicate" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_rldimi);
+    __ insrdi($dst$$Register, $dst$$Register, 56, 0);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl8B_reg_Ex(iRegLdst dst, iRegIsrc src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 8);
+  expand %{
+    moveReg(dst, src);
+    repl56(dst);
+    repl48(dst);
+    repl32(dst);
+  %}
+%}
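+// Worked example of the expansion above for a source byte 0xbb:
+//   moveReg -> 0x00000000000000bb
+//   repl56  -> 0x000000000000bbbb   (insert low 56 bits at bit 0)
+//   repl48  -> 0x00000000bbbbbbbb
+//   repl32  -> 0xbbbbbbbbbbbbbbbb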
+
+instruct repl8B_immI0(iRegLdst dst, immI_0 zero) %{
+  match(Set dst (ReplicateB zero));
+  predicate(n->as_Vector()->length() == 8);
+  format %{ "LI      $dst, #0 \t// replicate8B" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl8B_immIminus1(iRegLdst dst, immI_minus1 src) %{
+  match(Set dst (ReplicateB src));
+  predicate(n->as_Vector()->length() == 8);
+  format %{ "LI      $dst, #-1 \t// replicate8B" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 4);
+  expand %{
+    moveReg(dst, src);
+    repl48(dst);
+    repl32(dst);
+  %}
+%}
+
+instruct repl4S_immI0(iRegLdst dst, immI_0 zero) %{
+  match(Set dst (ReplicateS zero));
+  predicate(n->as_Vector()->length() == 4);
+  format %{ "LI      $dst, #0 \t// replicate4C" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl4S_immIminus1(iRegLdst dst, immI_minus1 src) %{
+  match(Set dst (ReplicateS src));
+  predicate(n->as_Vector()->length() == 4);
+  format %{ "LI      $dst, -1 \t// replicate4C" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 2);
+  ins_cost(2 * DEFAULT_COST);
+  expand %{
+    moveReg(dst, src);
+    repl32(dst);
+  %}
+%}
+
+instruct repl2I_immI0(iRegLdst dst, immI_0 zero) %{
+  match(Set dst (ReplicateI zero));
+  predicate(n->as_Vector()->length() == 2);
+  format %{ "LI      $dst, #0 \t// replicate4C" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct repl2I_immIminus1(iRegLdst dst, immI_minus1 src) %{
+  match(Set dst (ReplicateI src));
+  predicate(n->as_Vector()->length() == 2);
+  format %{ "LI      $dst, -1 \t// replicate4C" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Move float to int register via stack, replicate.
+instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
+  match(Set dst (ReplicateF src));
+  predicate(n->as_Vector()->length() == 2);
+  ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
+  expand %{
+    stackSlotL tmpS;
+    iRegIdst tmpI;
+    moveF2I_reg_stack(tmpS, src);   // Move float to stack.
+    moveF2I_stack_reg(tmpI, tmpS);  // Move stack to int reg.
+    moveReg(dst, tmpI);             // Move int to long reg.
+    repl32(dst);                    // Replicate bitpattern.
+  %}
+%}
+
+// Replicate scalar constant to packed float values in Double register
+instruct repl2F_immF_Ex(iRegLdst dst, immF src) %{
+  match(Set dst (ReplicateF src));
+  predicate(n->as_Vector()->length() == 2);
+  ins_cost(5 * DEFAULT_COST);
+
+  format %{ "LD      $dst, offset, $constanttablebase\t// load replicated float $src $src from table, postalloc expanded" %}
+  postalloc_expand( postalloc_expand_load_replF_constant(dst, src, constanttablebase) );
+%}
+
+// Replicate scalar zero constant to packed float values in Double register
+instruct repl2F_immF0(iRegLdst dst, immF_0 zero) %{
+  match(Set dst (ReplicateF zero));
+  predicate(n->as_Vector()->length() == 2);
+
+  format %{ "LI      $dst, #0 \t// replicate2F" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_addi);
+    __ li($dst$$Register, 0x0);
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// ============================================================================
+// Safepoint Instruction
+
+instruct safePoint_poll(iRegPdst poll) %{
+  match(SafePoint poll);
+  predicate(LoadPollAddressFromThread);
+
+  // It caused problems to add the effect that r0 is killed, but this
+  // effect no longer needs to be mentioned, since r0 is not contained
+  // in a reg_class.
+
+  format %{ "LD      R0, #0, $poll \t// Safepoint poll for GC" %}
+  size(4);
+  ins_encode( enc_poll(0x0, poll) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Safepoint without per-thread support. Load address of page to poll
+// as constant.
+// Rscratch2RegP is R12.
+// LoadConPollAddr node is added in pd_post_matching_hook(). It must be
+// a separate node so that the oop map is at the right location.
+instruct safePoint_poll_conPollAddr(rscratch2RegP poll) %{
+  match(SafePoint poll);
+  predicate(!LoadPollAddressFromThread);
+
+  // It caused problems to add the effect that r0 is killed, but this
+  // effect no longer needs to be mentioned, since r0 is not contained
+  // in a reg_class.
+
+  format %{ "LD      R12, addr of polling page\n\t"
+            "LD      R0, #0, R12 \t// Safepoint poll for GC" %}
+  ins_encode( enc_poll(0x0, poll) );
+  ins_pipe(pipe_class_default);
+%}
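+// (Both variants emit a load from the polling page. When the VM protects
+// that page to start a safepoint, the load traps and stops this thread at a
+// location with a known oop map.)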
+
+// ============================================================================
+// Call Instructions
+
+// Call Java Static Instruction
+
+// Schedulable version of call static node.
+instruct CallStaticJavaDirect(method meth) %{
+  match(CallStaticJava);
+  effect(USE meth);
+  predicate(!((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  ins_cost(CALL_COST);
+
+  ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);
+
+  format %{ "CALL,static $meth \t// ==> " %}
+  size(4);
+  ins_encode( enc_java_static_call(meth) );
+  ins_pipe(pipe_class_call);
+%}
+
+// Schedulable version of call static node.
+instruct CallStaticJavaDirectHandle(method meth) %{
+  match(CallStaticJava);
+  effect(USE meth);
+  predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+  ins_cost(CALL_COST);
+
+  ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);
+
+  format %{ "CALL,static $meth \t// ==> " %}
+  ins_encode( enc_java_handle_call(meth) );
+  ins_pipe(pipe_class_call);
+%}
+
+// Call Java Dynamic Instruction
+
+// Used by postalloc expand of CallDynamicJavaDirectSchedEx (actual call).
+// Loading of IC was postalloc expanded. The nodes loading the IC are reachable
+// via fields ins_field_load_ic_hi_node and ins_field_load_ic_node.
+// The call destination must still be placed in the constant pool.
+instruct CallDynamicJavaDirectSched(method meth) %{
+  match(CallDynamicJava); // To get all the data fields we need ...
+  effect(USE meth);
+  predicate(false);       // ... but never match.
+
+  ins_field_load_ic_hi_node(loadConL_hiNode*);
+  ins_field_load_ic_node(loadConLNode*);
+  ins_num_consts(1 /* 1 patchable constant: call destination */);
+
+  format %{ "BL        \t// dynamic $meth ==> " %}
+  size(4);
+  ins_encode( enc_java_dynamic_call_sched(meth) );
+  ins_pipe(pipe_class_call);
+%}
+
+// Schedulable (i.e. postalloc expanded) version of call dynamic java.
+// We use postalloc expanded calls if we use inline caches
+// and do not update method data.
+//
+// This instruction has two constants: inline cache (IC) and call destination.
+// Loading the inline cache will be postalloc expanded, thus leaving a call with
+// one constant.
+instruct CallDynamicJavaDirectSched_Ex(method meth) %{
+  match(CallDynamicJava);
+  effect(USE meth);
+  predicate(UseInlineCaches);
+  ins_cost(CALL_COST);
+
+  ins_num_consts(2 /* 2 patchable constants: inline cache, call destination. */);
+
+  format %{ "CALL,dynamic $meth \t// postalloc expanded" %}
+  postalloc_expand( postalloc_expand_java_dynamic_call_sched(meth, constanttablebase) );
+%}
+
+// Compound version of call dynamic java
+// We use postalloc expanded calls if we use inline caches
+// and do not update method data.
+instruct CallDynamicJavaDirect(method meth) %{
+  match(CallDynamicJava);
+  effect(USE meth);
+  predicate(!UseInlineCaches);
+  ins_cost(CALL_COST);
+
+  // Enc_java_to_runtime_call needs up to 4 constants (method data oop).
+  ins_num_consts(4);
+
+  format %{ "CALL,dynamic $meth \t// ==> " %}
+  ins_encode( enc_java_dynamic_call(meth, constanttablebase) );
+  ins_pipe(pipe_class_call);
+%}
+
+// Call Runtime Instruction
+
+instruct CallRuntimeDirect(method meth) %{
+  match(CallRuntime);
+  effect(USE meth);
+  ins_cost(CALL_COST);
+
+  // Enc_java_to_runtime_call needs up to 3 constants: call target,
+  // env for callee, C-toc.
+  ins_num_consts(3);
+
+  format %{ "CALL,runtime" %}
+  ins_encode( enc_java_to_runtime_call(meth) );
+  ins_pipe(pipe_class_call);
+%}
+
+// Call Leaf
+
+// Used by postalloc expand of CallLeafDirect_Ex (mtctr).
+instruct CallLeafDirect_mtctr(iRegLdst dst, iRegLsrc src) %{
+  effect(DEF dst, USE src);
+
+  ins_num_consts(1);
+
+  format %{ "MTCTR   $src" %}
+  size(4);
+  ins_encode( enc_leaf_call_mtctr(src) );
+  ins_pipe(pipe_class_default);
+%}
+
+// Used by postalloc expand of CallLeafDirect_Ex (actual call).
+instruct CallLeafDirect(method meth) %{
+  match(CallLeaf);   // To get all the data fields we need ...
+  effect(USE meth);
+  predicate(false);  // but never match.
+
+  format %{ "BCTRL     \t// leaf call $meth ==> " %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_bctrl);
+    __ bctrl();
+  %}
+  ins_pipe(pipe_class_call);
+%}
+
+// postalloc expand of CallLeafDirect.
+// Load address to call from TOC, then bl to it.
+instruct CallLeafDirect_Ex(method meth) %{
+  match(CallLeaf);
+  effect(USE meth);
+  ins_cost(CALL_COST);
+
+  // Postalloc_expand_java_to_runtime_call needs up to 3 constants: call target,
+  // env for callee, C-toc.
+  ins_num_consts(3);
+
+  format %{ "CALL,runtime leaf $meth \t// postalloc expanded" %}
+  postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
+%}
+
+// Call runtime without safepoint - same as CallLeaf.
+// postalloc expand of CallLeafNoFPDirect.
+// Load address to call from TOC, then bl to it.
+instruct CallLeafNoFPDirect_Ex(method meth) %{
+  match(CallLeafNoFP);
+  effect(USE meth);
+  ins_cost(CALL_COST);
+
+  // Enc_java_to_runtime_call needs up to 3 constants: call target,
+  // env for callee, C-toc.
+  ins_num_consts(3);
+
+  format %{ "CALL,runtime leaf nofp $meth \t// postalloc expanded" %}
+  postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
+%}
+
+// Tail Call; Jump from runtime stub to Java code.
+// Also known as an 'interprocedural jump'.
+// Target of jump will eventually return to caller.
+// TailJump below removes the return address.
+instruct TailCalljmpInd(iRegPdstNoScratch jump_target, inline_cache_regP method_oop) %{
+  match(TailCall jump_target method_oop);
+  ins_cost(CALL_COST);
+
+  format %{ "MTCTR   $jump_target \t// $method_oop holds method oop\n\t"
+            "BCTR         \t// tail call" %}
+  size(8);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ mtctr($jump_target$$Register);
+    __ bctr();
+  %}
+  ins_pipe(pipe_class_call);
+%}
+
+// Return Instruction
+instruct Ret() %{
+  match(Return);
+  format %{ "BLR      \t// branch to link register" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_blr);
+    // LR is restored in MachEpilogNode. Just do the RET here.
+    __ blr();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Tail Jump; remove the return address; jump to target.
+// TailCall above leaves the return address around.
+// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
+// ex_oop (Exception Oop) is needed in the first argument register
+// (rarg1RegP) at the jump; the return address is reloaded from the ABI
+// frame into R4_ARG2 as the issuing pc.
+instruct tailjmpInd(iRegPdstNoScratch jump_target, rarg1RegP ex_oop) %{
+  match(TailJump jump_target ex_oop);
+  ins_cost(CALL_COST);
+
+  format %{ "LD      R4_ARG2 = LR\n\t"
+            "MTCTR   $jump_target\n\t"
+            "BCTR     \t// TailJump, exception oop: $ex_oop" %}
+  size(12);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    __ ld(R4_ARG2/* issuing pc */, _abi(lr), R1_SP);
+    __ mtctr($jump_target$$Register);
+    __ bctr();
+  %}
+  ins_pipe(pipe_class_call);
+%}
+
+// Create exception oop: created by stack-crawling runtime code.
+// Created exception is now available to this handler, and is set up
+// just prior to jumping to this handler. No code emitted.
+instruct CreateException(rarg1RegP ex_oop) %{
+  match(Set ex_oop (CreateEx));
+  ins_cost(0);
+
+  format %{ " -- \t// exception oop; no code emitted" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_default);
+%}
+
+// Rethrow exception: The exception oop will come in the first
+// argument position. Then JUMP (not call) to the rethrow stub code.
+instruct RethrowException() %{
+  match(Rethrow);
+  ins_cost(CALL_COST);
+
+  format %{ "Jmp     rethrow_stub" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_compound);
+    cbuf.set_insts_mark();
+    __ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
+  %}
+  ins_pipe(pipe_class_call);
+%}
+
+// Die now.
+instruct ShouldNotReachHere() %{
+  match(Halt);
+  ins_cost(CALL_COST);
+
+  format %{ "ShouldNotReachHere" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_tdi);
+    __ trap_should_not_reach_here();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// This name is KNOWN by the ADLC and cannot be changed.  The ADLC
+// forces a 'TypeRawPtr::BOTTOM' output type for this guy.
+// Get a DEF on threadRegP, no costs, no encoding, use
+// 'ins_should_rematerialize(true)' to avoid spilling.
+instruct tlsLoadP(threadRegP dst) %{
+  match(Set dst (ThreadLocal));
+  ins_cost(0);
+
+  ins_should_rematerialize(true);
+
+  format %{ " -- \t// $dst=Thread::current(), empty" %}
+  size(0);
+  ins_encode( /*empty*/ );
+  ins_pipe(pipe_class_empty);
+%}
+
+//---Some PPC specific nodes---------------------------------------------------
+
+// Stop a group.
+instruct endGroup() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "End Bundle (ori r1, r1, 0)" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_endgroup);
+    __ endgroup();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+// Nop instructions
+
+instruct fxNop() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "fxNop" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
+    __ nop();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct fpNop0() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "fpNop0" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
+    __ fpnop0();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct fpNop1() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "fpNop1" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_fmr);
+    __ fpnop1();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct brNop0() %{
+  ins_cost(0);
+  size(4);
+  format %{ "brNop0" %}
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
+    __ brnop0();
+  %}
+  ins_is_nop(true);
+  ins_pipe(pipe_class_default);
+%}
+
+instruct brNop1() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "brNop1" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
+    __ brnop1();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+instruct brNop2() %{
+  ins_cost(0);
+
+  ins_is_nop(true);
+
+  format %{ "brNop2" %}
+  size(4);
+  ins_encode %{
+    // TODO: PPC port $archOpcode(ppc64Opcode_mcrf);
+    __ brnop2();
+  %}
+  ins_pipe(pipe_class_default);
+%}
+
+//----------PEEPHOLE RULES-----------------------------------------------------
+// These must follow all instruction definitions as they use the names
+// defined in the instructions definitions.
+//
+// peepmatch ( root_instr_name [preceding_instruction]* );
+//
+// peepconstraint %{
+// (instruction_number.operand_name relational_op instruction_number.operand_name
+//  [, ...] );
+// // instruction numbers are zero-based using left to right order in peepmatch
+//
+// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
+// // provide an instruction_number.operand_name for each operand that appears
+// // in the replacement instruction's match rule
+//
+// ---------VM FLAGS---------------------------------------------------------
+//
+// All peephole optimizations can be turned off using -XX:-OptoPeephole
+//
+// Each peephole rule is given an identifying number starting with zero and
+// increasing by one in the order seen by the parser. An individual peephole
+// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
+// on the command-line.
+//
+// ---------CURRENT LIMITATIONS----------------------------------------------
+//
+// Only match adjacent instructions in same basic block
+// Only equality constraints
+// Only constraints between operands, not (0.dest_reg == EAX_enc)
+// Only one replacement instruction
+//
+// ---------EXAMPLE----------------------------------------------------------
+//
+// // pertinent parts of existing instructions in architecture description
+// instruct movI(eRegI dst, eRegI src) %{
+//   match(Set dst (CopyI src));
+// %}
+//
+// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
+//   match(Set dst (AddI dst src));
+//   effect(KILL cr);
+// %}
+//
+// // Change (inc mov) to lea
+// peephole %{
+//   // increment preceded by register-register move
+//   peepmatch ( incI_eReg movI );
+//   // require that the destination register of the increment
+//   // match the destination register of the move
+//   peepconstraint ( 0.dst == 1.dst );
+//   // construct a replacement instruction that sets
+//   // the destination to ( move's source register + one )
+//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
+// %}
+//
+// Implementation no longer uses movX instructions since
+// machine-independent system no longer uses CopyX nodes.
+//
+// peephole %{
+//   peepmatch ( incI_eReg movI );
+//   peepconstraint ( 0.dst == 1.dst );
+//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
+// %}
+//
+// peephole %{
+//   peepmatch ( decI_eReg movI );
+//   peepconstraint ( 0.dst == 1.dst );
+//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
+// %}
+//
+// peephole %{
+//   peepmatch ( addI_eReg_imm movI );
+//   peepconstraint ( 0.dst == 1.dst );
+//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
+// %}
+//
+// peephole %{
+//   peepmatch ( addP_eReg_imm movP );
+//   peepconstraint ( 0.dst == 1.dst );
+//   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
+// %}
+
+// // Change load of spilled value to only a spill
+// instruct storeI(memory mem, eRegI src) %{
+//   match(Set mem (StoreI mem src));
+// %}
+//
+// instruct loadI(eRegI dst, memory mem) %{
+//   match(Set dst (LoadI mem));
+// %}
+//
+peephole %{
+  peepmatch ( loadI storeI );
+  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
+  peepreplace ( storeI( 1.mem 1.mem 1.src ) );
+%}
+
+peephole %{
+  peepmatch ( loadL storeL );
+  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
+  peepreplace ( storeL( 1.mem 1.mem 1.src ) );
+%}
+
+peephole %{
+  peepmatch ( loadP storeP );
+  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
+  peepreplace ( storeP( 1.mem 1.mem 1.src ) );
+%}
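+// Example of the load/store peepholes above (a sketch): reloading a value
+// that was just spilled to the same slot into the same register,
+//   STW R3, 16(R1) ; LWZ R3, 16(R1)
+// collapses to the store alone:
+//   STW R3, 16(R1)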
+
+//----------SMARTSPILL RULES---------------------------------------------------
+// These must follow all instruction definitions as they use the names
+// defined in the instructions definitions.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/ppc_64.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,24 @@
+//
+// Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+// Copyright 2012, 2013 SAP AG. All rights reserved.
+// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+//
+// This code is free software; you can redistribute it and/or modify it
+// under the terms of the GNU General Public License version 2 only, as
+// published by the Free Software Foundation.
+//
+// This code is distributed in the hope that it will be useful, but WITHOUT
+// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+// version 2 for more details (a copy is included in the LICENSE file that
+// accompanied this code).
+//
+// You should have received a copy of the GNU General Public License version
+// 2 along with this work; if not, write to the Free Software Foundation,
+// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+//
+// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+// or visit www.oracle.com if you need additional information or have any
+// questions.
+//
+//
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/registerMap_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_REGISTERMAP_PPC_HPP
+#define CPU_PPC_VM_REGISTERMAP_PPC_HPP
+
+// machine-dependent implementation for register maps
+  friend class frame;
+
+ private:
+  // This is the hook for finding a register in a "well-known" location,
+  // such as a register block of a predetermined format.
+  // Since there is none, we just return NULL.
+  // See registerMap_sparc.hpp for an example of grabbing registers
+  // from register save areas of a standard layout.
+  address pd_location(VMReg reg) const { return NULL; }
+
+  // no PD state to clear or copy:
+  void pd_clear() {}
+  void pd_initialize() {}
+  void pd_initialize_from(const RegisterMap* map) {}
+
+#endif // CPU_PPC_VM_REGISTERMAP_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/register_definitions_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// make sure the defines don't screw up the declarations later on in this file
+#define DONT_USE_REGISTER_DEFINES
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "asm/register.hpp"
+#include "register_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "interp_masm_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "interp_masm_ppc_64.hpp"
+#endif
+
+REGISTER_DEFINITION(Register, noreg);
+
+REGISTER_DEFINITION(FloatRegister, fnoreg);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/register_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "register_ppc.hpp"
+
+const int ConcreteRegisterImpl::max_gpr = RegisterImpl::number_of_registers * 2;
+const int ConcreteRegisterImpl::max_fpr = ConcreteRegisterImpl::max_gpr +
+                                          FloatRegisterImpl::number_of_registers * 2;
+const int ConcreteRegisterImpl::max_cnd = ConcreteRegisterImpl::max_fpr +
+                                          ConditionRegisterImpl::number_of_registers;
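+// Note: GPRs and FPRs are counted twice because each 64-bit register spans
+// two 32-bit VMReg slots.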
+
+const char* RegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "R0",  "R1",  "R2",  "R3",  "R4",  "R5",  "R6",  "R7",
+    "R8",  "R9",  "R10", "R11", "R12", "R13", "R14", "R15",
+    "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23",
+    "R24", "R25", "R26", "R27", "R28", "R29", "R30", "R31"
+  };
+  return is_valid() ? names[encoding()] : "noreg";
+}
+
+const char* ConditionRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "CR0",  "CR1",  "CR2",  "CR3",  "CR4",  "CR5",  "CR6",  "CR7"
+  };
+  return is_valid() ? names[encoding()] : "cnoreg";
+}
+
+const char* FloatRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "F0",  "F1",  "F2",  "F3",  "F4",  "F5",  "F6",  "F7",
+    "F8",  "F9",  "F10", "F11", "F12", "F13", "F14", "F15",
+    "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23",
+    "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31"
+  };
+  return is_valid() ? names[encoding()] : "fnoreg";
+}
+
+const char* SpecialRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "SR_XER", "SR_LR", "SR_CTR", "SR_VRSAVE", "SR_SPEFSCR", "SR_PPR"
+  };
+  return is_valid() ? names[encoding()] : "snoreg";
+}
+
+const char* VectorRegisterImpl::name() const {
+  const char* names[number_of_registers] = {
+    "VR0",  "VR1",  "VR2",  "VR3",  "VR4",  "VR5",  "VR6",  "VR7",
+    "VR8",  "VR9",  "VR10", "VR11", "VR12", "VR13", "VR14", "VR15",
+    "VR16", "VR17", "VR18", "VR19", "VR20", "VR21", "VR22", "VR23",
+    "VR24", "VR25", "VR26", "VR27", "VR28", "VR29", "VR30", "VR31"
+  };
+  return is_valid() ? names[encoding()] : "vnoreg";
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/register_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,633 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_REGISTER_PPC_HPP
+#define CPU_PPC_VM_REGISTER_PPC_HPP
+
+#include "asm/register.hpp"
+#include "vm_version_ppc.hpp"
+
+// forward declaration
+class Address;
+class VMRegImpl;
+typedef VMRegImpl* VMReg;
+
+//  PPC64 registers
+//
+//  See "64-bit PowerPC ELF ABI Supplement 1.7", IBM Corp. (2003-10-29).
+//  (http://math-atlas.sourceforge.net/devel/assembly/PPC-elf64abi-1.7.pdf)
+//
+//  r0        Register used in function prologs (volatile)
+//  r1        Stack pointer (nonvolatile)
+//  r2        TOC pointer (volatile)
+//  r3        Parameter and return value (volatile)
+//  r4-r10    Function parameters (volatile)
+//  r11       Register used in calls by pointer and as an environment pointer for languages which require one (volatile)
+//  r12       Register used for exception handling and glink code (volatile)
+//  r13       Reserved for use as system thread ID
+//  r14-r31   Local variables (nonvolatile)
+//
+//  f0        Scratch register (volatile)
+//  f1-f4     Floating point parameters and return value (volatile)
+//  f5-f13    Floating point parameters (volatile)
+//  f14-f31   Floating point values (nonvolatile)
+//
+//  LR        Link register for return address (volatile)
+//  CTR       Loop counter (volatile)
+//  XER       Fixed point exception register (volatile)
+//  FPSCR     Floating point status and control register (volatile)
+//
+//  CR0-CR1   Condition code fields (volatile)
+//  CR2-CR4   Condition code fields (nonvolatile)
+//  CR5-CR7   Condition code fields (volatile)
+//
+//  ----------------------------------------------
+//  On processors with the VMX feature:
+//  v0-v1     Volatile scratch registers
+//  v2-v13    Volatile vector parameters registers
+//  v14-v19   Volatile scratch registers
+//  v20-v31   Non-volatile registers
+//  vrsave    Non-volatile 32-bit register
+
+
+// Use Register as shortcut
+class RegisterImpl;
+typedef RegisterImpl* Register;
+
+inline Register as_Register(int encoding) {
+  assert(encoding >= 0 && encoding < 32, "bad register encoding");
+  return (Register)(intptr_t)encoding;
+}
+
+// The implementation of integer registers for the Power architecture
+class RegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 32
+  };
+
+  // general construction
+  inline friend Register as_Register(int encoding);
+
+  // accessors
+  int      encoding()  const { assert(is_valid(), "invalid register"); return value(); }
+  VMReg    as_VMReg();
+  Register successor() const { return as_Register(encoding() + 1); }
+
+  // testers
+  bool is_valid()       const { return ( 0 <= (value()&0x7F) && (value()&0x7F) <  number_of_registers); }
+  bool is_volatile()    const { return ( 0 <= (value()&0x7F) && (value()&0x7F) <= 13 ); }
+  bool is_nonvolatile() const { return (14 <= (value()&0x7F) && (value()&0x7F) <= 31 ); }
+
+  const char* name() const;
+};
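+// Usage sketch: as_Register(3)->name() yields "R3". noreg has encoding -1,
+// so is_valid() fails: ((-1) & 0x7F) == 127 >= number_of_registers.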
+
+// The integer registers of the PPC architecture
+CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
+
+CONSTANT_REGISTER_DECLARATION(Register, R0,   (0));
+CONSTANT_REGISTER_DECLARATION(Register, R1,   (1));
+CONSTANT_REGISTER_DECLARATION(Register, R2,   (2));
+CONSTANT_REGISTER_DECLARATION(Register, R3,   (3));
+CONSTANT_REGISTER_DECLARATION(Register, R4,   (4));
+CONSTANT_REGISTER_DECLARATION(Register, R5,   (5));
+CONSTANT_REGISTER_DECLARATION(Register, R6,   (6));
+CONSTANT_REGISTER_DECLARATION(Register, R7,   (7));
+CONSTANT_REGISTER_DECLARATION(Register, R8,   (8));
+CONSTANT_REGISTER_DECLARATION(Register, R9,   (9));
+CONSTANT_REGISTER_DECLARATION(Register, R10, (10));
+CONSTANT_REGISTER_DECLARATION(Register, R11, (11));
+CONSTANT_REGISTER_DECLARATION(Register, R12, (12));
+CONSTANT_REGISTER_DECLARATION(Register, R13, (13));
+CONSTANT_REGISTER_DECLARATION(Register, R14, (14));
+CONSTANT_REGISTER_DECLARATION(Register, R15, (15));
+CONSTANT_REGISTER_DECLARATION(Register, R16, (16));
+CONSTANT_REGISTER_DECLARATION(Register, R17, (17));
+CONSTANT_REGISTER_DECLARATION(Register, R18, (18));
+CONSTANT_REGISTER_DECLARATION(Register, R19, (19));
+CONSTANT_REGISTER_DECLARATION(Register, R20, (20));
+CONSTANT_REGISTER_DECLARATION(Register, R21, (21));
+CONSTANT_REGISTER_DECLARATION(Register, R22, (22));
+CONSTANT_REGISTER_DECLARATION(Register, R23, (23));
+CONSTANT_REGISTER_DECLARATION(Register, R24, (24));
+CONSTANT_REGISTER_DECLARATION(Register, R25, (25));
+CONSTANT_REGISTER_DECLARATION(Register, R26, (26));
+CONSTANT_REGISTER_DECLARATION(Register, R27, (27));
+CONSTANT_REGISTER_DECLARATION(Register, R28, (28));
+CONSTANT_REGISTER_DECLARATION(Register, R29, (29));
+CONSTANT_REGISTER_DECLARATION(Register, R30, (30));
+CONSTANT_REGISTER_DECLARATION(Register, R31, (31));
+
+
+//
+// Because Power has many registers, #define'ing values for them is
+// beneficial in code size and is worth the cost of some of the
+// dangers of defines. If a particular file has a problem with these
+// defines then it's possible to turn them off in that file by
+// defining DONT_USE_REGISTER_DEFINES. register_definitions_ppc.cpp
+// does that so that it's able to provide real definitions of these
+// registers for use in debuggers and such.
+//
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define noreg ((Register)(noreg_RegisterEnumValue))
+
+#define R0 ((Register)(R0_RegisterEnumValue))
+#define R1 ((Register)(R1_RegisterEnumValue))
+#define R2 ((Register)(R2_RegisterEnumValue))
+#define R3 ((Register)(R3_RegisterEnumValue))
+#define R4 ((Register)(R4_RegisterEnumValue))
+#define R5 ((Register)(R5_RegisterEnumValue))
+#define R6 ((Register)(R6_RegisterEnumValue))
+#define R7 ((Register)(R7_RegisterEnumValue))
+#define R8 ((Register)(R8_RegisterEnumValue))
+#define R9 ((Register)(R9_RegisterEnumValue))
+#define R10 ((Register)(R10_RegisterEnumValue))
+#define R11 ((Register)(R11_RegisterEnumValue))
+#define R12 ((Register)(R12_RegisterEnumValue))
+#define R13 ((Register)(R13_RegisterEnumValue))
+#define R14 ((Register)(R14_RegisterEnumValue))
+#define R15 ((Register)(R15_RegisterEnumValue))
+#define R16 ((Register)(R16_RegisterEnumValue))
+#define R17 ((Register)(R17_RegisterEnumValue))
+#define R18 ((Register)(R18_RegisterEnumValue))
+#define R19 ((Register)(R19_RegisterEnumValue))
+#define R20 ((Register)(R20_RegisterEnumValue))
+#define R21 ((Register)(R21_RegisterEnumValue))
+#define R22 ((Register)(R22_RegisterEnumValue))
+#define R23 ((Register)(R23_RegisterEnumValue))
+#define R24 ((Register)(R24_RegisterEnumValue))
+#define R25 ((Register)(R25_RegisterEnumValue))
+#define R26 ((Register)(R26_RegisterEnumValue))
+#define R27 ((Register)(R27_RegisterEnumValue))
+#define R28 ((Register)(R28_RegisterEnumValue))
+#define R29 ((Register)(R29_RegisterEnumValue))
+#define R30 ((Register)(R30_RegisterEnumValue))
+#define R31 ((Register)(R31_RegisterEnumValue))
+#endif
+
+// Use ConditionRegister as shortcut
+class ConditionRegisterImpl;
+typedef ConditionRegisterImpl* ConditionRegister;
+
+inline ConditionRegister as_ConditionRegister(int encoding) {
+  assert(encoding >= 0 && encoding < 8, "bad condition register encoding");
+  return (ConditionRegister)(intptr_t)encoding;
+}
+
+// The implementation of condition register(s) for the PPC architecture
+class ConditionRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 8
+  };
+
+  // construction.
+  inline friend ConditionRegister as_ConditionRegister(int encoding);
+
+  // accessors
+  int   encoding() const { assert(is_valid(), "invalid register"); return value(); }
+  VMReg as_VMReg();
+
+  // testers
+  bool is_valid()       const { return  (0 <= value()        &&  value() < number_of_registers); }
+  bool is_nonvolatile() const { return  (2 <= (value()&0x7F) && (value()&0x7F) <= 4 );  }
+
+  const char* name() const;
+};
+
+// The (parts of the) condition register(s) of the PPC architecture
+// sys/ioctl.h on AIX defines CR0-CR3, so I name these CCR.
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR0,   (0));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR1,   (1));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR2,   (2));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR3,   (3));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR4,   (4));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR5,   (5));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR6,   (6));
+CONSTANT_REGISTER_DECLARATION(ConditionRegister, CCR7,   (7));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+
+#define CCR0 ((ConditionRegister)(CCR0_ConditionRegisterEnumValue))
+#define CCR1 ((ConditionRegister)(CCR1_ConditionRegisterEnumValue))
+#define CCR2 ((ConditionRegister)(CCR2_ConditionRegisterEnumValue))
+#define CCR3 ((ConditionRegister)(CCR3_ConditionRegisterEnumValue))
+#define CCR4 ((ConditionRegister)(CCR4_ConditionRegisterEnumValue))
+#define CCR5 ((ConditionRegister)(CCR5_ConditionRegisterEnumValue))
+#define CCR6 ((ConditionRegister)(CCR6_ConditionRegisterEnumValue))
+#define CCR7 ((ConditionRegister)(CCR7_ConditionRegisterEnumValue))
+
+#endif // DONT_USE_REGISTER_DEFINES
+
+
+// Use FloatRegister as shortcut
+class FloatRegisterImpl;
+typedef FloatRegisterImpl* FloatRegister;
+
+inline FloatRegister as_FloatRegister(int encoding) {
+  assert(encoding >= 0 && encoding < 32, "bad float register encoding");
+  return (FloatRegister)(intptr_t)encoding;
+}
+
+// The implementation of float registers for the PPC architecture
+class FloatRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 32
+  };
+
+  // construction
+  inline friend FloatRegister as_FloatRegister(int encoding);
+
+  // accessors
+  int           encoding() const { assert(is_valid(), "invalid register"); return value(); }
+  VMReg         as_VMReg();
+  FloatRegister successor() const { return as_FloatRegister(encoding() + 1); }
+
+  // testers
+  bool is_valid()       const { return (0  <=  value()       &&  value()       < number_of_registers); }
+
+  const char* name() const;
+};
+
+// The float registers of the PPC architecture
+CONSTANT_REGISTER_DECLARATION(FloatRegister, fnoreg, (-1));
+
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F0,  ( 0));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F1,  ( 1));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F2,  ( 2));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F3,  ( 3));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F4,  ( 4));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F5,  ( 5));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F6,  ( 6));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F7,  ( 7));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F8,  ( 8));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F9,  ( 9));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F10, (10));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F11, (11));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F12, (12));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F13, (13));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F14, (14));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F15, (15));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F16, (16));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F17, (17));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F18, (18));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F19, (19));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F20, (20));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F21, (21));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F22, (22));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F23, (23));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F24, (24));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F25, (25));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F26, (26));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F27, (27));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F28, (28));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F29, (29));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F30, (30));
+CONSTANT_REGISTER_DECLARATION(FloatRegister, F31, (31));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define fnoreg ((FloatRegister)(fnoreg_FloatRegisterEnumValue))
+#define F0     ((FloatRegister)(    F0_FloatRegisterEnumValue))
+#define F1     ((FloatRegister)(    F1_FloatRegisterEnumValue))
+#define F2     ((FloatRegister)(    F2_FloatRegisterEnumValue))
+#define F3     ((FloatRegister)(    F3_FloatRegisterEnumValue))
+#define F4     ((FloatRegister)(    F4_FloatRegisterEnumValue))
+#define F5     ((FloatRegister)(    F5_FloatRegisterEnumValue))
+#define F6     ((FloatRegister)(    F6_FloatRegisterEnumValue))
+#define F7     ((FloatRegister)(    F7_FloatRegisterEnumValue))
+#define F8     ((FloatRegister)(    F8_FloatRegisterEnumValue))
+#define F9     ((FloatRegister)(    F9_FloatRegisterEnumValue))
+#define F10    ((FloatRegister)(   F10_FloatRegisterEnumValue))
+#define F11    ((FloatRegister)(   F11_FloatRegisterEnumValue))
+#define F12    ((FloatRegister)(   F12_FloatRegisterEnumValue))
+#define F13    ((FloatRegister)(   F13_FloatRegisterEnumValue))
+#define F14    ((FloatRegister)(   F14_FloatRegisterEnumValue))
+#define F15    ((FloatRegister)(   F15_FloatRegisterEnumValue))
+#define F16    ((FloatRegister)(   F16_FloatRegisterEnumValue))
+#define F17    ((FloatRegister)(   F17_FloatRegisterEnumValue))
+#define F18    ((FloatRegister)(   F18_FloatRegisterEnumValue))
+#define F19    ((FloatRegister)(   F19_FloatRegisterEnumValue))
+#define F20    ((FloatRegister)(   F20_FloatRegisterEnumValue))
+#define F21    ((FloatRegister)(   F21_FloatRegisterEnumValue))
+#define F22    ((FloatRegister)(   F22_FloatRegisterEnumValue))
+#define F23    ((FloatRegister)(   F23_FloatRegisterEnumValue))
+#define F24    ((FloatRegister)(   F24_FloatRegisterEnumValue))
+#define F25    ((FloatRegister)(   F25_FloatRegisterEnumValue))
+#define F26    ((FloatRegister)(   F26_FloatRegisterEnumValue))
+#define F27    ((FloatRegister)(   F27_FloatRegisterEnumValue))
+#define F28    ((FloatRegister)(   F28_FloatRegisterEnumValue))
+#define F29    ((FloatRegister)(   F29_FloatRegisterEnumValue))
+#define F30    ((FloatRegister)(   F30_FloatRegisterEnumValue))
+#define F31    ((FloatRegister)(   F31_FloatRegisterEnumValue))
+#endif // DONT_USE_REGISTER_DEFINES
+
+// Use SpecialRegister as shortcut
+class SpecialRegisterImpl;
+typedef SpecialRegisterImpl* SpecialRegister;
+
+inline SpecialRegister as_SpecialRegister(int encoding) {
+  return (SpecialRegister)(intptr_t)encoding;
+}
+
+// The implementation of special registers for the Power architecture (LR, CTR and friends)
+class SpecialRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 6
+  };
+
+  // construction
+  inline friend SpecialRegister as_SpecialRegister(int encoding);
+
+  // accessors
+  int             encoding()  const { assert(is_valid(), "invalid register"); return value(); }
+  VMReg           as_VMReg();
+
+  // testers
+  bool is_valid()       const { return 0 <= value() && value() < number_of_registers; }
+
+  const char* name() const;
+};
+
+// The special registers of the PPC architecture
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_XER,     (0));
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_LR,      (1));
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_CTR,     (2));
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_VRSAVE,  (3));
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_SPEFSCR, (4));
+CONSTANT_REGISTER_DECLARATION(SpecialRegister, SR_PPR,     (5));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define SR_XER     ((SpecialRegister)(SR_XER_SpecialRegisterEnumValue))
+#define SR_LR      ((SpecialRegister)(SR_LR_SpecialRegisterEnumValue))
+#define SR_CTR     ((SpecialRegister)(SR_CTR_SpecialRegisterEnumValue))
+#define SR_VRSAVE  ((SpecialRegister)(SR_VRSAVE_SpecialRegisterEnumValue))
+#define SR_SPEFSCR ((SpecialRegister)(SR_SPEFSCR_SpecialRegisterEnumValue))
+#define SR_PPR     ((SpecialRegister)(SR_PPR_SpecialRegisterEnumValue))
+#endif // DONT_USE_REGISTER_DEFINES
+
+
+// Use VectorRegister as shortcut
+class VectorRegisterImpl;
+typedef VectorRegisterImpl* VectorRegister;
+
+inline VectorRegister as_VectorRegister(int encoding) {
+  return (VectorRegister)(intptr_t)encoding;
+}
+
+// The implementation of vector registers for the Power architecture
+class VectorRegisterImpl: public AbstractRegisterImpl {
+ public:
+  enum {
+    number_of_registers = 32
+  };
+
+  // construction
+  inline friend VectorRegister as_VectorRegister(int encoding);
+
+  // accessors
+  int            encoding()  const { assert(is_valid(), "invalid register"); return value(); }
+
+  // testers
+  bool is_valid()       const { return 0 <= value() && value() < number_of_registers; }
+
+  const char* name() const;
+};
+
+// The Vector registers of the Power architecture
+
+CONSTANT_REGISTER_DECLARATION(VectorRegister, vnoreg, (-1));
+
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR0,  ( 0));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR1,  ( 1));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR2,  ( 2));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR3,  ( 3));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR4,  ( 4));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR5,  ( 5));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR6,  ( 6));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR7,  ( 7));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR8,  ( 8));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR9,  ( 9));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR10, (10));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR11, (11));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR12, (12));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR13, (13));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR14, (14));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR15, (15));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR16, (16));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR17, (17));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR18, (18));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR19, (19));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR20, (20));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR21, (21));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR22, (22));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR23, (23));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR24, (24));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR25, (25));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR26, (26));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR27, (27));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR28, (28));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR29, (29));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR30, (30));
+CONSTANT_REGISTER_DECLARATION(VectorRegister, VR31, (31));
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define vnoreg ((VectorRegister)(vnoreg_VectorRegisterEnumValue))
+#define VR0    ((VectorRegister)(   VR0_VectorRegisterEnumValue))
+#define VR1    ((VectorRegister)(   VR1_VectorRegisterEnumValue))
+#define VR2    ((VectorRegister)(   VR2_VectorRegisterEnumValue))
+#define VR3    ((VectorRegister)(   VR3_VectorRegisterEnumValue))
+#define VR4    ((VectorRegister)(   VR4_VectorRegisterEnumValue))
+#define VR5    ((VectorRegister)(   VR5_VectorRegisterEnumValue))
+#define VR6    ((VectorRegister)(   VR6_VectorRegisterEnumValue))
+#define VR7    ((VectorRegister)(   VR7_VectorRegisterEnumValue))
+#define VR8    ((VectorRegister)(   VR8_VectorRegisterEnumValue))
+#define VR9    ((VectorRegister)(   VR9_VectorRegisterEnumValue))
+#define VR10   ((VectorRegister)(  VR10_VectorRegisterEnumValue))
+#define VR11   ((VectorRegister)(  VR11_VectorRegisterEnumValue))
+#define VR12   ((VectorRegister)(  VR12_VectorRegisterEnumValue))
+#define VR13   ((VectorRegister)(  VR13_VectorRegisterEnumValue))
+#define VR14   ((VectorRegister)(  VR14_VectorRegisterEnumValue))
+#define VR15   ((VectorRegister)(  VR15_VectorRegisterEnumValue))
+#define VR16   ((VectorRegister)(  VR16_VectorRegisterEnumValue))
+#define VR17   ((VectorRegister)(  VR17_VectorRegisterEnumValue))
+#define VR18   ((VectorRegister)(  VR18_VectorRegisterEnumValue))
+#define VR19   ((VectorRegister)(  VR19_VectorRegisterEnumValue))
+#define VR20   ((VectorRegister)(  VR20_VectorRegisterEnumValue))
+#define VR21   ((VectorRegister)(  VR21_VectorRegisterEnumValue))
+#define VR22   ((VectorRegister)(  VR22_VectorRegisterEnumValue))
+#define VR23   ((VectorRegister)(  VR23_VectorRegisterEnumValue))
+#define VR24   ((VectorRegister)(  VR24_VectorRegisterEnumValue))
+#define VR25   ((VectorRegister)(  VR25_VectorRegisterEnumValue))
+#define VR26   ((VectorRegister)(  VR26_VectorRegisterEnumValue))
+#define VR27   ((VectorRegister)(  VR27_VectorRegisterEnumValue))
+#define VR28   ((VectorRegister)(  VR28_VectorRegisterEnumValue))
+#define VR29   ((VectorRegister)(  VR29_VectorRegisterEnumValue))
+#define VR30   ((VectorRegister)(  VR30_VectorRegisterEnumValue))
+#define VR31   ((VectorRegister)(  VR31_VectorRegisterEnumValue))
+#endif // DONT_USE_REGISTER_DEFINES
+
+
+// Maximum number of incoming arguments that can be passed in integer registers.
+const int PPC_ARGS_IN_REGS_NUM = 8;
+
+
+// Need to know the total number of registers of all sorts for SharedInfo.
+// Define a class that exports it.
+class ConcreteRegisterImpl : public AbstractRegisterImpl {
+ public:
+  enum {
+    // This number must be large enough to cover REG_COUNT (defined by c2) registers.
+    // There is no requirement that any ordering here matches any ordering c2 gives
+    // its optoregs.
+    number_of_registers =
+      ( RegisterImpl::number_of_registers +
+        FloatRegisterImpl::number_of_registers )
+      * 2                                          // register halves
+      + ConditionRegisterImpl::number_of_registers // condition code registers
+      + SpecialRegisterImpl::number_of_registers   // special registers
+      + VectorRegisterImpl::number_of_registers    // vector registers
+  };
+
+  static const int max_gpr;
+  static const int max_fpr;
+  static const int max_cnd;
+};
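+// Illustrative arithmetic for number_of_registers above, assuming
+// ConditionRegisterImpl::number_of_registers == 8 (CR0..CR7) as declared
+// earlier in this file:
+//   (32 GPRs + 32 FPRs) * 2 + 8 + 6 + 32 = 174 slots.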
+
+// Common register declarations used in assembler code.
+REGISTER_DECLARATION(Register,      R0_SCRATCH, R0);  // volatile
+REGISTER_DECLARATION(Register,      R1_SP,      R1);  // non-volatile
+REGISTER_DECLARATION(Register,      R2_TOC,     R2);  // volatile
+REGISTER_DECLARATION(Register,      R3_RET,     R3);  // volatile
+REGISTER_DECLARATION(Register,      R3_ARG1,    R3);  // volatile
+REGISTER_DECLARATION(Register,      R4_ARG2,    R4);  // volatile
+REGISTER_DECLARATION(Register,      R5_ARG3,    R5);  // volatile
+REGISTER_DECLARATION(Register,      R6_ARG4,    R6);  // volatile
+REGISTER_DECLARATION(Register,      R7_ARG5,    R7);  // volatile
+REGISTER_DECLARATION(Register,      R8_ARG6,    R8);  // volatile
+REGISTER_DECLARATION(Register,      R9_ARG7,    R9);  // volatile
+REGISTER_DECLARATION(Register,      R10_ARG8,   R10); // volatile
+REGISTER_DECLARATION(FloatRegister, F0_SCRATCH, F0);  // volatile
+REGISTER_DECLARATION(FloatRegister, F1_RET,     F1);  // volatile
+REGISTER_DECLARATION(FloatRegister, F1_ARG1,    F1);  // volatile
+REGISTER_DECLARATION(FloatRegister, F2_ARG2,    F2);  // volatile
+REGISTER_DECLARATION(FloatRegister, F3_ARG3,    F3);  // volatile
+REGISTER_DECLARATION(FloatRegister, F4_ARG4,    F4);  // volatile
+REGISTER_DECLARATION(FloatRegister, F5_ARG5,    F5);  // volatile
+REGISTER_DECLARATION(FloatRegister, F6_ARG6,    F6);  // volatile
+REGISTER_DECLARATION(FloatRegister, F7_ARG7,    F7);  // volatile
+REGISTER_DECLARATION(FloatRegister, F8_ARG8,    F8);  // volatile
+REGISTER_DECLARATION(FloatRegister, F9_ARG9,    F9);  // volatile
+REGISTER_DECLARATION(FloatRegister, F10_ARG10,  F10); // volatile
+REGISTER_DECLARATION(FloatRegister, F11_ARG11,  F11); // volatile
+REGISTER_DECLARATION(FloatRegister, F12_ARG12,  F12); // volatile
+REGISTER_DECLARATION(FloatRegister, F13_ARG13,  F13); // volatile
+
+#ifndef DONT_USE_REGISTER_DEFINES
+#define R0_SCRATCH         AS_REGISTER(Register, R0)
+#define R1_SP              AS_REGISTER(Register, R1)
+#define R2_TOC             AS_REGISTER(Register, R2)
+#define R3_RET             AS_REGISTER(Register, R3)
+#define R3_ARG1            AS_REGISTER(Register, R3)
+#define R4_ARG2            AS_REGISTER(Register, R4)
+#define R5_ARG3            AS_REGISTER(Register, R5)
+#define R6_ARG4            AS_REGISTER(Register, R6)
+#define R7_ARG5            AS_REGISTER(Register, R7)
+#define R8_ARG6            AS_REGISTER(Register, R8)
+#define R9_ARG7            AS_REGISTER(Register, R9)
+#define R10_ARG8           AS_REGISTER(Register, R10)
+#define F0_SCRATCH         AS_REGISTER(FloatRegister, F0)
+#define F1_RET             AS_REGISTER(FloatRegister, F1)
+#define F1_ARG1            AS_REGISTER(FloatRegister, F1)
+#define F2_ARG2            AS_REGISTER(FloatRegister, F2)
+#define F3_ARG3            AS_REGISTER(FloatRegister, F3)
+#define F4_ARG4            AS_REGISTER(FloatRegister, F4)
+#define F5_ARG5            AS_REGISTER(FloatRegister, F5)
+#define F6_ARG6            AS_REGISTER(FloatRegister, F6)
+#define F7_ARG7            AS_REGISTER(FloatRegister, F7)
+#define F8_ARG8            AS_REGISTER(FloatRegister, F8)
+#define F9_ARG9            AS_REGISTER(FloatRegister, F9)
+#define F10_ARG10          AS_REGISTER(FloatRegister, F10)
+#define F11_ARG11          AS_REGISTER(FloatRegister, F11)
+#define F12_ARG12          AS_REGISTER(FloatRegister, F12)
+#define F13_ARG13          AS_REGISTER(FloatRegister, F13)
+#endif
+
+// Register declarations to be used in frame manager assembly code.
+// Use only non-volatile registers in order to keep values across C-calls.
+REGISTER_DECLARATION(Register, R14_state,      R14);      // address of new cInterpreter
+REGISTER_DECLARATION(Register, R15_prev_state, R15);      // address of old cInterpreter
+REGISTER_DECLARATION(Register, R16_thread,     R16);      // address of current thread
+REGISTER_DECLARATION(Register, R17_tos,        R17);      // address of Java tos (prepushed)
+REGISTER_DECLARATION(Register, R18_locals,     R18);      // address of first param slot (receiver)
+REGISTER_DECLARATION(Register, R19_method,     R19);      // address of current method
+#ifndef DONT_USE_REGISTER_DEFINES
+#define R14_state         AS_REGISTER(Register, R14)
+#define R15_prev_state    AS_REGISTER(Register, R15)
+#define R16_thread        AS_REGISTER(Register, R16)
+#define R17_tos           AS_REGISTER(Register, R17)
+#define R18_locals        AS_REGISTER(Register, R18)
+#define R19_method        AS_REGISTER(Register, R19)
+#define R21_sender_SP     AS_REGISTER(Register, R21)
+#define R23_method_handle AS_REGISTER(Register, R23)
+#endif
+
+// Temporary registers to be used within frame manager. We can use
+// the non-volatiles because the call stub has saved them.
+// Use only non-volatile registers in order to keep values across C-calls.
+REGISTER_DECLARATION(Register, R21_tmp1, R21);
+REGISTER_DECLARATION(Register, R22_tmp2, R22);
+REGISTER_DECLARATION(Register, R23_tmp3, R23);
+REGISTER_DECLARATION(Register, R24_tmp4, R24);
+REGISTER_DECLARATION(Register, R25_tmp5, R25);
+REGISTER_DECLARATION(Register, R26_tmp6, R26);
+REGISTER_DECLARATION(Register, R27_tmp7, R27);
+REGISTER_DECLARATION(Register, R28_tmp8, R28);
+REGISTER_DECLARATION(Register, R29_tmp9, R29);
+#ifndef DONT_USE_REGISTER_DEFINES
+#define R21_tmp1         AS_REGISTER(Register, R21)
+#define R22_tmp2         AS_REGISTER(Register, R22)
+#define R23_tmp3         AS_REGISTER(Register, R23)
+#define R24_tmp4         AS_REGISTER(Register, R24)
+#define R25_tmp5         AS_REGISTER(Register, R25)
+#define R26_tmp6         AS_REGISTER(Register, R26)
+#define R27_tmp7         AS_REGISTER(Register, R27)
+#define R28_tmp8         AS_REGISTER(Register, R28)
+#define R29_tmp9         AS_REGISTER(Register, R29)
+
+#define CCR4_is_synced AS_REGISTER(ConditionRegister, CCR4)
+#endif
+
+// Scratch registers are volatile.
+REGISTER_DECLARATION(Register, R11_scratch1, R11);
+REGISTER_DECLARATION(Register, R12_scratch2, R12);
+#ifndef DONT_USE_REGISTER_DEFINES
+#define R11_scratch1   AS_REGISTER(Register, R11)
+#define R12_scratch2   AS_REGISTER(Register, R12)
+#endif
+
+#endif // CPU_PPC_VM_REGISTER_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/relocInfo_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
+#include "assembler_ppc.inline.hpp"
+#include "code/relocInfo.hpp"
+#include "nativeInst_ppc.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/safepoint.hpp"
+
+void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
+  bool copy_back_to_oop_pool = true;  // TODO: PPC port
+  // The following comment is from the declaration of DataRelocation:
+  //
+  //  "The "o" (displacement) argument is relevant only to split relocations
+  //   on RISC machines.  In some CPUs (SPARC), the set-hi and set-lo ins'ns
+  //   can encode more than 32 bits between them.  This allows compilers to
+  //   share set-hi instructions between addresses that differ by a small
+  //   offset (e.g., different static variables in the same class).
+  //   On such machines, the "x" argument to set_value on all set-lo
+  //   instructions must be the same as the "x" argument for the
+  //   corresponding set-hi instructions.  The "o" arguments for the
+  //   set-hi instructions are ignored, and must not affect the high-half
+  //   immediate constant.  The "o" arguments for the set-lo instructions are
+  //   added into the low-half immediate constant, and must not overflow it."
+  //
+  // Currently we don't support splitting of relocations, so o must be
+  // zero:
+  assert(o == 0, "tried to split relocations");
+
+  if (!verify_only) {
+    if (format() != 1) {
+      nativeMovConstReg_at(addr())->set_data_plain(((intptr_t)x), code());
+    } else {
+      assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
+             "how to encode else?");
+      narrowOop no = (type() == relocInfo::oop_type) ?
+        oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+      nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
+    }
+  } else {
+    assert((address) (nativeMovConstReg_at(addr())->data()) == x, "data must match");
+  }
+}
+
+address Relocation::pd_call_destination(address orig_addr) {
+  intptr_t adj = 0;
+  address inst_loc = addr();
+
+  if (orig_addr != NULL) {
+    // We just moved this call instruction from orig_addr to addr().
+    // This means its target will appear to have grown by addr() - orig_addr.
+    adj = -(inst_loc - orig_addr);
+  }
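+  // Illustrative: if the instruction moved up by 0x100 bytes (i.e.
+  // inst_loc == orig_addr + 0x100), adj == -0x100, so a pc-relative
+  // destination read at the new location is corrected back to the
+  // target it had at orig_addr.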
+  if (NativeFarCall::is_far_call_at(inst_loc)) {
+    NativeFarCall* call = nativeFarCall_at(inst_loc);
+    return call->destination() + (intptr_t)(call->is_pcrelative() ? adj : 0);
+  } else if (NativeJump::is_jump_at(inst_loc)) {
+    NativeJump* jump = nativeJump_at(inst_loc);
+    return jump->jump_destination() + (intptr_t)(jump->is_pcrelative() ? adj : 0);
+  } else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
+    NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
+    return branch->branch_destination();
+  } else {
+    // There are two instructions at the beginning of a stub; therefore
+    // we load at orig_addr + 8.
+    orig_addr = nativeCall_at(inst_loc)->get_trampoline();
+    if (orig_addr == NULL) {
+      return (address) -1;
+    } else {
+      return (address) nativeMovConstReg_at(orig_addr + 8)->data();
+    }
+  }
+}
+
+void Relocation::pd_set_call_destination(address x) {
+  address inst_loc = addr();
+
+  if (NativeFarCall::is_far_call_at(inst_loc)) {
+    NativeFarCall* call = nativeFarCall_at(inst_loc);
+    call->set_destination(x);
+  } else if (NativeJump::is_jump_at(inst_loc)) {
+    NativeJump* jump= nativeJump_at(inst_loc);
+    jump->set_jump_destination(x);
+  } else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
+    NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
+    branch->set_branch_destination(x);
+  } else {
+    NativeCall* call = nativeCall_at(inst_loc);
+    call->set_destination_mt_safe(x, false);
+  }
+}
+
+address* Relocation::pd_address_in_code() {
+  ShouldNotReachHere();
+  return 0;
+}
+
+address Relocation::pd_get_address_from_code() {
+  return (address)(nativeMovConstReg_at(addr())->data());
+}
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
+
+void metadata_Relocation::pd_fix_value(address x) {
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/relocInfo_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_RELOCINFO_PPC_HPP
+#define CPU_PPC_VM_RELOCINFO_PPC_HPP
+
+  // machine-dependent parts of class relocInfo
+ private:
+  enum {
+    // Since Power instructions are whole words,
+    // the two low-order offset bits can always be discarded.
+    offset_unit        =  4,
+
+    // There is no need for format bits; the instructions are
+    // sufficiently self-identifying.
+#ifndef _LP64
+    format_width       =  0
+#else
+    // Except narrow oops in 64-bits VM.
+    format_width       =  1
+#endif
+  };
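+  // Illustrative: with offset_unit == 4, a relocation placed 12 bytes
+  // after the previous one stores the scaled offset 12 / 4 == 3; the two
+  // discarded low-order bits are always zero for word-aligned instructions.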
+
+#endif // CPU_PPC_VM_RELOCINFO_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/runtime_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#ifdef COMPILER2
+#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "code/vmreg.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_ppc.hpp"
+#include "opto/runtime.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "vmreg_ppc.inline.hpp"
+#endif
+
+#define __ masm->
+
+
+#ifdef COMPILER2
+
+// SP adjustment (must use unextended SP) for method handle call sites
+// during exception handling.
+static intptr_t adjust_SP_for_methodhandle_callsite(JavaThread *thread) {
+  RegisterMap map(thread, false);
+  // The frame constructor will do the correction for us (see frame::adjust_unextended_SP).
+  frame mh_caller_frame = thread->last_frame().sender(&map);
+  assert(mh_caller_frame.is_compiled_frame(), "Only may reach here for compiled MH call sites");
+  return (intptr_t) mh_caller_frame.unextended_sp();
+}
+
+//------------------------------generate_exception_blob---------------------------
+// Creates the exception blob.
+// The exception blob is jumped to from a compiled method when an
+// exception occurs.
+//
+// Given an exception pc at a call, we call into the runtime for the
+// handler in this method. This handler might merely restore state
+// (i.e., callee-save registers), unwind the frame, and jump to the
+// exception handler for the nmethod if there is no Java-level handler
+// for the nmethod.
+//
+// This code is entered with a jump, not a call.
+//
+// Arguments:
+//   R3_ARG1: exception oop
+//   R4_ARG2: exception pc
+//
+// Results:
+//   R3_ARG1: exception oop
+//   R4_ARG2: exception pc in caller
+//   destination: exception handler of caller
+//
+// Note: the exception pc MUST be at a call (precise debug information)
+//
+void OptoRuntime::generate_exception_blob() {
+  // Allocate space for the code.
+  ResourceMark rm;
+  // Set up code generation tools.
+  CodeBuffer buffer("exception_blob", 2048, 1024);
+  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
+
+  address start = __ pc();
+
+  int frame_size_in_bytes = frame::abi_112_size;
+  OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
+
+  // Exception pc is 'return address' for stack walker.
+  __ std(R4_ARG2/*exception pc*/, _abi(lr), R1_SP);
+
+  // Store the exception in the Thread object.
+  __ std(R3_ARG1/*exception oop*/, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+  __ std(R4_ARG2/*exception pc*/,  in_bytes(JavaThread::exception_pc_offset()),  R16_thread);
+
+  // Save callee-saved registers.
+  // Push a C frame for the exception blob. It is needed for the C call later on.
+  __ push_frame_abi112(0, R11_scratch1);
+
+  // This call does all the hard work. It checks if an exception handler
+  // exists in the method.
+  // If so, it returns the handler address.
+  // If not, it prepares for stack-unwinding, restoring the callee-save
+  // registers of the frame being removed.
+  __ set_last_Java_frame(/*sp=*/R1_SP, noreg);
+
+  __ mr(R3_ARG1, R16_thread);
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, OptoRuntime::handle_exception_C),
+            relocInfo::none);
+  address calls_return_pc = __ last_calls_return_pc();
+# ifdef ASSERT
+  __ cmpdi(CCR0, R3_RET, 0);
+  __ asm_assert_ne("handle_exception_C must not return NULL", 0x601);
+# endif
+
+  // Set an oopmap for the call site. This oopmap will only be used if we
+  // are unwinding the stack. Hence, all locations will be dead.
+  // Callee-saved registers will be the same as the frame above (i.e.,
+  // handle_exception_stub), since they were restored when we got the
+  // exception.
+  OopMapSet* oop_maps = new OopMapSet();
+  oop_maps->add_gc_map(calls_return_pc - start, map);
+
+  // Get unextended_sp for method handle call sites.
+  Label mh_callsite, mh_done; // Use a second C call if it's a method handle call site.
+  __ lwa(R4_ARG2, in_bytes(JavaThread::is_method_handle_return_offset()), R16_thread);
+  __ cmpwi(CCR0, R4_ARG2, 0);
+  __ bne(CCR0, mh_callsite);
+
+  __ mtctr(R3_RET); // Move address of exception handler to SR_CTR.
+  __ reset_last_Java_frame();
+  __ pop_frame();
+
+  __ bind(mh_done);
+  // We have a handler in register SR_CTR (could be deopt blob).
+
+  // Get the exception oop.
+  __ ld(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+
+  // Get the exception pc in case we are deoptimized.
+  __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
+
+  // Reset thread values.
+  __ li(R0, 0);
+#ifdef ASSERT
+  __ std(R0, in_bytes(JavaThread::exception_handler_pc_offset()), R16_thread);
+  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
+#endif
+  // Clear the exception oop so GC no longer processes it as a root.
+  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+
+  // Move exception pc into SR_LR.
+  __ mtlr(R4_ARG2);
+  __ bctr();
+
+
+  // Same as above, but also set sp to unextended_sp.
+  __ bind(mh_callsite);
+  __ mr(R31, R3_RET); // Save branch address.
+  __ mr(R3_ARG1, R16_thread);
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, adjust_SP_for_methodhandle_callsite), relocInfo::none);
+  // Returns unextended_sp in R3_RET.
+
+  __ mtctr(R31); // Move address of exception handler to SR_CTR.
+  __ reset_last_Java_frame();
+
+  __ mr(R1_SP, R3_RET); // Set sp to unextended_sp.
+  __ b(mh_done);
+
+
+  // Make sure all code is generated.
+  masm->flush();
+
+  // Set exception blob.
+  _exception_blob = ExceptionBlob::create(&buffer, oop_maps,
+                                          frame_size_in_bytes/wordSize);
+}
+
+#endif // COMPILER2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,3218 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/debugInfoRec.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "oops/compiledICHolder.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/vframeArray.hpp"
+#include "vmreg_ppc.inline.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
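+// Note: BLOCK_COMMENT compiles to nothing in PRODUCT builds; BIND() binds
+// the label and, in non-product builds, also emits the label's name as a
+// block comment into the disassembly.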
+
+
+// Used by generate_deopt_blob.  Defined in .ad file.
+extern uint size_deopt_handler();
+
+
+class RegisterSaver {
+ // Used for saving volatile registers.
+ public:
+
+  // Support different return pc locations.
+  enum ReturnPCLocation {
+    return_pc_is_lr,
+    return_pc_is_r4,
+    return_pc_is_thread_saved_exception_pc
+  };
+
+  static OopMap* push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
+                         int* out_frame_size_in_bytes,
+                         bool generate_oop_map,
+                         int return_pc_adjustment,
+                         ReturnPCLocation return_pc_location);
+  static void    restore_live_registers_and_pop_frame(MacroAssembler* masm,
+                         int frame_size_in_bytes,
+                         bool restore_ctr);
+
+  static void push_frame_and_save_argument_registers(MacroAssembler* masm,
+                         Register r_temp,
+                         int frame_size,
+                         int total_args,
+                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);
+  static void restore_argument_registers_and_pop_frame(MacroAssembler* masm,
+                         int frame_size,
+                         int total_args,
+                         const VMRegPair *regs, const VMRegPair *regs2 = NULL);
+
+  // During deoptimization only the result registers need to be restored;
+  // all the other values have already been extracted.
+  static void restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes);
+
+  // Constants and data structures:
+
+  typedef enum {
+    int_reg           = 0,
+    float_reg         = 1,
+    special_reg       = 2
+  } RegisterType;
+
+  typedef enum {
+    reg_size          = 8,
+    half_reg_size     = reg_size / 2
+  } RegisterConstants;
+
+  typedef struct {
+    RegisterType        reg_type;
+    int                 reg_num;
+    VMReg               vmreg;
+  } LiveRegType;
+};
+
+
+#define RegisterSaver_LiveSpecialReg(regname) \
+  { RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }
+
+#define RegisterSaver_LiveIntReg(regname) \
+  { RegisterSaver::int_reg,     regname->encoding(), regname->as_VMReg() }
+
+#define RegisterSaver_LiveFloatReg(regname) \
+  { RegisterSaver::float_reg,   regname->encoding(), regname->as_VMReg() }
+
+static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
+  // Live registers which get spilled to the stack. Register
+  // positions in this array correspond directly to the stack layout.
+
+  //
+  // live special registers:
+  //
+  RegisterSaver_LiveSpecialReg(SR_CTR),
+  //
+  // live float registers:
+  //
+  RegisterSaver_LiveFloatReg( F0  ),
+  RegisterSaver_LiveFloatReg( F1  ),
+  RegisterSaver_LiveFloatReg( F2  ),
+  RegisterSaver_LiveFloatReg( F3  ),
+  RegisterSaver_LiveFloatReg( F4  ),
+  RegisterSaver_LiveFloatReg( F5  ),
+  RegisterSaver_LiveFloatReg( F6  ),
+  RegisterSaver_LiveFloatReg( F7  ),
+  RegisterSaver_LiveFloatReg( F8  ),
+  RegisterSaver_LiveFloatReg( F9  ),
+  RegisterSaver_LiveFloatReg( F10 ),
+  RegisterSaver_LiveFloatReg( F11 ),
+  RegisterSaver_LiveFloatReg( F12 ),
+  RegisterSaver_LiveFloatReg( F13 ),
+  RegisterSaver_LiveFloatReg( F14 ),
+  RegisterSaver_LiveFloatReg( F15 ),
+  RegisterSaver_LiveFloatReg( F16 ),
+  RegisterSaver_LiveFloatReg( F17 ),
+  RegisterSaver_LiveFloatReg( F18 ),
+  RegisterSaver_LiveFloatReg( F19 ),
+  RegisterSaver_LiveFloatReg( F20 ),
+  RegisterSaver_LiveFloatReg( F21 ),
+  RegisterSaver_LiveFloatReg( F22 ),
+  RegisterSaver_LiveFloatReg( F23 ),
+  RegisterSaver_LiveFloatReg( F24 ),
+  RegisterSaver_LiveFloatReg( F25 ),
+  RegisterSaver_LiveFloatReg( F26 ),
+  RegisterSaver_LiveFloatReg( F27 ),
+  RegisterSaver_LiveFloatReg( F28 ),
+  RegisterSaver_LiveFloatReg( F29 ),
+  RegisterSaver_LiveFloatReg( F30 ),
+  RegisterSaver_LiveFloatReg( F31 ),
+  //
+  // live integer registers:
+  //
+  RegisterSaver_LiveIntReg(   R0  ),
+  //RegisterSaver_LiveIntReg( R1  ), // stack pointer
+  RegisterSaver_LiveIntReg(   R2  ),
+  RegisterSaver_LiveIntReg(   R3  ),
+  RegisterSaver_LiveIntReg(   R4  ),
+  RegisterSaver_LiveIntReg(   R5  ),
+  RegisterSaver_LiveIntReg(   R6  ),
+  RegisterSaver_LiveIntReg(   R7  ),
+  RegisterSaver_LiveIntReg(   R8  ),
+  RegisterSaver_LiveIntReg(   R9  ),
+  RegisterSaver_LiveIntReg(   R10 ),
+  RegisterSaver_LiveIntReg(   R11 ),
+  RegisterSaver_LiveIntReg(   R12 ),
+  //RegisterSaver_LiveIntReg( R13 ), // system thread id
+  RegisterSaver_LiveIntReg(   R14 ),
+  RegisterSaver_LiveIntReg(   R15 ),
+  RegisterSaver_LiveIntReg(   R16 ),
+  RegisterSaver_LiveIntReg(   R17 ),
+  RegisterSaver_LiveIntReg(   R18 ),
+  RegisterSaver_LiveIntReg(   R19 ),
+  RegisterSaver_LiveIntReg(   R20 ),
+  RegisterSaver_LiveIntReg(   R21 ),
+  RegisterSaver_LiveIntReg(   R22 ),
+  RegisterSaver_LiveIntReg(   R23 ),
+  RegisterSaver_LiveIntReg(   R24 ),
+  RegisterSaver_LiveIntReg(   R25 ),
+  RegisterSaver_LiveIntReg(   R26 ),
+  RegisterSaver_LiveIntReg(   R27 ),
+  RegisterSaver_LiveIntReg(   R28 ),
+  RegisterSaver_LiveIntReg(   R29 ),
+  RegisterSaver_LiveIntReg(   R31 ),
+  RegisterSaver_LiveIntReg(   R30 ), // r30 must be the last register
+};
+
+OopMap* RegisterSaver::push_frame_abi112_and_save_live_registers(MacroAssembler* masm,
+                         int* out_frame_size_in_bytes,
+                         bool generate_oop_map,
+                         int return_pc_adjustment,
+                         ReturnPCLocation return_pc_location) {
+  // Push an abi112-frame and store all registers which may be live.
+  // If requested, create an OopMap: Record volatile registers as
+  // callee-save values in an OopMap so their save locations will be
+  // propagated to the RegisterMap of the caller frame during
+  // StackFrameStream construction (needed for deoptimization; see
+  // compiledVFrame::create_stack_value).
+  // If return_pc_adjustment != 0 adjust the return pc by return_pc_adjustment.
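+  //
+  // Sketch of the frame built here (illustrative only; assumes
+  // frame::abi_112_size == 112 and 16-byte frame alignment):
+  //
+  //   [new R1_SP] -> ABI_112 area (112 bytes)
+  //                  register save area (regstosave_num * reg_size bytes,
+  //                                      rounded up to the alignment)
+  //   [old R1_SP] -> caller's frame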
+
+  int offset;
+
+  // Calculate the frame size.
+  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
+                                   sizeof(RegisterSaver::LiveRegType);
+  const int register_save_size   = regstosave_num * reg_size;
+  const int frame_size_in_bytes  = round_to(register_save_size, frame::alignment_in_bytes)
+                                   + frame::abi_112_size;
+  *out_frame_size_in_bytes       = frame_size_in_bytes;
+  const int frame_size_in_slots  = frame_size_in_bytes / sizeof(jint);
+  const int register_save_offset = frame_size_in_bytes - register_save_size;
+
+  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words.
+  OopMap* map = generate_oop_map ? new OopMap(frame_size_in_slots, 0) : NULL;
+
+  BLOCK_COMMENT("push_frame_abi112_and_save_live_registers {");
+
+  // Save r30 in the last slot of the not yet pushed frame so that we
+  // can use it as scratch reg.
+  __ std(R30, -reg_size, R1_SP);
+  assert(-reg_size == register_save_offset - frame_size_in_bytes + ((regstosave_num-1)*reg_size),
+         "consistency check");
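+  // The check holds because R30 is deliberately the last entry in
+  // RegisterSaver_LiveRegs, so its save slot (register_save_offset +
+  // (regstosave_num-1)*reg_size relative to the new SP) is exactly the
+  // word at -reg_size relative to the current, not yet pushed, SP.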
+
+  // save the flags
+  // Do the save_LR_CR by hand and adjust the return pc if requested.
+  __ mfcr(R30);
+  __ std(R30, _abi(cr), R1_SP);
+  switch (return_pc_location) {
+    case return_pc_is_lr:    __ mflr(R30);       break;
+    case return_pc_is_r4:    __ mr(R30, R4);     break;
+    case return_pc_is_thread_saved_exception_pc:
+      __ ld(R30, thread_(saved_exception_pc));   break;
+    default: ShouldNotReachHere();
+  }
+  if (return_pc_adjustment != 0) {
+    __ addi(R30, R30, return_pc_adjustment);
+  }
+  __ std(R30, _abi(lr), R1_SP);
+
+  // push a new frame
+  __ push_frame(frame_size_in_bytes, R30);
+
+  // save all registers (ints and floats)
+  offset = register_save_offset;
+  for (int i = 0; i < regstosave_num; i++) {
+    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
+    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
+
+    switch (reg_type) {
+      case RegisterSaver::int_reg: {
+        if (reg_num != 30) { // We spilled R30 right at the beginning.
+          __ std(as_Register(reg_num), offset, R1_SP);
+        }
+        break;
+      }
+      case RegisterSaver::float_reg: {
+        __ stfd(as_FloatRegister(reg_num), offset, R1_SP);
+        break;
+      }
+      case RegisterSaver::special_reg: {
+        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
+          __ mfctr(R30);
+          __ std(R30, offset, R1_SP);
+        } else {
+          Unimplemented();
+        }
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+
+    if (generate_oop_map) {
+      map->set_callee_saved(VMRegImpl::stack2reg(offset>>2),
+                            RegisterSaver_LiveRegs[i].vmreg);
+      map->set_callee_saved(VMRegImpl::stack2reg((offset + half_reg_size)>>2),
+                            RegisterSaver_LiveRegs[i].vmreg->next());
+    }
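+    // Note: each 64-bit register is recorded above as two adjacent 32-bit
+    // OopMap slots (offset>>2 and its 'next()' half), matching the 4-byte
+    // stack-slot granularity of the OopMap machinery.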
+    offset += reg_size;
+  }
+
+  BLOCK_COMMENT("} push_frame_abi112_and_save_live_registers");
+
+  // And we're done.
+  return map;
+}
+
+
+// Pop the current frame and restore all the registers that we
+// saved.
+void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
+                                                         int frame_size_in_bytes,
+                                                         bool restore_ctr) {
+  int offset;
+  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
+                                   sizeof(RegisterSaver::LiveRegType);
+  const int register_save_size   = regstosave_num * reg_size;
+  const int register_save_offset = frame_size_in_bytes - register_save_size;
+
+  BLOCK_COMMENT("restore_live_registers_and_pop_frame {");
+
+  // restore all registers (ints and floats)
+  offset = register_save_offset;
+  for (int i = 0; i < regstosave_num; i++) {
+    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
+    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
+
+    switch (reg_type) {
+      case RegisterSaver::int_reg: {
+        if (reg_num != 30) { // R30 restored at the end, it's the tmp reg!
+          __ ld(as_Register(reg_num), offset, R1_SP);
+        }
+        break;
+      }
+      case RegisterSaver::float_reg: {
+        __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
+        break;
+      }
+      case RegisterSaver::special_reg: {
+        if (reg_num == SR_CTR_SpecialRegisterEnumValue) {
+          if (restore_ctr) { // Nothing to do here if ctr already contains the next address.
+            __ ld(R30, offset, R1_SP);
+            __ mtctr(R30);
+          }
+        } else {
+          Unimplemented();
+        }
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+    offset += reg_size;
+  }
+
+  // pop the frame
+  __ pop_frame();
+
+  // restore the flags
+  __ restore_LR_CR(R30);
+
+  // restore scratch register's value
+  __ ld(R30, -reg_size, R1_SP);
+
+  BLOCK_COMMENT("} restore_live_registers_and_pop_frame");
+}
+
+void RegisterSaver::push_frame_and_save_argument_registers(MacroAssembler* masm, Register r_temp,
+                                                           int frame_size, int total_args, const VMRegPair *regs,
+                                                           const VMRegPair *regs2) {
+  __ push_frame(frame_size, r_temp);
+  int st_off = frame_size - wordSize;
+  for (int i = 0; i < total_args; i++) {
+    VMReg r_1 = regs[i].first();
+    VMReg r_2 = regs[i].second();
+    if (!r_1->is_valid()) {
+      assert(!r_2->is_valid(), "");
+      continue;
+    }
+    if (r_1->is_Register()) {
+      Register r = r_1->as_Register();
+      __ std(r, st_off, R1_SP);
+      st_off -= wordSize;
+    } else if (r_1->is_FloatRegister()) {
+      FloatRegister f = r_1->as_FloatRegister();
+      __ stfd(f, st_off, R1_SP);
+      st_off -= wordSize;
+    }
+  }
+  if (regs2 != NULL) {
+    for (int i = 0; i < total_args; i++) {
+      VMReg r_1 = regs2[i].first();
+      VMReg r_2 = regs2[i].second();
+      if (!r_1->is_valid()) {
+        assert(!r_2->is_valid(), "");
+        continue;
+      }
+      if (r_1->is_Register()) {
+        Register r = r_1->as_Register();
+        __ std(r, st_off, R1_SP);
+        st_off -= wordSize;
+      } else if (r_1->is_FloatRegister()) {
+        FloatRegister f = r_1->as_FloatRegister();
+        __ stfd(f, st_off, R1_SP);
+        st_off -= wordSize;
+      }
+    }
+  }
+}
+
+void RegisterSaver::restore_argument_registers_and_pop_frame(MacroAssembler* masm, int frame_size,
+                                                             int total_args, const VMRegPair *regs,
+                                                             const VMRegPair *regs2) {
+  int st_off = frame_size - wordSize;
+  for (int i = 0; i < total_args; i++) {
+    VMReg r_1 = regs[i].first();
+    VMReg r_2 = regs[i].second();
+    if (r_1->is_Register()) {
+      Register r = r_1->as_Register();
+      __ ld(r, st_off, R1_SP);
+      st_off -= wordSize;
+    } else if (r_1->is_FloatRegister()) {
+      FloatRegister f = r_1->as_FloatRegister();
+      __ lfd(f, st_off, R1_SP);
+      st_off -= wordSize;
+    }
+  }
+  if (regs2 != NULL) {
+    for (int i = 0; i < total_args; i++) {
+      VMReg r_1 = regs2[i].first();
+      VMReg r_2 = regs2[i].second();
+      if (r_1->is_Register()) {
+        Register r = r_1->as_Register();
+        __ ld(r, st_off, R1_SP);
+        st_off -= wordSize;
+      } else if (r_1->is_FloatRegister()) {
+        FloatRegister f = r_1->as_FloatRegister();
+        __ lfd(f, st_off, R1_SP);
+        st_off -= wordSize;
+      }
+    }
+  }
+  __ pop_frame();
+}
+
+// Restore the registers that might be holding a result.
+void RegisterSaver::restore_result_registers(MacroAssembler* masm, int frame_size_in_bytes) {
+  int offset;
+  const int regstosave_num       = sizeof(RegisterSaver_LiveRegs) /
+                                   sizeof(RegisterSaver::LiveRegType);
+  const int register_save_size   = regstosave_num * reg_size;
+  const int register_save_offset = frame_size_in_bytes - register_save_size;
+
+  // restore all result registers (ints and floats)
+  offset = register_save_offset;
+  for (int i = 0; i < regstosave_num; i++) {
+    int reg_num  = RegisterSaver_LiveRegs[i].reg_num;
+    int reg_type = RegisterSaver_LiveRegs[i].reg_type;
+    switch (reg_type) {
+      case RegisterSaver::int_reg: {
+        if (as_Register(reg_num) == R3_RET) { // int result_reg
+          __ ld(as_Register(reg_num), offset, R1_SP);
+        }
+        break;
+      }
+      case RegisterSaver::float_reg: {
+        if (as_FloatRegister(reg_num) == F1_RET) { // float result_reg
+          __ lfd(as_FloatRegister(reg_num), offset, R1_SP);
+        }
+        break;
+      }
+      case RegisterSaver::special_reg: {
+        // Special registers don't hold a result.
+        break;
+      }
+      default:
+        ShouldNotReachHere();
+    }
+    offset += reg_size;
+  }
+}
+
+// Is the vector's size (in bytes) bigger than the size saved by default?
+bool SharedRuntime::is_wide_vector(int size) {
+  ResourceMark rm;
+  // Note, MaxVectorSize == 8 on PPC64.
+  assert(size <= 8, err_msg_res("%d-byte vectors are not supported", size));
+  return size > 8;
+}
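+// Note: given the assert above (size <= 8, with MaxVectorSize == 8 on
+// PPC64), this function currently always returns false.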
+#ifdef COMPILER2
+static int reg2slot(VMReg r) {
+  return r->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+}
+
+static int reg2offset(VMReg r) {
+  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
+}
+#endif
+
+// ---------------------------------------------------------------------------
+// Read the array of BasicTypes from a signature, and compute where the
+// arguments should go. Values in the VMRegPair regs array refer to 4-byte
+// quantities. Values less than VMRegImpl::stack0 are registers, those above
+// refer to 4-byte stack slots. All stack slots are based off of the stack
+// pointer as frame sizes are fixed. VMRegImpl::stack0 refers to the first
+// slot 0(sp), and VMRegImpl::stack0+1 refers to the memory word 4 bytes
+// higher. Register values up to RegisterImpl::number_of_registers are the
+// 64-bit integer registers.
+
+// Note: the INPUTS in sig_bt are in units of Java argument words, which are
+// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
+// units regardless of build. Of course for i486 there is no 64-bit build.
+
+// The Java calling convention is a "shifted" version of the C ABI.
+// By skipping the first C ABI register we can call non-static JNI methods
+// with small numbers of arguments without having to shuffle the arguments
+// at all. Since we control the Java ABI we ought to at least get some
+// advantage out of it.
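+//
+// Illustrative mapping (a sketch under the conventions above): for a Java
+// signature (int, long, double), java_calling_convention() sees
+//   sig_bt = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }
+// and assigns regs[0] = R3 (set1), regs[1] = R4 (set2), and
+// regs[3] = F1 (set2); the T_VOID halves are set_bad().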
+
+const VMReg java_iarg_reg[8] = {
+  R3->as_VMReg(),
+  R4->as_VMReg(),
+  R5->as_VMReg(),
+  R6->as_VMReg(),
+  R7->as_VMReg(),
+  R8->as_VMReg(),
+  R9->as_VMReg(),
+  R10->as_VMReg()
+};
+
+const VMReg java_farg_reg[13] = {
+  F1->as_VMReg(),
+  F2->as_VMReg(),
+  F3->as_VMReg(),
+  F4->as_VMReg(),
+  F5->as_VMReg(),
+  F6->as_VMReg(),
+  F7->as_VMReg(),
+  F8->as_VMReg(),
+  F9->as_VMReg(),
+  F10->as_VMReg(),
+  F11->as_VMReg(),
+  F12->as_VMReg(),
+  F13->as_VMReg()
+};
+
+const int num_java_iarg_registers = sizeof(java_iarg_reg) / sizeof(java_iarg_reg[0]);
+const int num_java_farg_registers = sizeof(java_farg_reg) / sizeof(java_farg_reg[0]);
+
+int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
+                                           VMRegPair *regs,
+                                           int total_args_passed,
+                                           int is_outgoing) {
+  // Calling conventions for compiled-to-compiled (c2c) calls.
+  // Put 8 ints/longs into registers _AND_ 13 float/doubles into
+  // registers _AND_ put the rest on the stack.
+
+  const int inc_stk_for_intfloat   = 1; // 1 slot for ints and floats
+  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
+
+  int i;
+  VMReg reg;
+  int stk = 0;
+  int ireg = 0;
+  int freg = 0;
+
+  // We put the first 8 arguments into registers and the rest on the
+  // stack, float arguments are already in their argument registers
+  // due to c2c calling conventions (see calling_convention).
+  for (int i = 0; i < total_args_passed; ++i) {
+    switch(sig_bt[i]) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:
+      if (ireg < num_java_iarg_registers) {
+        // Put int/ptr in register
+        reg = java_iarg_reg[ireg];
+        ++ireg;
+      } else {
+        // Put int/ptr on stack.
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_intfloat;
+      }
+      regs[i].set1(reg);
+      break;
+    case T_LONG:
+      assert(sig_bt[i+1] == T_VOID, "expecting half");
+      if (ireg < num_java_iarg_registers) {
+        // Put long in register.
+        reg = java_iarg_reg[ireg];
+        ++ireg;
+      } else {
+        // Put long on stack. They must be aligned to 2 slots.
+        if (stk & 0x1) ++stk;
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_longdouble;
+      }
+      regs[i].set2(reg);
+      break;
+    case T_OBJECT:
+    case T_ARRAY:
+    case T_ADDRESS:
+      if (ireg < num_java_iarg_registers) {
+        // Put ptr in register.
+        reg = java_iarg_reg[ireg];
+        ++ireg;
+      } else {
+        // Put ptr on stack. Objects must be aligned to 2 slots too,
+        // because "64-bit pointers record oop-ishness on 2 aligned
+        // adjacent registers." (see OopFlow::build_oop_map).
+        if (stk & 0x1) ++stk;
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_longdouble;
+      }
+      regs[i].set2(reg);
+      break;
+    case T_FLOAT:
+      if (freg < num_java_farg_registers) {
+        // Put float in register.
+        reg = java_farg_reg[freg];
+        ++freg;
+      } else {
+        // Put float on stack.
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_intfloat;
+      }
+      regs[i].set1(reg);
+      break;
+    case T_DOUBLE:
+      assert(sig_bt[i+1] == T_VOID, "expecting half");
+      if (freg < num_java_farg_registers) {
+        // Put double in register.
+        reg = java_farg_reg[freg];
+        ++freg;
+      } else {
+        // Put double on stack. They must be aligned to 2 slots.
+        if (stk & 0x1) ++stk;
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_longdouble;
+      }
+      regs[i].set2(reg);
+      break;
+    case T_VOID:
+      // Do not count halves.
+      regs[i].set_bad();
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  return round_to(stk, 2);
+}
+
+#ifdef COMPILER2
+// Calling convention for calling C code.
+int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
+                                        VMRegPair *regs,
+                                        VMRegPair *regs2,
+                                        int total_args_passed) {
+  // Calling conventions for C runtime calls and calls to JNI native methods.
+  //
+  // PPC64 convention: Hoist the first 8 int/ptr/long's in the first 8
+  // int regs, leaving int regs undefined if the arg is flt/dbl. Hoist
+  // the first 13 flt/dbl's in the first 13 fp regs but additionally
+  // copy flt/dbl to the stack if they are beyond the 8th argument.
+
+  const VMReg iarg_reg[8] = {
+    R3->as_VMReg(),
+    R4->as_VMReg(),
+    R5->as_VMReg(),
+    R6->as_VMReg(),
+    R7->as_VMReg(),
+    R8->as_VMReg(),
+    R9->as_VMReg(),
+    R10->as_VMReg()
+  };
+
+  const VMReg farg_reg[13] = {
+    F1->as_VMReg(),
+    F2->as_VMReg(),
+    F3->as_VMReg(),
+    F4->as_VMReg(),
+    F5->as_VMReg(),
+    F6->as_VMReg(),
+    F7->as_VMReg(),
+    F8->as_VMReg(),
+    F9->as_VMReg(),
+    F10->as_VMReg(),
+    F11->as_VMReg(),
+    F12->as_VMReg(),
+    F13->as_VMReg()
+  };
+
+  // Check calling conventions consistency.
+  assert(sizeof(iarg_reg) / sizeof(iarg_reg[0]) == Argument::n_int_register_parameters_c &&
+         sizeof(farg_reg) / sizeof(farg_reg[0]) == Argument::n_float_register_parameters_c,
+         "consistency");
+
+  // `Stk' counts stack slots. Due to alignment, 32-bit values occupy
+  // 2 such slots, like 64-bit values do.
+  const int inc_stk_for_intfloat   = 2; // 2 slots for ints and floats
+  const int inc_stk_for_longdouble = 2; // 2 slots for longs and doubles
+
+  int i;
+  VMReg reg;
+  // Leave room for C-compatible ABI_112.
+  int stk = (frame::abi_112_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size;
+  int arg = 0;
+  int freg = 0;
+
+  // Avoid passing C arguments in the wrong stack slots.
+  assert((SharedRuntime::out_preserve_stack_slots() + stk) * VMRegImpl::stack_slot_size == 112,
+         "passing C arguments in wrong stack slots");
+
+  // We fill out regs AND regs2 if an argument must be passed in a
+  // register AND in a stack slot. If regs2 is NULL in such a
+  // situation, we bail out with a fatal error.
+  for (int i = 0; i < total_args_passed; ++i, ++arg) {
+    // Initialize regs2 to BAD.
+    if (regs2 != NULL) regs2[i].set_bad();
+
+    switch(sig_bt[i]) {
+
+    //
+    // If arguments 0-7 are integers, they are passed in integer registers.
+    // Argument i is placed in iarg_reg[i].
+    //
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:
+      // We must cast ints to longs and use full 64-bit stack slots
+      // here. We do the cast in GraphKit::gen_stub() and just guard
+      // here against losing that change.
+      assert(CCallingConventionRequiresIntsAsLongs,
+             "argument of type int should be promoted to type long");
+      guarantee(i > 0 && sig_bt[i-1] == T_LONG,
+                "argument of type (bt) should have been promoted to type (T_LONG,bt) for bt in "
+                "{T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
+      // Do not count halves.
+      regs[i].set_bad();
+      --arg;
+      break;
+    case T_LONG:
+      guarantee(sig_bt[i+1] == T_VOID    ||
+                sig_bt[i+1] == T_BOOLEAN || sig_bt[i+1] == T_CHAR  ||
+                sig_bt[i+1] == T_BYTE    || sig_bt[i+1] == T_SHORT ||
+                sig_bt[i+1] == T_INT,
+                "expecting type (T_LONG,half) or type (T_LONG,bt) with bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
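+      // Fall through: longs share the register/stack assignment of the
+      // pointer cases below.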
+    case T_OBJECT:
+    case T_ARRAY:
+    case T_ADDRESS:
+    case T_METADATA:
+      // Oops are already boxed if required (JNI).
+      if (arg < Argument::n_int_register_parameters_c) {
+        reg = iarg_reg[arg];
+      } else {
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_longdouble;
+      }
+      regs[i].set2(reg);
+      break;
+
+    //
+    // Floats are treated differently from int regs:  The first 13 float arguments
+    // are passed in registers (not the float args among the first 13 args).
+    // Thus argument i is NOT passed in farg_reg[i] if it is float.  It is passed
+    // in farg_reg[j] if argument i is the j-th float argument of this call.
+    //
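+    // E.g. in a call (int, float, double, float) the three FP args are the
+    // 1st, 2nd and 3rd float args of the call and thus land in F1, F2 and
+    // F3, regardless of their overall argument positions.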
+    case T_FLOAT:
+      if (freg < Argument::n_float_register_parameters_c) {
+        // Put float in register ...
+        reg = farg_reg[freg];
+        ++freg;
+
+        // Argument i for i >= Argument::n_regs_not_on_stack_c is placed
+        // on the stack even if it's placed in a register (if it's a
+        // float arg). AIX disassembly shows that xlC places these float
+        // args on the stack AND in a register. This is not documented,
+        // but we follow this convention, too.
+        if (arg >= Argument::n_regs_not_on_stack_c) {
+          // ... and on the stack.
+          guarantee(regs2 != NULL, "must pass float in register and stack slot");
+          VMReg reg2 = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
+          regs2[i].set1(reg2);
+          stk += inc_stk_for_intfloat;
+        }
+
+      } else {
+        // Put float on stack.
+        reg = VMRegImpl::stack2reg(stk LINUX_ONLY(+1));
+        stk += inc_stk_for_intfloat;
+      }
+      regs[i].set1(reg);
+      break;
+    case T_DOUBLE:
+      assert(sig_bt[i+1] == T_VOID, "expecting half");
+      if (freg < Argument::n_float_register_parameters_c) {
+        // Put double in register ...
+        reg = farg_reg[freg];
+        ++freg;
+
+        // Argument i for i >= Argument::n_regs_not_on_stack_c is placed
+        // on the stack even if it's placed in a register (if it's a
+        // double arg). AIX disassembly shows that xlC places these
+        // double args on the stack AND in a register. This is not
+        // documented, but we follow this convention, too.
+        if (arg >= Argument::n_regs_not_on_stack_c) {
+          // ... and on the stack.
+          guarantee(regs2 != NULL, "must pass double in register and stack slot");
+          VMReg reg2 = VMRegImpl::stack2reg(stk);
+          regs2[i].set2(reg2);
+          stk += inc_stk_for_longdouble;
+        }
+      } else {
+        // Put double on stack.
+        reg = VMRegImpl::stack2reg(stk);
+        stk += inc_stk_for_longdouble;
+      }
+      regs[i].set2(reg);
+      break;
+
+    case T_VOID:
+      // Do not count halves.
+      regs[i].set_bad();
+      --arg;
+      break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
+
+  return round_to(stk, 2);
+}
+#endif // COMPILER2
+
+static address gen_c2i_adapter(MacroAssembler *masm,
+                            int total_args_passed,
+                            int comp_args_on_stack,
+                            const BasicType *sig_bt,
+                            const VMRegPair *regs,
+                            Label& call_interpreter,
+                            const Register& ientry) {
+
+  address c2i_entrypoint;
+
+  const Register sender_SP = R21_sender_SP; // == R21_tmp1
+  const Register code      = R22_tmp2;
+  //const Register ientry  = R23_tmp3;
+  const Register value_regs[] = { R24_tmp4, R25_tmp5, R26_tmp6 };
+  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
+  int value_regs_index = 0;
+
+  const Register return_pc = R27_tmp7;
+  const Register tmp       = R28_tmp8;
+
+  assert_different_registers(sender_SP, code, ientry, return_pc, tmp);
+
+  // Adapter needs TOP_IJAVA_FRAME_ABI.
+  const int adapter_size = frame::top_ijava_frame_abi_size +
+                           round_to(total_args_passed * wordSize, frame::alignment_in_bytes);
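+  // E.g. for 3 interpreter args this adds round_to(3 * 8, 16) = 32 bytes on
+  // top of top_ijava_frame_abi_size (assuming 8-byte words and 16-byte
+  // frame alignment on ppc64).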
+
+  // regular (verified) c2i entry point
+  c2i_entrypoint = __ pc();
+
+  // Does compiled code exist? If yes, patch the caller's callsite.
+  __ ld(code, method_(code));
+  __ cmpdi(CCR0, code, 0);
+  __ ld(ientry, method_(interpreter_entry)); // preloaded
+  __ beq(CCR0, call_interpreter);
+
+
+  // Patch the caller's callsite: method_(code) was not NULL, which means
+  // that compiled code exists.
+  __ mflr(return_pc);
+  __ std(return_pc, _abi(lr), R1_SP);
+  RegisterSaver::push_frame_and_save_argument_registers(masm, tmp, adapter_size, total_args_passed, regs);
+
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), R19_method, return_pc);
+
+  RegisterSaver::restore_argument_registers_and_pop_frame(masm, adapter_size, total_args_passed, regs);
+  __ ld(return_pc, _abi(lr), R1_SP);
+  __ ld(ientry, method_(interpreter_entry)); // preloaded
+  __ mtlr(return_pc);
+
+
+  // Call the interpreter.
+  __ BIND(call_interpreter);
+  __ mtctr(ientry);
+
+  // Get a copy of the current SP for loading caller's arguments.
+  __ mr(sender_SP, R1_SP);
+
+  // Add space for the adapter.
+  __ resize_frame(-adapter_size, R12_scratch2);
+
+  int st_off = adapter_size - wordSize;
+
+  // Write the args into the outgoing interpreter space.
+  for (int i = 0; i < total_args_passed; i++) {
+    VMReg r_1 = regs[i].first();
+    VMReg r_2 = regs[i].second();
+    if (!r_1->is_valid()) {
+      assert(!r_2->is_valid(), "");
+      continue;
+    }
+    if (r_1->is_stack()) {
+      Register tmp_reg = value_regs[value_regs_index];
+      value_regs_index = (value_regs_index + 1) % num_value_regs;
+      // The calling convention produces OptoRegs that ignore the out
+      // preserve area (JIT's ABI). We must account for it here.
+      int ld_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
+      if (!r_2->is_valid()) {
+        __ lwz(tmp_reg, ld_off, sender_SP);
+      } else {
+        __ ld(tmp_reg, ld_off, sender_SP);
+      }
+      // Pretend stack targets were loaded into tmp_reg.
+      r_1 = tmp_reg->as_VMReg();
+    }
+
+    if (r_1->is_Register()) {
+      Register r = r_1->as_Register();
+      if (!r_2->is_valid()) {
+        __ stw(r, st_off, R1_SP);
+        st_off-=wordSize;
+      } else {
+        // Longs are given 2 64-bit slots in the interpreter, but the
+        // data is passed in only 1 slot.
+        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+          DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
+          st_off-=wordSize;
+        }
+        __ std(r, st_off, R1_SP);
+        st_off-=wordSize;
+      }
+    } else {
+      assert(r_1->is_FloatRegister(), "");
+      FloatRegister f = r_1->as_FloatRegister();
+      if (!r_2->is_valid()) {
+        __ stfs(f, st_off, R1_SP);
+        st_off-=wordSize;
+      } else {
+        // In 64bit, doubles are given 2 64-bit slots in the interpreter, but the
+        // data is passed in only 1 slot.
+        // One of these should get known junk...
+        DEBUG_ONLY( __ li(tmp, 0); __ std(tmp, st_off, R1_SP); )
+        st_off-=wordSize;
+        __ stfd(f, st_off, R1_SP);
+        st_off-=wordSize;
+      }
+    }
+  }
+
+  // Jump to the interpreter just as if the interpreter was doing it.
+
+#ifdef CC_INTERP
+  const Register tos = R17_tos;
+#endif
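+  // Note: tos is only defined for CC_INTERP builds; this adapter therefore
+  // assumes the C++ interpreter, the only interpreter configuration the
+  // PPC64 port supports at this point.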
+
+  // load TOS
+  __ addi(tos, R1_SP, st_off);
+
+  // Frame_manager expects initial_caller_sp (= SP without resize by c2i) in R21_tmp1.
+  assert(sender_SP == R21_sender_SP, "passing initial caller's SP in wrong register");
+  __ bctr();
+
+  return c2i_entrypoint;
+}
+
+static void gen_i2c_adapter(MacroAssembler *masm,
+                            int total_args_passed,
+                            int comp_args_on_stack,
+                            const BasicType *sig_bt,
+                            const VMRegPair *regs) {
+
+  // Load method's entry-point from methodOop.
+  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
+  __ mtctr(R12_scratch2);
+
+  // We will only enter here from an interpreted frame and never from after
+  // passing thru a c2i. Azul allowed this but we do not. If we lose the
+  // race and use a c2i we will remain interpreted for the race loser(s).
+  // This removes all sorts of headaches on the x86 side and also eliminates
+  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.
+
+  // Note: the sender SP is passed in on entry. We must preserve it since
+  // we may do an i2c -> c2i transition if we lose a race where compiled
+  // code goes non-entrant while we get args ready.
+  // In addition, we must align the stack to 16 bytes on an i2c entry,
+  // else we lose the alignment we expect in all compiled code, and the
+  // register save code can fail on an improperly aligned stack pointer.
+
+#ifdef CC_INTERP
+  const Register ld_ptr = R17_tos;
+#endif
+  const Register value_regs[] = { R22_tmp2, R23_tmp3, R24_tmp4, R25_tmp5, R26_tmp6 };
+  const int num_value_regs = sizeof(value_regs) / sizeof(Register);
+  int value_regs_index = 0;
+
+  int ld_offset = total_args_passed*wordSize;
+
+  // Cut-out for having no stack args. Since up to 8 int/oop args are passed
+  // in registers, we will occasionally have no stack args.
+  int comp_words_on_stack = 0;
+  if (comp_args_on_stack) {
+    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
+    // registers are below. By subtracting stack0, we either get a negative
+    // number (all values in registers) or the maximum stack slot accessed.
+
+    // Convert 4-byte c2 stack slots to words.
+    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
+    // Round up to minimum stack alignment, in wordSize.
+    comp_words_on_stack = round_to(comp_words_on_stack, 2);
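+    // E.g. 5 compiled arg slots: round_to(5 * 4, 8) >> 3 = 3 words, rounded
+    // up to 4 words to keep the stack 16-byte aligned.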
+    __ resize_frame(-comp_words_on_stack * wordSize, R11_scratch1);
+  }
+
+  // Now generate the shuffle code.  Pick up all register args and move the
+  // rest through the value registers.
+  BLOCK_COMMENT("Shuffle arguments");
+  for (int i = 0; i < total_args_passed; i++) {
+    if (sig_bt[i] == T_VOID) {
+      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
+      continue;
+    }
+
+    // Pick up 0, 1 or 2 words from ld_ptr.
+    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
+            "scrambled load targets?");
+    VMReg r_1 = regs[i].first();
+    VMReg r_2 = regs[i].second();
+    if (!r_1->is_valid()) {
+      assert(!r_2->is_valid(), "");
+      continue;
+    }
+    if (r_1->is_FloatRegister()) {
+      if (!r_2->is_valid()) {
+        __ lfs(r_1->as_FloatRegister(), ld_offset, ld_ptr);
+        ld_offset-=wordSize;
+      } else {
+        // Skip the unused interpreter slot.
+        __ lfd(r_1->as_FloatRegister(), ld_offset-wordSize, ld_ptr);
+        ld_offset-=2*wordSize;
+      }
+    } else {
+      Register r;
+      if (r_1->is_stack()) {
+        // Must do a memory to memory move thru "value".
+        r = value_regs[value_regs_index];
+        value_regs_index = (value_regs_index + 1) % num_value_regs;
+      } else {
+        r = r_1->as_Register();
+      }
+      if (!r_2->is_valid()) {
+        // Not sure we need to do this but it shouldn't hurt.
+        if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ADDRESS || sig_bt[i] == T_ARRAY) {
+          __ ld(r, ld_offset, ld_ptr);
+          ld_offset-=wordSize;
+        } else {
+          __ lwz(r, ld_offset, ld_ptr);
+          ld_offset-=wordSize;
+        }
+      } else {
+        // In 64bit, longs are given 2 64-bit slots in the interpreter, but the
+        // data is passed in only 1 slot.
+        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+          ld_offset-=wordSize;
+        }
+        __ ld(r, ld_offset, ld_ptr);
+        ld_offset-=wordSize;
+      }
+
+      if (r_1->is_stack()) {
+        // Now store value where the compiler expects it
+        int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots())*VMRegImpl::stack_slot_size;
+
+        if (sig_bt[i] == T_INT   || sig_bt[i] == T_FLOAT || sig_bt[i] == T_BOOLEAN ||
+            sig_bt[i] == T_SHORT || sig_bt[i] == T_CHAR  || sig_bt[i] == T_BYTE) {
+          __ stw(r, st_off, R1_SP);
+        } else {
+          __ std(r, st_off, R1_SP);
+        }
+      }
+    }
+  }
+
+  BLOCK_COMMENT("Store method oop");
+  // Store method oop into thread->callee_target.
+  // We might end up in handle_wrong_method if the callee is
+  // deoptimized as we race thru here. If that happens we don't want
+  // to take a safepoint because the caller frame will look
+  // interpreted and arguments are now "compiled" so it is much better
+  // to make this transition invisible to the stack walking
+  // code. Unfortunately if we try and find the callee by normal means
+  // a safepoint is possible. So we stash the desired callee in the
+  // thread, and the VM will find it there should this case occur.
+  __ std(R19_method, thread_(callee_target));
+
+  // Jump to the compiled code just as if the compiled code was doing it.
+  __ bctr();
+}
+
+AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
+                                                            int total_args_passed,
+                                                            int comp_args_on_stack,
+                                                            const BasicType *sig_bt,
+                                                            const VMRegPair *regs,
+                                                            AdapterFingerPrint* fingerprint) {
+  address i2c_entry;
+  address c2i_unverified_entry;
+  address c2i_entry;
+
+
+  // entry: i2c
+
+  __ align(CodeEntryAlignment);
+  i2c_entry = __ pc();
+  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
+
+
+  // entry: c2i unverified
+
+  __ align(CodeEntryAlignment);
+  BLOCK_COMMENT("c2i unverified entry");
+  c2i_unverified_entry = __ pc();
+
+  // inline_cache contains a compiledICHolder
+  const Register ic             = R19_method;
+  const Register ic_klass       = R11_scratch1;
+  const Register receiver_klass = R12_scratch2;
+  const Register code           = R21_tmp1;
+  const Register ientry         = R23_tmp3;
+
+  assert_different_registers(ic, ic_klass, receiver_klass, R3_ARG1, code, ientry);
+  assert(R11_scratch1 == R11, "need prologue scratch register");
+
+  Label call_interpreter;
+
+  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
+         "klass offset should reach into any page");
+  // Check for NULL argument if we don't have implicit null checks.
+  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      __ trap_null_check(R3_ARG1);
+    } else {
+      Label valid;
+      __ cmpdi(CCR0, R3_ARG1, 0);
+      __ bne_predict_taken(CCR0, valid);
+      // We have a null argument, branch to ic_miss_stub.
+      __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
+                       relocInfo::runtime_call_type);
+      __ BIND(valid);
+    }
+  }
+  // Assume argument is not NULL, load klass from receiver.
+  __ load_klass(receiver_klass, R3_ARG1);
+
+  __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
+
+  if (TrapBasedICMissChecks) {
+    __ trap_ic_miss_check(receiver_klass, ic_klass);
+  } else {
+    Label valid;
+    __ cmpd(CCR0, receiver_klass, ic_klass);
+    __ beq_predict_taken(CCR0, valid);
+    // We have an unexpected klass, branch to ic_miss_stub.
+    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
+                     relocInfo::runtime_call_type);
+    __ BIND(valid);
+  }
+
+  // Argument is valid and klass is as expected, continue.
+
+  // Extract method from inline cache, verified entry point needs it.
+  __ ld(R19_method, CompiledICHolder::holder_method_offset(), ic);
+  assert(R19_method == ic, "the inline cache register is dead here");
+
+  __ ld(code, method_(code));
+  __ cmpdi(CCR0, code, 0);
+  __ ld(ientry, method_(interpreter_entry)); // preloaded
+  __ beq_predict_taken(CCR0, call_interpreter);
+
+  // Branch to ic_miss_stub.
+  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
+
+  // entry: c2i
+
+  c2i_entry = gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, call_interpreter, ientry);
+
+  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
+}
+
+#ifdef COMPILER2
+// An oop arg. Must pass a handle not the oop itself.
+static void object_move(MacroAssembler* masm,
+                        int frame_size_in_slots,
+                        OopMap* oop_map, int oop_handle_offset,
+                        bool is_receiver, int* receiver_offset,
+                        VMRegPair src, VMRegPair dst,
+                        Register r_caller_sp, Register r_temp_1, Register r_temp_2) {
+  assert(!is_receiver || (*receiver_offset == -1),
+         "receiver has already been moved");
+
+  // We must pass a handle. First figure out the location we use as a handle.
+
+  if (src.first()->is_stack()) {
+    // stack to stack or reg
+
+    const Register r_handle = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
+    Label skip;
+    const int oop_slot_in_callers_frame = reg2slot(src.first());
+
+    guarantee(!is_receiver, "expecting receiver in register");
+    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot_in_callers_frame + frame_size_in_slots));
+
+    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
+    __ ld(  r_temp_2, reg2offset(src.first()), r_caller_sp);
+    __ cmpdi(CCR0, r_temp_2, 0);
+    __ bne(CCR0, skip);
+    // Use a NULL handle if oop is NULL.
+    __ li(r_handle, 0);
+    __ bind(skip);
+
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ std(r_handle, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      // Nothing to do, r_handle is already the dst register.
+    }
+  } else {
+    // reg to stack or reg
+    const Register r_oop      = src.first()->as_Register();
+    const Register r_handle   = dst.first()->is_stack() ? r_temp_1 : dst.first()->as_Register();
+    const int oop_slot        = (r_oop->encoding()-R3_ARG1->encoding()) * VMRegImpl::slots_per_word
+                                + oop_handle_offset; // in slots
+    const int oop_offset = oop_slot * VMRegImpl::stack_slot_size;
+    Label skip;
+
+    if (is_receiver) {
+      *receiver_offset = oop_offset;
+    }
+    oop_map->set_oop(VMRegImpl::stack2reg(oop_slot));
+
+    __ std( r_oop,    oop_offset, R1_SP);
+    __ addi(r_handle, R1_SP, oop_offset);
+
+    __ cmpdi(CCR0, r_oop, 0);
+    __ bne(CCR0, skip);
+    // Use a NULL handle if oop is NULL.
+    __ li(r_handle, 0);
+    __ bind(skip);
+
+    if (dst.first()->is_stack()) {
+      // reg to stack
+      __ std(r_handle, reg2offset(dst.first()), R1_SP);
+    } else {
+      // reg to reg
+      // Nothing to do, r_handle is already the dst register.
+    }
+  }
+}
+
+static void int_move(MacroAssembler* masm,
+                     VMRegPair src, VMRegPair dst,
+                     Register r_caller_sp, Register r_temp) {
+  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long-int");
+  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
+
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ lwa(r_temp, reg2offset(src.first()), r_caller_sp);
+      __ std(r_temp, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      __ lwa(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ extsw(r_temp, src.first()->as_Register());
+    __ std(r_temp, reg2offset(dst.first()), R1_SP);
+  } else {
+    // reg to reg
+    __ extsw(dst.first()->as_Register(), src.first()->as_Register());
+  }
+}
+
+static void long_move(MacroAssembler* masm,
+                      VMRegPair src, VMRegPair dst,
+                      Register r_caller_sp, Register r_temp) {
+  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be long");
+  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be long");
+
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
+      __ std(r_temp, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
+  } else {
+    // reg to reg
+    if (dst.first()->as_Register() != src.first()->as_Register())
+      __ mr(dst.first()->as_Register(), src.first()->as_Register());
+  }
+}
+
+static void float_move(MacroAssembler* masm,
+                       VMRegPair src, VMRegPair dst,
+                       Register r_caller_sp, Register r_temp) {
+  assert(src.first()->is_valid() && !src.second()->is_valid(), "incoming must be float");
+  assert(dst.first()->is_valid() && !dst.second()->is_valid(), "outgoing must be float");
+
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ lwz(r_temp, reg2offset(src.first()), r_caller_sp);
+      __ stw(r_temp, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      __ lfs(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ stfs(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
+  } else {
+    // reg to reg
+    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
+      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
+  }
+}
+
+static void double_move(MacroAssembler* masm,
+                        VMRegPair src, VMRegPair dst,
+                        Register r_caller_sp, Register r_temp) {
+  assert(src.first()->is_valid() && src.second() == src.first()->next(), "incoming must be double");
+  assert(dst.first()->is_valid() && dst.second() == dst.first()->next(), "outgoing must be double");
+
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ ld( r_temp, reg2offset(src.first()), r_caller_sp);
+      __ std(r_temp, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      __ lfd(dst.first()->as_FloatRegister(), reg2offset(src.first()), r_caller_sp);
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ stfd(src.first()->as_FloatRegister(), reg2offset(dst.first()), R1_SP);
+  } else {
+    // reg to reg
+    if (dst.first()->as_FloatRegister() != src.first()->as_FloatRegister())
+      __ fmr(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
+  }
+}
+
+void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
+  switch (ret_type) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:
+      __ stw (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_ARRAY:
+    case T_OBJECT:
+    case T_LONG:
+      __ std (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_FLOAT:
+      __ stfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_DOUBLE:
+      __ stfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_VOID:
+      break;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
+}
+
+void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
+  switch (ret_type) {
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_BYTE:
+    case T_SHORT:
+    case T_INT:
+      __ lwz(R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_ARRAY:
+    case T_OBJECT:
+    case T_LONG:
+      __ ld (R3_RET,  frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_FLOAT:
+      __ lfs(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_DOUBLE:
+      __ lfd(F1_RET, frame_slots*VMRegImpl::stack_slot_size, R1_SP);
+      break;
+    case T_VOID:
+      break;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
+}
+
+static void save_or_restore_arguments(MacroAssembler* masm,
+                                      const int stack_slots,
+                                      const int total_in_args,
+                                      const int arg_save_area,
+                                      OopMap* map,
+                                      VMRegPair* in_regs,
+                                      BasicType* in_sig_bt) {
+  // If map is non-NULL then the code should store the values,
+  // otherwise it should load them.
+  int slot = arg_save_area;
+  // Save down double word first.
+  for (int i = 0; i < total_in_args; i++) {
+    if (in_regs[i].first()->is_FloatRegister() && in_sig_bt[i] == T_DOUBLE) {
+      int offset = slot * VMRegImpl::stack_slot_size;
+      slot += VMRegImpl::slots_per_word;
+      assert(slot <= stack_slots, "overflow (after DOUBLE stack slot)");
+      if (map != NULL) {
+        __ stfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
+      } else {
+        __ lfd(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
+      }
+    } else if (in_regs[i].first()->is_Register() &&
+        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
+      int offset = slot * VMRegImpl::stack_slot_size;
+      if (map != NULL) {
+        __ std(in_regs[i].first()->as_Register(), offset, R1_SP);
+        if (in_sig_bt[i] == T_ARRAY) {
+          map->set_oop(VMRegImpl::stack2reg(slot));
+        }
+      } else {
+        __ ld(in_regs[i].first()->as_Register(), offset, R1_SP);
+      }
+      slot += VMRegImpl::slots_per_word;
+      assert(slot <= stack_slots, "overflow (after LONG/ARRAY stack slot)");
+    }
+  }
+  // Save or restore single word registers.
+  for (int i = 0; i < total_in_args; i++) {
+    // PPC64: pass ints as longs: must only deal with floats here.
+    if (in_regs[i].first()->is_FloatRegister()) {
+      if (in_sig_bt[i] == T_FLOAT) {
+        int offset = slot * VMRegImpl::stack_slot_size;
+        slot++;
+        assert(slot <= stack_slots, "overflow (after FLOAT stack slot)");
+        if (map != NULL) {
+          __ stfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
+        } else {
+          __ lfs(in_regs[i].first()->as_FloatRegister(), offset, R1_SP);
+        }
+      }
+    } else if (in_regs[i].first()->is_stack()) {
+      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
+        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
+        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
+      }
+    }
+  }
+}
+
+// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// keeps a new JNI critical region from starting until a GC has been
+// forced. Save down any oops in registers and describe them in an
+// OopMap.
+static void check_needs_gc_for_critical_native(MacroAssembler* masm,
+                                               const int stack_slots,
+                                               const int total_in_args,
+                                               const int arg_save_area,
+                                               OopMapSet* oop_maps,
+                                               VMRegPair* in_regs,
+                                               BasicType* in_sig_bt,
+                                               Register tmp_reg ) {
+  __ block_comment("check GC_locker::needs_gc");
+  Label cont;
+  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
+  __ cmplwi(CCR0, tmp_reg, 0);
+  __ beq(CCR0, cont);
+
+  // Save down any values that are live in registers and call into the
+  // runtime to halt for a GC.
+  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, map, in_regs, in_sig_bt);
+
+  __ mr(R3_ARG1, R16_thread);
+  __ set_last_Java_frame(R1_SP, noreg);
+
+  __ block_comment("block_for_jni_critical");
+  address entry_point = CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical);
+  __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, entry_point), relocInfo::runtime_call_type);
+  address start           = __ pc() - __ offset(),
+          calls_return_pc = __ last_calls_return_pc();
+  oop_maps->add_gc_map(calls_return_pc - start, map);
+
+  __ reset_last_Java_frame();
+
+  // Reload all the register arguments.
+  save_or_restore_arguments(masm, stack_slots, total_in_args,
+                            arg_save_area, NULL, in_regs, in_sig_bt);
+
+  __ BIND(cont);
+
+#ifdef ASSERT
+  if (StressCriticalJNINatives) {
+    // Stress register saving.
+    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, map, in_regs, in_sig_bt);
+    // Destroy argument registers.
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        __ neg(reg, reg);
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        __ fneg(in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
+      }
+    }
+
+    save_or_restore_arguments(masm, stack_slots, total_in_args,
+                              arg_save_area, NULL, in_regs, in_sig_bt);
+  }
+#endif
+}
+
+static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst, Register r_caller_sp, Register r_temp) {
+  if (src.first()->is_stack()) {
+    if (dst.first()->is_stack()) {
+      // stack to stack
+      __ ld(r_temp, reg2offset(src.first()), r_caller_sp);
+      __ std(r_temp, reg2offset(dst.first()), R1_SP);
+    } else {
+      // stack to reg
+      __ ld(dst.first()->as_Register(), reg2offset(src.first()), r_caller_sp);
+    }
+  } else if (dst.first()->is_stack()) {
+    // reg to stack
+    __ std(src.first()->as_Register(), reg2offset(dst.first()), R1_SP);
+  } else {
+    if (dst.first() != src.first()) {
+      __ mr(dst.first()->as_Register(), src.first()->as_Register());
+    }
+  }
+}
+
+// Unpack an array argument into a pointer to the body and the length
+// if the array is non-null, otherwise pass 0 for both.
+static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type,
+                                  VMRegPair body_arg, VMRegPair length_arg, Register r_caller_sp,
+                                  Register tmp_reg, Register tmp2_reg) {
+  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
+         "possible collision");
+
+  // Pass the length, ptr pair.
+  Label set_out_args;
+  VMRegPair tmp, tmp2;
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  tmp2.set_ptr(tmp2_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack.
+    move_ptr(masm, reg, tmp, r_caller_sp, /*unused*/ R0);
+    reg = tmp;
+  }
+  __ li(tmp2_reg, 0); // Pass zeros if Array=null.
+  if (tmp_reg != reg.first()->as_Register()) __ li(tmp_reg, 0);
+  __ cmpdi(CCR0, reg.first()->as_Register(), 0);
+  __ beq(CCR0, set_out_args);
+  __ lwa(tmp2_reg, arrayOopDesc::length_offset_in_bytes(), reg.first()->as_Register());
+  __ addi(tmp_reg, reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type));
+  __ bind(set_out_args);
+  move_ptr(masm, tmp, body_arg, r_caller_sp, /*unused*/ R0);
+  move_ptr(masm, tmp2, length_arg, r_caller_sp, /*unused*/ R0); // Same as move32_64 on PPC64.
+}
+
+static void verify_oop_args(MacroAssembler* masm,
+                            methodHandle method,
+                            const BasicType* sig_bt,
+                            const VMRegPair* regs) {
+  Register temp_reg = R19_method;  // not part of any compiled calling seq
+  if (VerifyOops) {
+    for (int i = 0; i < method->size_of_parameters(); i++) {
+      if (sig_bt[i] == T_OBJECT ||
+          sig_bt[i] == T_ARRAY) {
+        VMReg r = regs[i].first();
+        assert(r->is_valid(), "bad oop arg");
+        if (r->is_stack()) {
+          __ ld(temp_reg, reg2offset(r), R1_SP);
+          __ verify_oop(temp_reg);
+        } else {
+          __ verify_oop(r->as_Register());
+        }
+      }
+    }
+  }
+}
+
+static void gen_special_dispatch(MacroAssembler* masm,
+                                 methodHandle method,
+                                 const BasicType* sig_bt,
+                                 const VMRegPair* regs) {
+  verify_oop_args(masm, method, sig_bt, regs);
+  vmIntrinsics::ID iid = method->intrinsic_id();
+
+  // Now write the args into the outgoing interpreter space
+  bool     has_receiver   = false;
+  Register receiver_reg   = noreg;
+  int      member_arg_pos = -1;
+  Register member_reg     = noreg;
+  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
+  if (ref_kind != 0) {
+    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
+    member_reg = R19_method;  // known to be free at this point
+    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
+  } else if (iid == vmIntrinsics::_invokeBasic) {
+    has_receiver = true;
+  } else {
+    fatal(err_msg_res("unexpected intrinsic id %d", iid));
+  }
+
+  if (member_reg != noreg) {
+    // Load the member_arg into register, if necessary.
+    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
+    VMReg r = regs[member_arg_pos].first();
+    if (r->is_stack()) {
+      __ ld(member_reg, reg2offset(r), R1_SP);
+    } else {
+      // no data motion is needed
+      member_reg = r->as_Register();
+    }
+  }
+
+  if (has_receiver) {
+    // Make sure the receiver is loaded into a register.
+    assert(method->size_of_parameters() > 0, "oob");
+    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
+    VMReg r = regs[0].first();
+    assert(r->is_valid(), "bad receiver arg");
+    if (r->is_stack()) {
+      // Porting note:  This assumes that compiled calling conventions always
+      // pass the receiver oop in a register.  If this is not true on some
+      // platform, pick a temp and load the receiver from stack.
+      fatal("receiver always in a register");
+      receiver_reg = R11_scratch1;  // TODO (hs24): is R11_scratch1 really free at this point?
+      __ ld(receiver_reg, reg2offset(r), R1_SP);
+    } else {
+      // no data motion is needed
+      receiver_reg = r->as_Register();
+    }
+  }
+
+  // Figure out which address we are really jumping to:
+  MethodHandles::generate_method_handle_dispatch(masm, iid,
+                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
+}
+
+#endif // COMPILER2
+
+// ---------------------------------------------------------------------------
+// Generate a native wrapper for a given method. The method takes arguments
+// in the Java compiled code convention, marshals them to the native
+// convention (handlizes oops, etc), transitions to native, makes the call,
+// returns to java state (possibly blocking), unhandlizes any result and
+// returns.
+//
+// Critical native functions are a shorthand for the use of
+// GetPrimitiveArrayCritical and disallow the use of any other JNI
+// functions.  The wrapper is expected to unpack the arguments before
+// passing them to the callee and perform checks before and after the
+// native call to ensure that the GC_locker
+// lock_critical/unlock_critical semantics are followed.  Some other
+// parts of JNI setup are skipped, like the tear down of the JNI handle
+// block and the check for pending exceptions, since it's impossible
+// for them to be thrown.
+//
+// They are roughly structured like this:
+//   if (GC_locker::needs_gc())
+//     SharedRuntime::block_for_jni_critical();
+//   transition to thread_in_native
+//   unpack array arguments and call native entry point
+//   check for safepoint in progress
+//   check if any thread suspend flags are set
+//     call into JVM and possibly unlock the JNI critical
+//     if a GC was suppressed while in the critical native.
+//   transition back to thread_in_Java
+//   return to caller
+//
+nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
+                                                methodHandle method,
+                                                int compile_id,
+                                                BasicType *in_sig_bt,
+                                                VMRegPair *in_regs,
+                                                BasicType ret_type) {
+#ifdef COMPILER2
+  if (method->is_method_handle_intrinsic()) {
+    vmIntrinsics::ID iid = method->intrinsic_id();
+    intptr_t start = (intptr_t)__ pc();
+    int vep_offset = ((intptr_t)__ pc()) - start;
+    gen_special_dispatch(masm,
+                         method,
+                         in_sig_bt,
+                         in_regs);
+    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
+    __ flush();
+    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
+    return nmethod::new_native_nmethod(method,
+                                       compile_id,
+                                       masm->code(),
+                                       vep_offset,
+                                       frame_complete,
+                                       stack_slots / VMRegImpl::slots_per_word,
+                                       in_ByteSize(-1),
+                                       in_ByteSize(-1),
+                                       (OopMapSet*)NULL);
+  }
+
+  bool is_critical_native = true;
+  address native_func = method->critical_native_function();
+  if (native_func == NULL) {
+    native_func = method->native_function();
+    is_critical_native = false;
+  }
+  assert(native_func != NULL, "must have function");
+
+  // First, create signature for outgoing C call
+  // --------------------------------------------------------------------------
+
+  int total_in_args = method->size_of_parameters();
+  // We have received a description of where all the java args are located
+  // on entry to the wrapper. We need to convert these args to where
+  // the jni function will expect them. To figure out where they go
+  // we convert the java signature to a C signature by inserting
+  // the hidden arguments as arg[0] and possibly arg[1] (static method)
+  //
+  // Additionally, on ppc64 we must convert integers to longs in the C
+  // signature. We do this in advance in order to have no trouble with
+  // indexes into the bt-arrays.
+  // So convert the signature and registers now, and adjust the total number
+  // of in-arguments accordingly.
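+  // E.g. an incoming Java signature (jint, jobject) is rewritten to
+  // (T_LONG, T_INT, T_OBJECT): each int-like arg is widened to a
+  // (T_LONG, bt) pair, which is the promotion the asserts in
+  // c_calling_convention expect.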
+  int i2l_argcnt = convert_ints_to_longints_argcnt(total_in_args, in_sig_bt); // PPC64: pass ints as longs.
+
+  // Calculate the total number of C arguments and create arrays for the
+  // signature and the outgoing registers.
+  // On ppc64, we have two arrays for the outgoing registers, because
+  // some floating-point arguments must be passed in registers _and_
+  // in stack locations.
+  bool method_is_static = method->is_static();
+  int  total_c_args     = i2l_argcnt;
+
+  if (!is_critical_native) {
+    int n_hidden_args = method_is_static ? 2 : 1;
+    total_c_args += n_hidden_args;
+  } else {
+    // No JNIEnv*, no this*, but unpacked arrays (base+length).
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        total_c_args += 2; // PPC64: T_LONG, T_INT, T_ADDRESS (see convert_ints_to_longints and c_calling_convention)
+      }
+    }
+  }
+
+  BasicType *out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
+  VMRegPair *out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  VMRegPair *out_regs2  = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
+  BasicType* in_elem_bt = NULL;
+
+  // Create the signature for the C call:
+  //   1) add the JNIEnv*
+  //   2) add the class if the method is static
+  //   3) copy the rest of the incoming signature (shifted by the number of
+  //      hidden arguments).
+
+  int argc = 0;
+  if (!is_critical_native) {
+    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
+
+    out_sig_bt[argc++] = T_ADDRESS;
+    if (method->is_static()) {
+      out_sig_bt[argc++] = T_OBJECT;
+    }
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      out_sig_bt[argc++] = in_sig_bt[i];
+    }
+  } else {
+    Thread* THREAD = Thread::current();
+    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
+    SignatureStream ss(method->signature());
+    int o = 0;
+    for (int i = 0; i < total_in_args ; i++, o++) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair
+        Symbol* atype = ss.as_symbol(CHECK_NULL);
+        const char* at = atype->as_C_string();
+        if (strlen(at) == 2) {
+          assert(at[0] == '[', "must be");
+          switch (at[1]) {
+            case 'B': in_elem_bt[o] = T_BYTE; break;
+            case 'C': in_elem_bt[o] = T_CHAR; break;
+            case 'D': in_elem_bt[o] = T_DOUBLE; break;
+            case 'F': in_elem_bt[o] = T_FLOAT; break;
+            case 'I': in_elem_bt[o] = T_INT; break;
+            case 'J': in_elem_bt[o] = T_LONG; break;
+            case 'S': in_elem_bt[o] = T_SHORT; break;
+            case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
+            default: ShouldNotReachHere();
+          }
+        }
+      } else {
+        in_elem_bt[o] = T_VOID;
+        switch(in_sig_bt[i]) { // PPC64: pass ints as longs.
+          case T_BOOLEAN:
+          case T_CHAR:
+          case T_BYTE:
+          case T_SHORT:
+          case T_INT: in_elem_bt[++o] = T_VOID; break;
+          default: break;
+        }
+      }
+      if (in_sig_bt[i] != T_VOID) {
+        assert(in_sig_bt[i] == ss.type(), "must match");
+        ss.next();
+      }
+    }
+    assert(i2l_argcnt==o, "must match");
+
+    convert_ints_to_longints(i2l_argcnt, total_in_args, in_sig_bt, in_regs); // PPC64: pass ints as longs.
+
+    for (int i = 0; i < total_in_args ; i++ ) {
+      if (in_sig_bt[i] == T_ARRAY) {
+        // Arrays are passed as int, elem* pair.
+        out_sig_bt[argc++] = T_LONG; // PPC64: pass ints as longs.
+        out_sig_bt[argc++] = T_INT;
+        out_sig_bt[argc++] = T_ADDRESS;
+      } else {
+        out_sig_bt[argc++] = in_sig_bt[i];
+      }
+    }
+  }
+
+
+  // Compute the wrapper's frame size.
+  // --------------------------------------------------------------------------
+
+  // Now figure out where the args must be stored and how much stack space
+  // they require.
+  //
+  // Compute framesize for the wrapper. We need to handlize all oops in
+  // incoming registers.
+  //
+  // Calculate the total number of stack slots we will need:
+  //   1) abi requirements
+  //   2) outgoing arguments
+  //   3) space for inbound oop handle area
+  //   4) space for handlizing a klass if static method
+  //   5) space for a lock if synchronized method
+  //   6) workspace for saving return values, int <-> float reg moves, etc.
+  //   7) alignment
+  //
+  // Layout of the native wrapper frame:
+  // (stack grows upwards, memory grows downwards)
+  //
+  // NW     [ABI_112]                  <-- 1) R1_SP
+  //        [outgoing arguments]       <-- 2) R1_SP + out_arg_slot_offset
+  //        [oopHandle area]           <-- 3) R1_SP + oop_handle_offset (save area for critical natives)
+  //        klass                      <-- 4) R1_SP + klass_offset
+  //        lock                       <-- 5) R1_SP + lock_offset
+  //        [workspace]                <-- 6) R1_SP + workspace_offset
+  //        [alignment] (optional)     <-- 7)
+  // caller [JIT_TOP_ABI_48]           <-- r_callers_sp
+  //
+  // - *_slot_offset Indicates offset from SP in number of stack slots.
+  // - *_offset      Indicates offset from SP in bytes.
+
+  int stack_slots = c_calling_convention(out_sig_bt, out_regs, out_regs2, total_c_args) // 1+2)
+                  + SharedRuntime::out_preserve_stack_slots(); // See c_calling_convention.
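+  // c_calling_convention already accounts for the ABI area and the outgoing
+  // argument slots, so stack_slots covers items 1) and 2) of the layout
+  // above; items 3) to 7) are added below.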
+
+  // Now the space for the inbound oop handle area.
+  int total_save_slots = num_java_iarg_registers * VMRegImpl::slots_per_word;
+  if (is_critical_native) {
+    // Critical natives may have to call out so they need a save area
+    // for register arguments.
+    int double_slots = 0;
+    int single_slots = 0;
+    for (int i = 0; i < total_in_args; i++) {
+      if (in_regs[i].first()->is_Register()) {
+        const Register reg = in_regs[i].first()->as_Register();
+        switch (in_sig_bt[i]) {
+          case T_BOOLEAN:
+          case T_BYTE:
+          case T_SHORT:
+          case T_CHAR:
+          case T_INT:  /*single_slots++;*/ break; // PPC64: pass ints as longs.
+          case T_ARRAY:
+          case T_LONG: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      } else if (in_regs[i].first()->is_FloatRegister()) {
+        switch (in_sig_bt[i]) {
+          case T_FLOAT:  single_slots++; break;
+          case T_DOUBLE: double_slots++; break;
+          default:  ShouldNotReachHere();
+        }
+      }
+    }
+    total_save_slots = double_slots * 2 + round_to(single_slots, 2); // round to even
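+    // E.g. a critical native taking (jlong, jfloat, jfloat) in registers
+    // needs 1 * 2 + round_to(2, 2) = 4 save slots here.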
+  }
+
+  int oop_handle_slot_offset = stack_slots;
+  stack_slots += total_save_slots;                                                // 3)
+
+  int klass_slot_offset = 0;
+  int klass_offset      = -1;
+  if (method_is_static && !is_critical_native) {                                  // 4)
+    klass_slot_offset  = stack_slots;
+    klass_offset       = klass_slot_offset * VMRegImpl::stack_slot_size;
+    stack_slots       += VMRegImpl::slots_per_word;
+  }
+
+  int lock_slot_offset = 0;
+  int lock_offset      = -1;
+  if (method->is_synchronized()) {                                                // 5)
+    lock_slot_offset   = stack_slots;
+    lock_offset        = lock_slot_offset * VMRegImpl::stack_slot_size;
+    stack_slots       += VMRegImpl::slots_per_word;
+  }
+
+  int workspace_slot_offset = stack_slots;                                        // 6)
+  stack_slots         += 2;
+
+  // Now compute actual number of stack words we need.
+  // Rounding to make stack properly aligned.
+  stack_slots = round_to(stack_slots,                                             // 7)
+                         frame::alignment_in_bytes / VMRegImpl::stack_slot_size);
+  int frame_size_in_bytes = stack_slots * VMRegImpl::stack_slot_size;
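+  // With 16-byte frame alignment and 4-byte stack slots, the rounding above
+  // makes stack_slots a multiple of 4, so frame_size_in_bytes is a multiple
+  // of 16.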
+
+
+  // Now we can start generating code.
+  // --------------------------------------------------------------------------
+
+  intptr_t start_pc = (intptr_t)__ pc();
+  intptr_t vep_start_pc;
+  intptr_t frame_done_pc;
+  intptr_t oopmap_pc;
+
+  Label    ic_miss;
+  Label    handle_pending_exception;
+
+  Register r_callers_sp = R21;
+  Register r_temp_1     = R22;
+  Register r_temp_2     = R23;
+  Register r_temp_3     = R24;
+  Register r_temp_4     = R25;
+  Register r_temp_5     = R26;
+  Register r_temp_6     = R27;
+  Register r_return_pc  = R28;
+
+  Register r_carg1_jnienv        = noreg;
+  Register r_carg2_classorobject = noreg;
+  if (!is_critical_native) {
+    r_carg1_jnienv        = out_regs[0].first()->as_Register();
+    r_carg2_classorobject = out_regs[1].first()->as_Register();
+  }
+
+
+  // Generate the Unverified Entry Point (UEP).
+  // --------------------------------------------------------------------------
+  assert(start_pc == (intptr_t)__ pc(), "uep must be at start");
+
+  // Check ic: object class == cached class?
+  if (!method_is_static) {
+    Register ic = as_Register(Matcher::inline_cache_reg_encode());
+    Register receiver_klass = r_temp_1;
+
+    __ cmpdi(CCR0, R3_ARG1, 0);
+    __ beq(CCR0, ic_miss);
+    __ verify_oop(R3_ARG1);
+    __ load_klass(receiver_klass, R3_ARG1);
+
+    __ cmpd(CCR0, receiver_klass, ic);
+    __ bne(CCR0, ic_miss);
+  }
+
+
+  // Generate the Verified Entry Point (VEP).
+  // --------------------------------------------------------------------------
+  vep_start_pc = (intptr_t)__ pc();
+
+  __ save_LR_CR(r_temp_1);
+  __ generate_stack_overflow_check(frame_size_in_bytes); // Check before creating frame.
+  __ mr(r_callers_sp, R1_SP);                       // Remember frame pointer.
+  __ push_frame(frame_size_in_bytes, r_temp_1);          // Push the c2n adapter's frame.
+  frame_done_pc = (intptr_t)__ pc();
+
+  // Native nmethod wrappers never take possession of the oop arguments.
+  // So the caller will GC the arguments.
+  // The only thing we need an oopMap for is if the call is static.
+  //
+  // An OopMap for lock (and class if static), and one for the VM call itself.
+  OopMapSet *oop_maps = new OopMapSet();
+  OopMap    *oop_map  = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+
+  if (is_critical_native) {
+    check_needs_gc_for_critical_native(masm, stack_slots, total_in_args, oop_handle_slot_offset, oop_maps, in_regs, in_sig_bt, r_temp_1);
+  }
+
+  // Move arguments from register/stack to register/stack.
+  // --------------------------------------------------------------------------
+  //
+  // We immediately shuffle the arguments so that for any vm call we have
+  // to make from here on out (sync slow path, jvmti, etc.) we will have
+  // captured the oops from our caller and have a valid oopMap for them.
+  //
+  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
+  // (derived from JavaThread* which is in R16_thread) and, if static,
+  // the class mirror instead of a receiver. This pretty much guarantees that
+  // register layout will not match. We ignore these extra arguments during
+  // the shuffle. The shuffle is described by the two calling convention
+  // vectors we have in our possession. We simply walk the java vector to
+  // get the source locations and the c vector to get the destinations.
+
+  // Record sp-based slot for receiver on stack for non-static methods.
+  int receiver_offset = -1;
+
+  // We move the arguments backward because a floating point argument's
+  // destination will always be a register with a greater or equal
+  // register number, or a stack slot.
+  //   in  is the index of the incoming Java arguments
+  //   out is the index of the outgoing C arguments
+
+#ifdef ASSERT
+  bool reg_destroyed[RegisterImpl::number_of_registers];
+  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
+  for (int r = 0 ; r < RegisterImpl::number_of_registers ; r++) {
+    reg_destroyed[r] = false;
+  }
+  for (int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++) {
+    freg_destroyed[f] = false;
+  }
+#endif // ASSERT
+
+  for (int in = total_in_args - 1, out = total_c_args - 1; in >= 0 ; in--, out--) {
+
+#ifdef ASSERT
+    if (in_regs[in].first()->is_Register()) {
+      assert(!reg_destroyed[in_regs[in].first()->as_Register()->encoding()], "ack!");
+    } else if (in_regs[in].first()->is_FloatRegister()) {
+      assert(!freg_destroyed[in_regs[in].first()->as_FloatRegister()->encoding()], "ack!");
+    }
+    if (out_regs[out].first()->is_Register()) {
+      reg_destroyed[out_regs[out].first()->as_Register()->encoding()] = true;
+    } else if (out_regs[out].first()->is_FloatRegister()) {
+      freg_destroyed[out_regs[out].first()->as_FloatRegister()->encoding()] = true;
+    }
+    if (out_regs2[out].first()->is_Register()) {
+      reg_destroyed[out_regs2[out].first()->as_Register()->encoding()] = true;
+    } else if (out_regs2[out].first()->is_FloatRegister()) {
+      freg_destroyed[out_regs2[out].first()->as_FloatRegister()->encoding()] = true;
+    }
+#endif // ASSERT
+
+    switch (in_sig_bt[in]) {
+      case T_BOOLEAN:
+      case T_CHAR:
+      case T_BYTE:
+      case T_SHORT:
+      case T_INT:
+        guarantee(in > 0 && in_sig_bt[in-1] == T_LONG,
+                  "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
+        break;
+      case T_LONG:
+        if (in_sig_bt[in+1] == T_VOID) {
+          long_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
+        } else {
+          guarantee(in_sig_bt[in+1] == T_BOOLEAN || in_sig_bt[in+1] == T_CHAR  ||
+                    in_sig_bt[in+1] == T_BYTE    || in_sig_bt[in+1] == T_SHORT ||
+                    in_sig_bt[in+1] == T_INT,
+                 "expecting type (T_LONG,bt) for bt in {T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT}");
+          int_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
+        }
+        break;
+      case T_ARRAY:
+        if (is_critical_native) {
+          int body_arg = out;
+          out -= 2; // Point to length arg. PPC64: pass ints as longs.
+          unpack_array_argument(masm, in_regs[in], in_elem_bt[in], out_regs[body_arg], out_regs[out],
+                                r_callers_sp, r_temp_1, r_temp_2);
+          break;
+        }
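+        // Fall through: for regular natives an array is just an oop argument.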
+      case T_OBJECT:
+        assert(!is_critical_native, "no oop arguments");
+        object_move(masm, stack_slots,
+                    oop_map, oop_handle_slot_offset,
+                    ((in == 0) && (!method_is_static)), &receiver_offset,
+                    in_regs[in], out_regs[out],
+                    r_callers_sp, r_temp_1, r_temp_2);
+        break;
+      case T_VOID:
+        break;
+      case T_FLOAT:
+        float_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
+        if (out_regs2[out].first()->is_valid()) {
+          float_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
+        }
+        break;
+      case T_DOUBLE:
+        double_move(masm, in_regs[in], out_regs[out], r_callers_sp, r_temp_1);
+        if (out_regs2[out].first()->is_valid()) {
+          double_move(masm, in_regs[in], out_regs2[out], r_callers_sp, r_temp_1);
+        }
+        break;
+      case T_ADDRESS:
+        fatal("found type (T_ADDRESS) in java args");
+        break;
+      default:
+        ShouldNotReachHere();
+        break;
+    }
+  }
+
+  // Pre-load a static method's oop into ARG2.
+  // Used both by locking code and the normal JNI call code.
+  if (method_is_static && !is_critical_native) {
+    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()),
+                        r_carg2_classorobject);
+
+    // Now handlize the static class mirror in carg2. It's known not-null.
+    __ std(r_carg2_classorobject, klass_offset, R1_SP);
+    oop_map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
+    __ addi(r_carg2_classorobject, R1_SP, klass_offset);
+  }
+
+  // Get JNIEnv* which is first argument to native.
+  if (!is_critical_native) {
+    __ addi(r_carg1_jnienv, R16_thread, in_bytes(JavaThread::jni_environment_offset()));
+  }
+
+  // NOTE:
+  //
+  // We have all of the arguments setup at this point.
+  // We MUST NOT touch any outgoing regs from this point on.
+  // So if we must call out we must push a new frame.
+
+  // Get current pc for oopmap, and load it patchable relative to global toc.
+  oopmap_pc = (intptr_t) __ pc();
+  __ calculate_address_from_global_toc(r_return_pc, (address)oopmap_pc, true, true, true, true);
+
+  // We use the same pc/oopMap repeatedly when we call out.
+  oop_maps->add_gc_map(oopmap_pc - start_pc, oop_map);
+
+  // r_return_pc now has the pc loaded that we will use when we finally call
+  // to native.
+
+  // Make sure that thread is non-volatile; it crosses a bunch of VM calls below.
+  assert(R16_thread->is_nonvolatile(), "thread must be in non-volatile register");
+
+
+# if 0
+  // DTrace method entry
+# endif
+
+  // Lock a synchronized method.
+  // --------------------------------------------------------------------------
+
+  if (method->is_synchronized()) {
+    assert(!is_critical_native, "unhandled");
+    ConditionRegister r_flag = CCR1;
+    Register          r_oop  = r_temp_4;
+    const Register    r_box  = r_temp_5;
+    Label             done, locked;
+
+    // Load the oop for the object or class. r_carg2_classorobject contains
+    // either the handlized oop from the incoming arguments or the handlized
+    // class mirror (if the method is static).
+    __ ld(r_oop, 0, r_carg2_classorobject);
+
+    // Get the lock box slot's address.
+    __ addi(r_box, R1_SP, lock_offset);
+
+#   ifdef ASSERT
+    if (UseBiasedLocking) {
+      // Making the box point to itself makes it clear that it went unused,
+      // while also being obviously invalid.
+      __ std(r_box, 0, r_box);
+    }
+#   endif // ASSERT
+
+    // Try fastpath for locking.
+    // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
+    __ compiler_fast_lock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+    __ beq(r_flag, locked);
+
+    // None of the above fast optimizations worked, so we have to get into the
+    // slow case of monitor enter. Inline a special case of call_VM that
+    // disallows any pending_exception.
+
+    // Save argument registers and leave room for C-compatible ABI_112.
+    int frame_size = frame::abi_112_size +
+                     round_to(total_c_args * wordSize, frame::alignment_in_bytes);
+    __ mr(R11_scratch1, R1_SP);
+    RegisterSaver::push_frame_and_save_argument_registers(masm, R12_scratch2, frame_size, total_c_args, out_regs, out_regs2);
+
+    // Do the call.
+    __ set_last_Java_frame(R11_scratch1, r_return_pc);
+    assert(r_return_pc->is_nonvolatile(), "expecting return pc to be in non-volatile register");
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), r_oop, r_box, R16_thread);
+    __ reset_last_Java_frame();
+
+    RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
+
+    __ asm_assert_mem8_is_zero(thread_(pending_exception),
+       "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);
+
+    __ bind(locked);
+  }
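+  // Schematically, the locking sequence above is (C-like sketch, not the
+  // actual code):
+  //   if (!compiler_fast_lock_object(oop, box))
+  //     SharedRuntime::complete_monitor_locking_C(oop, box, thread);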
+
+
+  // Publish thread state
+  // --------------------------------------------------------------------------
+
+  // Use that pc we placed in r_return_pc a while back as the current frame anchor.
+  __ set_last_Java_frame(R1_SP, r_return_pc);
+
+  // Transition from _thread_in_Java to _thread_in_native.
+  __ li(R0, _thread_in_native);
+  __ release();
+  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+
+
+  // The JNI call
+  // --------------------------------------------------------------------------
+
+  FunctionDescriptor* fd_native_method = (FunctionDescriptor*) native_func;
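+  // Note: on PPC64 a C function pointer refers to a function descriptor
+  // rather than to the first instruction. Schematically (AIX/ELFv1 ABI):
+  //   struct FunctionDescriptor { address entry; address toc; address env; };
+  // call_c dereferences the descriptor to obtain the entry point and TOC.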
+  __ call_c(fd_native_method, relocInfo::runtime_call_type);
+
+
+  // Now, we are back from the native code.
+
+
+  // Unpack the native result.
+  // --------------------------------------------------------------------------
+
+  // For int-types, we do any needed sign extension.
+  // Care must be taken that the return values (R3_RET and F1_RET)
+  // will survive any VM calls for blocking or unlocking.
+  // An OOP result (handle) is done specially in the slow-path code.
+
+  switch (ret_type) {
+    case T_VOID:    break;        // Nothing to do!
+    case T_FLOAT:   break;        // Got it where we want it (unless slow-path).
+    case T_DOUBLE:  break;        // Got it where we want it (unless slow-path).
+    case T_LONG:    break;        // Got it where we want it (unless slow-path).
+    case T_OBJECT:  break;        // Really a handle.
+                                  // Cannot de-handlize until after reclaiming jvm_lock.
+    case T_ARRAY:   break;
+
+    case T_BOOLEAN: {             // 0 -> false(0); !0 -> true(1)
+      Label skip_modify;
+      __ cmpwi(CCR0, R3_RET, 0);
+      __ beq(CCR0, skip_modify);
+      __ li(R3_RET, 1);
+      __ bind(skip_modify);
+      break;
+      }
+    case T_BYTE: {                // sign extension
+      __ extsb(R3_RET, R3_RET);
+      break;
+      }
+    case T_CHAR: {                // unsigned result
+      __ andi(R3_RET, R3_RET, 0xffff);
+      break;
+      }
+    case T_SHORT: {               // sign extension
+      __ extsh(R3_RET, R3_RET);
+      break;
+      }
+    case T_INT:                   // nothing to do
+      break;
+    default:
+      ShouldNotReachHere();
+      break;
+  }
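+  // For example, a native method returning the jboolean value 2 is normalized
+  // above to 1 (true), matching Java's 0/1 encoding of booleans.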
+
+
+  // Publish thread state
+  // --------------------------------------------------------------------------
+
+  // Switch thread to "native transition" state before reading the
+  // synchronization state. This additional state is necessary because reading
+  // and testing the synchronization state is not atomic w.r.t. GC, as this
+  // scenario demonstrates:
+  //   - Java thread A, in _thread_in_native state, loads _not_synchronized
+  //     and is preempted.
+  //   - VM thread changes sync state to synchronizing and suspends threads
+  //     for GC.
+  //   - Thread A is resumed to finish this native method, but doesn't block
+  //     here since it didn't see any synchronization in progress, and escapes.
+
+  // Transition from _thread_in_native to _thread_in_native_trans.
+  __ li(R0, _thread_in_native_trans);
+  __ release();
+  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+
+
+  // Must we block?
+  // --------------------------------------------------------------------------
+
+  // Block, if necessary, before resuming in _thread_in_Java state.
+  // In order for GC to work, don't clear the last_Java_sp until after blocking.
+  Label after_transition;
+  {
+    Label no_block, sync;
+
+    if (os::is_MP()) {
+      if (UseMembar) {
+        // Force this write out before the read below.
+        __ fence();
+      } else {
+        // Write serialization page so VM thread can do a pseudo remote membar.
+        // We use the current thread pointer to calculate a thread-specific
+        // offset to write to within the page. This minimizes bus traffic
+        // due to cache line collision.
+        __ serialize_memory(R16_thread, r_temp_4, r_temp_5);
+      }
+    }
+
+    Register sync_state_addr = r_temp_4;
+    Register sync_state      = r_temp_5;
+    Register suspend_flags   = r_temp_6;
+
+    __ load_const(sync_state_addr, SafepointSynchronize::address_of_state(), /*temp*/ sync_state);
+
+    // TODO: PPC port assert(4 == SafepointSynchronize::sz_state(), "unexpected field size");
+    __ lwz(sync_state, 0, sync_state_addr);
+
+    // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
+    __ lwz(suspend_flags, thread_(suspend_flags));
+
+    __ acquire();
+
+    Label do_safepoint;
+    // No synchronization in progress nor yet synchronized.
+    __ cmpwi(CCR0, sync_state, SafepointSynchronize::_not_synchronized);
+    // Not suspended.
+    __ cmpwi(CCR1, suspend_flags, 0);
+
+    __ bne(CCR0, sync);
+    __ beq(CCR1, no_block);
+
+    // Block. Save any potential method result value before the operation and
+    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
+    // lets us share the oopMap we used when we went native rather than create
+    // a distinct one for this pc.
+    __ bind(sync);
+
+    address entry_point = is_critical_native
+      ? CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)
+      : CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans);
+    save_native_result(masm, ret_type, workspace_slot_offset);
+    __ call_VM_leaf(entry_point, R16_thread);
+    restore_native_result(masm, ret_type, workspace_slot_offset);
+
+    if (is_critical_native) {
+      __ b(after_transition); // No thread state transition here.
+    }
+    __ bind(no_block);
+  }
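+  // The check above is, schematically (C-like sketch, not the actual code):
+  //   if (SafepointSynchronize::_state != _not_synchronized ||
+  //       thread->suspend_flags != 0) {
+  //     check_special_condition_for_native_trans(thread);  // may block
+  //   }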
+
+  // Publish thread state.
+  // --------------------------------------------------------------------------
+
+  // Thread state is thread_in_native_trans. Any safepoint blocking has
+  // already happened so we can now change state to _thread_in_Java.
+
+  // Transition from _thread_in_native_trans to _thread_in_Java.
+  __ li(R0, _thread_in_Java);
+  __ release();
+  // TODO: PPC port assert(4 == JavaThread::sz_thread_state(), "unexpected field size");
+  __ stw(R0, thread_(thread_state));
+  if (UseMembar) {
+    __ fence();
+  }
+  __ bind(after_transition);
+
+  // Reguard any pages if necessary.
+  // --------------------------------------------------------------------------
+
+  Label no_reguard;
+  __ lwz(r_temp_1, thread_(stack_guard_state));
+  __ cmpwi(CCR0, r_temp_1, JavaThread::stack_guard_yellow_disabled);
+  __ bne(CCR0, no_reguard);
+
+  save_native_result(masm, ret_type, workspace_slot_offset);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
+  restore_native_result(masm, ret_type, workspace_slot_offset);
+
+  __ bind(no_reguard);
+
+
+  // Unlock
+  // --------------------------------------------------------------------------
+
+  if (method->is_synchronized()) {
+
+    ConditionRegister r_flag   = CCR1;
+    const Register r_oop       = r_temp_4;
+    const Register r_box       = r_temp_5;
+    const Register r_exception = r_temp_6;
+    Label done;
+
+    // Get oop and address of lock object box.
+    if (method_is_static) {
+      assert(klass_offset != -1, "");
+      __ ld(r_oop, klass_offset, R1_SP);
+    } else {
+      assert(receiver_offset != -1, "");
+      __ ld(r_oop, receiver_offset, R1_SP);
+    }
+    __ addi(r_box, R1_SP, lock_offset);
+
+    // Try fastpath for unlocking.
+    __ compiler_fast_unlock_object(r_flag, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+    __ beq(r_flag, done);
+
+    // Save and restore any potential method result value around the unlocking operation.
+    save_native_result(masm, ret_type, workspace_slot_offset);
+
+    // Must save pending exception around the slow-path VM call. Since it's a
+    // leaf call, the pending exception (if any) can be kept in a register.
+    __ ld(r_exception, thread_(pending_exception));
+    assert(r_exception->is_nonvolatile(), "exception register must be non-volatile");
+    __ li(R0, 0);
+    __ std(R0, thread_(pending_exception));
+
+    // Slow case of monitor exit.
+    // Inline a special case of call_VM that disallows any pending_exception.
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box);
+
+    __ asm_assert_mem8_is_zero(thread_(pending_exception),
+       "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
+
+    restore_native_result(masm, ret_type, workspace_slot_offset);
+
+    // check_forward_pending_exception jumps to forward_exception if any pending
+    // exception is set. The forward_exception routine expects to see the
+    // exception in pending_exception and not in a register. Kind of clumsy,
+    // since everybody who branches to forward_exception must have tested
+    // pending_exception first and hence already have it in a register.
+    __ std(r_exception, thread_(pending_exception));
+
+    __ bind(done);
+  }
+
+# if 0
+  // DTrace method exit
+# endif
+
+  // Clear "last Java frame" SP and PC.
+  // --------------------------------------------------------------------------
+
+  __ reset_last_Java_frame();
+
+  // Unpack oop result.
+  // --------------------------------------------------------------------------
+
+  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
+    Label skip_unboxing;
+    __ cmpdi(CCR0, R3_RET, 0);
+    __ beq(CCR0, skip_unboxing);
+    __ ld(R3_RET, 0, R3_RET);
+    __ bind(skip_unboxing);
+    __ verify_oop(R3_RET);
+  }
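+  // JNI returns object results as handles (oop*); the code above dereferences
+  // a non-NULL handle, schematically:
+  //   result = (result == NULL) ? NULL : *(oop*)result;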
+
+
+  // Reset handle block.
+  // --------------------------------------------------------------------------
+  if (!is_critical_native) {
+    __ ld(r_temp_1, thread_(active_handles));
+    // TODO: PPC port assert(4 == JNIHandleBlock::top_size_in_bytes(), "unexpected field size");
+    __ li(r_temp_2, 0);
+    __ stw(r_temp_2, JNIHandleBlock::top_offset_in_bytes(), r_temp_1);
+
+    // Check for pending exceptions.
+    // --------------------------------------------------------------------------
+    __ ld(r_temp_2, thread_(pending_exception));
+    __ cmpdi(CCR0, r_temp_2, 0);
+    __ bne(CCR0, handle_pending_exception);
+  }
+
+  // Return
+  // --------------------------------------------------------------------------
+
+  __ pop_frame();
+  __ restore_LR_CR(R11);
+  __ blr();
+
+
+  // Handler for pending exceptions (out-of-line).
+  // --------------------------------------------------------------------------
+
+  // Since this is a native call, we know the proper exception handler
+  // is the empty function. We just pop this frame and then jump to
+  // forward_exception_entry.
+  if (!is_critical_native) {
+    __ align(InteriorEntryAlignment);
+    __ bind(handle_pending_exception);
+
+    __ pop_frame();
+    __ restore_LR_CR(R11);
+    __ b64_patchable((address)StubRoutines::forward_exception_entry(),
+                     relocInfo::runtime_call_type);
+  }
+
+  // Handler for a cache miss (out-of-line).
+  // --------------------------------------------------------------------------
+
+  if (!method_is_static) {
+    __ align(InteriorEntryAlignment);
+    __ bind(ic_miss);
+
+    __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
+                     relocInfo::runtime_call_type);
+  }
+
+  // Done.
+  // --------------------------------------------------------------------------
+
+  __ flush();
+
+  nmethod *nm = nmethod::new_native_nmethod(method,
+                                            compile_id,
+                                            masm->code(),
+                                            vep_start_pc-start_pc,
+                                            frame_done_pc-start_pc,
+                                            stack_slots / VMRegImpl::slots_per_word,
+                                            (method_is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
+                                            in_ByteSize(lock_offset),
+                                            oop_maps);
+
+  if (is_critical_native) {
+    nm->set_lazy_critical_native(true);
+  }
+
+  return nm;
+#else
+  ShouldNotReachHere();
+  return NULL;
+#endif // COMPILER2
+}
+
+// This function returns the adjusted size (in number of words) of a c2i adapter
+// activation, for use during deoptimization.
+int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
+  return round_to((callee_locals - callee_parameters) * Interpreter::stackElementWords, frame::alignment_in_bytes);
+}
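+// For example (illustrative values): with callee_locals == 10,
+// callee_parameters == 3, Interpreter::stackElementWords == 1, and
+// frame::alignment_in_bytes == 16, this returns round_to(7, 16) == 16.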
+
+uint SharedRuntime::out_preserve_stack_slots() {
+#ifdef COMPILER2
+  return frame::jit_out_preserve_size / VMRegImpl::stack_slot_size;
+#else
+  return 0;
+#endif
+}
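+// E.g. (illustrative values only): with frame::jit_out_preserve_size == 112
+// bytes and VMRegImpl::stack_slot_size == 4, C2 would preserve 28 out-slots.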
+
+#ifdef COMPILER2
+// Frame generation for deopt and uncommon trap blobs.
+static void push_skeleton_frame(MacroAssembler* masm, bool deopt,
+                                /* Read */
+                                Register unroll_block_reg,
+                                /* Update */
+                                Register frame_sizes_reg,
+                                Register number_of_frames_reg,
+                                Register pcs_reg,
+                                /* Invalidate */
+                                Register frame_size_reg,
+                                Register pc_reg) {
+
+  __ ld(pc_reg, 0, pcs_reg);
+  __ ld(frame_size_reg, 0, frame_sizes_reg);
+  __ std(pc_reg, _abi(lr), R1_SP);
+  __ push_frame(frame_size_reg, R0/*tmp*/);
+#ifdef CC_INTERP
+  __ std(R1_SP, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
+#else
+  Unimplemented();
+#endif
+  __ addi(number_of_frames_reg, number_of_frames_reg, -1);
+  __ addi(frame_sizes_reg, frame_sizes_reg, wordSize);
+  __ addi(pcs_reg, pcs_reg, wordSize);
+}
+
+// Loop through the UnrollBlock info and create new frames.
+static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
+                                 /* read */
+                                 Register unroll_block_reg,
+                                 /* invalidate */
+                                 Register frame_sizes_reg,
+                                 Register number_of_frames_reg,
+                                 Register pcs_reg,
+                                 Register frame_size_reg,
+                                 Register pc_reg) {
+  Label loop;
+
+  // _number_of_frames is of type int (deoptimization.hpp).
+  __ lwa(number_of_frames_reg,
+             Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(),
+             unroll_block_reg);
+  __ ld(pcs_reg,
+            Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(),
+            unroll_block_reg);
+  __ ld(frame_sizes_reg,
+            Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(),
+            unroll_block_reg);
+
+  // stack: (caller_of_deoptee, ...).
+
+  // At this point we either have an interpreter frame or a compiled
+  // frame on top of the stack. If it is a compiled frame, we push a new
+  // c2i adapter here.
+
+  // Memorize top-frame stack-pointer.
+  __ mr(frame_size_reg/*old_sp*/, R1_SP);
+
+  // Resize interpreter top frame OR C2I adapter.
+
+  // At this moment, the top frame (which is the caller of the deoptee) is
+  // an interpreter frame or a newly pushed C2I adapter or an entry frame.
+  // The top frame has a TOP_IJAVA_FRAME_ABI and the frame contains the
+  // outgoing arguments.
+  //
+  // In order to push the interpreter frame for the deoptee, we need to
+  // resize the top frame such that we are able to place the deoptee's
+  // locals in the frame.
+  // Additionally, we have to turn the top frame's TOP_IJAVA_FRAME_ABI
+  // into a valid PARENT_IJAVA_FRAME_ABI.
+
+  __ lwa(R11_scratch1,
+             Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(),
+             unroll_block_reg);
+  __ neg(R11_scratch1, R11_scratch1);
+
+  // R11_scratch1 contains size of locals for frame resizing.
+  // R12_scratch2 contains top frame's lr.
+
+  // Resizing the frame by the complete frame size prevents the TOC from being
+  // overwritten by locals. A way to save more stack space would be to copy the
+  // TOC to its location in the new ABI.
+  __ addi(R11_scratch1, R11_scratch1, - frame::parent_ijava_frame_abi_size);
+
+  // now, resize the frame
+  __ resize_frame(R11_scratch1, pc_reg/*tmp*/);
+
+  // In the case where we have resized a c2i frame above, the optional
+  // alignment below the locals has size 32 (why?).
+  __ std(R12_scratch2, _abi(lr), R1_SP);
+
+  // Initialize initial_caller_sp.
+  __ std(frame_size_reg/*old_sp*/, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
+
+#ifdef ASSERT
+  // Make sure that there is at least one entry in the array.
+  __ cmpdi(CCR0, number_of_frames_reg, 0);
+  __ asm_assert_ne("array_size must be > 0", 0x205);
+#endif
+
+  // Now push the new interpreter frames.
+  //
+  __ bind(loop);
+  // Allocate a new frame, fill in the pc.
+  push_skeleton_frame(masm, deopt,
+                      unroll_block_reg,
+                      frame_sizes_reg,
+                      number_of_frames_reg,
+                      pcs_reg,
+                      frame_size_reg,
+                      pc_reg);
+  __ cmpdi(CCR0, number_of_frames_reg, 0);
+  __ bne(CCR0, loop);
+
+  // Get the return address pointing into the frame manager.
+  __ ld(R0, 0, pcs_reg);
+  // Store it in the top interpreter frame.
+  __ std(R0, _abi(lr), R1_SP);
+  // Initialize frame_manager_lr of interpreter top frame.
+#ifdef CC_INTERP
+  __ std(R0, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
+#endif
+}
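+// Schematic C sketch of the frame-pushing loop above (names illustrative,
+// not actual code):
+//   while (number_of_frames-- > 0) {
+//     lr = *pcs++;                 // pc for the frame being pushed
+//     push_frame(*frame_sizes++);  // allocate the skeletal frame
+//   }
+//   top_frame->lr = *pcs;          // return address into the frame manager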
+#endif
+
+void SharedRuntime::generate_deopt_blob() {
+  // Allocate space for the code
+  ResourceMark rm;
+  // Setup code generation tools
+  CodeBuffer buffer("deopt_blob", 2048, 1024);
+  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
+  Label exec_mode_initialized;
+  int frame_size_in_words;
+  OopMap* map = NULL;
+  OopMapSet *oop_maps = new OopMapSet();
+
+  // size of ABI112 plus spill slots for R3_RET and F1_RET.
+  const int frame_size_in_bytes = frame::abi_112_spill_size;
+  const int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
+  int first_frame_size_in_bytes = 0; // frame size of "unpack frame" for call to fetch_unroll_info.
+
+  const Register exec_mode_reg = R21_tmp1;
+
+  const address start = __ pc();
+
+#ifdef COMPILER2
+  // --------------------------------------------------------------------------
+  // Prolog for the non-exception case.
+
+  // We have been called from the deopt handler of the deoptee.
+  //
+  // deoptee:
+  //                      ...
+  //                      call X
+  //                      ...
+  //  deopt_handler:      call_deopt_stub
+  //  cur. return pc  --> ...
+  //
+  // So currently SR_LR points behind the call in the deopt handler.
+  // We adjust it such that it points to the start of the deopt handler.
+  // The return_pc has been stored in the frame of the deoptee and
+  // will replace the address of the deopt_handler in the call
+  // to Deoptimization::fetch_unroll_info below.
+  // We can't grab a free register here, because all registers may
+  // contain live values, so let the RegisterSaver do the adjustment
+  // of the return pc.
+  const int return_pc_adjustment_no_exception = -size_deopt_handler();
+
+  // Push the "unpack frame".
+  // Save everything in sight.
+  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
+                                                                 &first_frame_size_in_bytes,
+                                                                 /*generate_oop_map=*/ true,
+                                                                 return_pc_adjustment_no_exception,
+                                                                 RegisterSaver::return_pc_is_lr);
+  assert(map != NULL, "OopMap must have been created");
+
+  __ li(exec_mode_reg, Deoptimization::Unpack_deopt);
+  // Save exec mode for unpack_frames.
+  __ b(exec_mode_initialized);
+
+  // --------------------------------------------------------------------------
+  // Prolog for exception case
+
+  // An exception is pending.
+  // We have been called with a return (interpreter) or a jump (exception blob).
+  //
+  // - R3_ARG1: exception oop
+  // - R4_ARG2: exception pc
+
+  int exception_offset = __ pc() - start;
+
+  BLOCK_COMMENT("Prolog for exception case");
+
+  // The RegisterSaver doesn't need to adjust the return pc for this situation.
+  const int return_pc_adjustment_exception = 0;
+
+  // Push the "unpack frame".
+  // Save everything in sight.
+  assert(R4 == R4_ARG2, "exception pc must be in r4");
+  RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
+                                                           &first_frame_size_in_bytes,
+                                                           /*generate_oop_map=*/ false,
+                                                           return_pc_adjustment_exception,
+                                                           RegisterSaver::return_pc_is_r4);
+
+  // Deopt during an exception. Save exec mode for unpack_frames.
+  __ li(exec_mode_reg, Deoptimization::Unpack_exception);
+
+  // Store exception oop and pc in thread (location known to GC).
+  // This is needed since the call to "fetch_unroll_info()" may safepoint.
+  __ std(R3_ARG1, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+  __ std(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);
+
+  // fall through
+
+  // --------------------------------------------------------------------------
+  __ BIND(exec_mode_initialized);
+
+  {
+  const Register unroll_block_reg = R22_tmp2;
+
+  // We need to set `last_Java_frame' because `fetch_unroll_info' will
+  // call `last_Java_frame()'. The value of the pc in the frame is not
+  // particularly important. It just needs to identify this blob.
+  __ set_last_Java_frame(R1_SP, noreg);
+
+  // With EscapeAnalysis turned on, this call may safepoint!
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), R16_thread);
+  address calls_return_pc = __ last_calls_return_pc();
+  // Set an oopmap for the call site that describes all our saved registers.
+  oop_maps->add_gc_map(calls_return_pc - start, map);
+
+  __ reset_last_Java_frame();
+  // Save the return value.
+  __ mr(unroll_block_reg, R3_RET);
+
+  // Restore only the result registers that have been saved
+  // by save_volatile_registers(...).
+  RegisterSaver::restore_result_registers(masm, first_frame_size_in_bytes);
+
+  // In excp_deopt_mode, restore and clear exception oop which we
+  // stored in the thread during exception entry above. The exception
+  // oop will be the return value of this stub.
+  Label skip_restore_excp;
+  __ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
+  __ bne(CCR0, skip_restore_excp);
+  __ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+  __ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
+  __ li(R0, 0);
+  __ std(R0, in_bytes(JavaThread::exception_pc_offset()),  R16_thread);
+  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
+  __ BIND(skip_restore_excp);
+
+  // Reload narrow_oop_base.
+  if (UseCompressedOops && Universe::narrow_oop_base() != 0) {
+    __ load_const_optimized(R30, Universe::narrow_oop_base());
+  }
+
+  __ pop_frame();
+
+  // stack: (deoptee, optional i2c, caller of deoptee, ...).
+
+  // pop the deoptee's frame
+  __ pop_frame();
+
+  // stack: (caller_of_deoptee, ...).
+
+  // Loop through the `UnrollBlock' info and create interpreter frames.
+  push_skeleton_frames(masm, true/*deopt*/,
+                       unroll_block_reg,
+                       R23_tmp3,
+                       R24_tmp4,
+                       R25_tmp5,
+                       R26_tmp6,
+                       R27_tmp7);
+
+  // stack: (skeletal interpreter frame, ..., optional skeletal
+  // interpreter frame, optional c2i, caller of deoptee, ...).
+  }
+
+  // push an `unpack_frame' taking care of float / int return values.
+  __ push_frame(frame_size_in_bytes, R0/*tmp*/);
+
+  // stack: (unpack frame, skeletal interpreter frame, ..., optional
+  // skeletal interpreter frame, optional c2i, caller of deoptee,
+  // ...).
+
+  // Spill live volatile registers since we'll do a call.
+  __ std( R3_RET,  _abi_112_spill(spill_ret),  R1_SP);
+  __ stfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
+
+  // Let the unpacker lay out information in the skeletal frames just
+  // allocated.
+  __ get_PC_trash_LR(R3_RET);
+  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R3_RET);
+  // This is a call to a LEAF method, so no oop map is required.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
+                  R16_thread/*thread*/, exec_mode_reg/*exec_mode*/);
+  __ reset_last_Java_frame();
+
+  // Restore the volatiles saved above.
+  __ ld( R3_RET, _abi_112_spill(spill_ret),  R1_SP);
+  __ lfd(F1_RET, _abi_112_spill(spill_fret), R1_SP);
+
+  // Pop the unpack frame.
+  __ pop_frame();
+  __ restore_LR_CR(R0);
+
+  // stack: (top interpreter frame, ..., optional interpreter frame,
+  // optional c2i, caller of deoptee, ...).
+
+  // Initialize R14_state.
+  __ ld(R14_state, 0, R1_SP);
+  __ addi(R14_state, R14_state, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+  // Also initialize R15_prev_state.
+  __ restore_prev_state();
+
+  // Return to the interpreter entry point.
+  __ blr();
+  __ flush();
+#else // COMPILER2
+  __ unimplemented("deopt blob needed only with compiler");
+  int exception_offset = __ pc() - start;
+#endif // COMPILER2
+
+  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, first_frame_size_in_bytes / wordSize);
+}
+
+#ifdef COMPILER2
+void SharedRuntime::generate_uncommon_trap_blob() {
+  // Allocate space for the code.
+  ResourceMark rm;
+  // Setup code generation tools.
+  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
+  InterpreterMacroAssembler* masm = new InterpreterMacroAssembler(&buffer);
+  address start = __ pc();
+
+  Register unroll_block_reg = R21_tmp1;
+  Register klass_index_reg  = R22_tmp2;
+  Register unc_trap_reg     = R23_tmp3;
+
+  OopMapSet* oop_maps = new OopMapSet();
+  int frame_size_in_bytes = frame::abi_112_size;
+  OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
+
+  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
+
+  // Push a dummy `unpack_frame' and call
+  // `Deoptimization::uncommon_trap' to pack the compiled frame into a
+  // vframe array and return the `UnrollBlock' information.
+
+  // Save LR to compiled frame.
+  __ save_LR_CR(R11_scratch1);
+
+  // Push an "uncommon_trap" frame.
+  __ push_frame_abi112(0, R11_scratch1);
+
+  // stack: (unpack frame, deoptee, optional i2c, caller_of_deoptee, ...).
+
+  // Set the `unpack_frame' as last_Java_frame.
+  // `Deoptimization::uncommon_trap' expects it and considers its
+  // sender frame as the deoptee frame.
+  // Remember the offset of the instruction whose address will be
+  // moved to R11_scratch1.
+  address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
+
+  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
+
+  __ mr(klass_index_reg, R3);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap),
+                  R16_thread, klass_index_reg);
+
+  // Set an oopmap for the call site.
+  oop_maps->add_gc_map(gc_map_pc - start, map);
+
+  __ reset_last_Java_frame();
+
+  // Pop the `unpack frame'.
+  __ pop_frame();
+
+  // stack: (deoptee, optional i2c, caller_of_deoptee, ...).
+
+  // Save the return value.
+  __ mr(unroll_block_reg, R3_RET);
+
+  // Pop the uncommon_trap frame.
+  __ pop_frame();
+
+  // stack: (caller_of_deoptee, ...).
+
+  // Allocate new interpreter frame(s) and possibly a c2i adapter
+  // frame.
+  push_skeleton_frames(masm, false/*deopt*/,
+                       unroll_block_reg,
+                       R22_tmp2,
+                       R23_tmp3,
+                       R24_tmp4,
+                       R25_tmp5,
+                       R26_tmp6);
+
+  // stack: (skeletal interpreter frame, ..., optional skeletal
+  // interpreter frame, optional c2i, caller of deoptee, ...).
+
+  // Push a dummy `unpack_frame' taking care of float return values.
+  // Call `Deoptimization::unpack_frames' to lay out information in the
+  // interpreter frames just created.
+
+  // Push a simple "unpack frame" here.
+  __ push_frame_abi112(0, R11_scratch1);
+
+  // stack: (unpack frame, skeletal interpreter frame, ..., optional
+  // skeletal interpreter frame, optional c2i, caller of deoptee,
+  // ...).
+
+  // Set the "unpack_frame" as last_Java_frame.
+  __ get_PC_trash_LR(R11_scratch1);
+  __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
+
+  // Indicate it is the uncommon trap case.
+  __ li(unc_trap_reg, Deoptimization::Unpack_uncommon_trap);
+  // Let the unpacker lay out information in the skeletal frames just
+  // allocated.
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames),
+                  R16_thread, unc_trap_reg);
+
+  __ reset_last_Java_frame();
+  // Pop the `unpack frame'.
+  __ pop_frame();
+  // Restore LR from top interpreter frame.
+  __ restore_LR_CR(R11_scratch1);
+
+  // stack: (top interpreter frame, ..., optional interpreter frame,
+  // optional c2i, caller of deoptee, ...).
+
+  // Initialize R14_state, ...
+  __ ld(R11_scratch1, 0, R1_SP);
+  __ addi(R14_state, R11_scratch1, -frame::interpreter_frame_cinterpreterstate_size_in_bytes());
+  // also initialize R15_prev_state.
+  __ restore_prev_state();
+  // Return to the interpreter entry point.
+  __ blr();
+
+  masm->flush();
+
+  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, frame_size_in_bytes/wordSize);
+}
+#endif // COMPILER2
+
+// Generate a special Compile2Runtime blob that saves all registers, and sets up the oopmap.
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
+  assert(StubRoutines::forward_exception_entry() != NULL,
+         "must be generated before");
+
+  ResourceMark rm;
+  OopMapSet *oop_maps = new OopMapSet();
+  OopMap* map;
+
+  // Allocate space for the code. Setup code generation tools.
+  CodeBuffer buffer("handler_blob", 2048, 1024);
+  MacroAssembler* masm = new MacroAssembler(&buffer);
+
+  address start = __ pc();
+  int frame_size_in_bytes = 0;
+
+  RegisterSaver::ReturnPCLocation return_pc_location;
+  bool cause_return = (poll_type == POLL_AT_RETURN);
+  if (cause_return) {
+    // Nothing to do here. The frame has already been popped in MachEpilogNode.
+    // Register LR already contains the return pc.
+    return_pc_location = RegisterSaver::return_pc_is_lr;
+  } else {
+    // Use thread()->saved_exception_pc() as return pc.
+    return_pc_location = RegisterSaver::return_pc_is_thread_saved_exception_pc;
+  }
+
+  // Save registers, fpu state, and flags.
+  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
+                                                                 &frame_size_in_bytes,
+                                                                 /*generate_oop_map=*/ true,
+                                                                 /*return_pc_adjustment=*/0,
+                                                                 return_pc_location);
+
+  // The following is basically a call_VM. However, we need the precise
+  // address of the call in order to generate an oopmap. Hence, we do all the
+  // work ourselves.
+  __ set_last_Java_frame(/*sp=*/R1_SP, /*pc=*/noreg);
+
+  // The return address must always be correct so that the frame constructor
+  // never sees an invalid pc.
+
+  // Do the call
+  __ call_VM_leaf(call_ptr, R16_thread);
+  address calls_return_pc = __ last_calls_return_pc();
+
+  // Set an oopmap for the call site. This oopmap will map all
+  // oop-registers and debug-info registers as callee-saved. This
+  // will allow deoptimization at this safepoint to find all possible
+  // debug-info recordings, as well as let GC find all oops.
+  oop_maps->add_gc_map(calls_return_pc - start, map);
+
+  Label noException;
+
+  // Clear the last Java frame.
+  __ reset_last_Java_frame();
+
+  BLOCK_COMMENT("  Check pending exception.");
+  const Register pending_exception = R0;
+  __ ld(pending_exception, thread_(pending_exception));
+  __ cmpdi(CCR0, pending_exception, 0);
+  __ beq(CCR0, noException);
+
+  // Exception pending
+  RegisterSaver::restore_live_registers_and_pop_frame(masm,
+                                                      frame_size_in_bytes,
+                                                      /*restore_ctr=*/true);
+
+
+  BLOCK_COMMENT("  Jump to forward_exception_entry.");
+  // Jump to forward_exception_entry, with the issuing PC in LR
+  // so it looks like the original nmethod called forward_exception_entry.
+  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
+
+  // No exception case.
+  __ BIND(noException);
+
+
+  // Normal exit, restore registers and exit.
+  RegisterSaver::restore_live_registers_and_pop_frame(masm,
+                                                      frame_size_in_bytes,
+                                                      /*restore_ctr=*/true);
+
+  __ blr();
+
+  // Make sure all code is generated
+  masm->flush();
+
+  // Fill-out other meta info
+  // CodeBlob frame size is in words.
+  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_bytes / wordSize);
+}
+
+// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
+//
+// Generate a stub that calls into the vm to find out the proper destination
+// of a java call. All the argument registers are live at this point
+// but since this is generic code we don't know what they are and the caller
+// must do any gc of the args.
+//
+RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
+
+  // allocate space for the code
+  ResourceMark rm;
+
+  CodeBuffer buffer(name, 1000, 512);
+  MacroAssembler* masm = new MacroAssembler(&buffer);
+
+  int frame_size_in_bytes;
+
+  OopMapSet *oop_maps = new OopMapSet();
+  OopMap* map = NULL;
+
+  address start = __ pc();
+
+  map = RegisterSaver::push_frame_abi112_and_save_live_registers(masm,
+                                                                 &frame_size_in_bytes,
+                                                                 /*generate_oop_map*/ true,
+                                                                 /*return_pc_adjustment*/ 0,
+                                                                 RegisterSaver::return_pc_is_lr);
+
+  // Use noreg as last_Java_pc, the return pc will be reconstructed
+  // from the physical frame.
+  __ set_last_Java_frame(/*sp*/R1_SP, noreg);
+
+  int frame_complete = __ offset();
+
+  // Pass R19_method as 2nd (optional) argument, used by
+  // counter_overflow_stub.
+  __ call_VM_leaf(destination, R16_thread, R19_method);
+  address calls_return_pc = __ last_calls_return_pc();
+  // Set an oopmap for the call site.
+  // We need this not only for callee-saved registers, but also for volatile
+  // registers that the compiler might be keeping live across a safepoint.
+  // Create the oopmap for the call's return pc.
+  oop_maps->add_gc_map(calls_return_pc - start, map);
+
+  // R3_RET contains the address we are going to jump to assuming no exception got installed.
+
+  // clear last_Java_sp
+  __ reset_last_Java_frame();
+
+  // Check for pending exceptions.
+  BLOCK_COMMENT("Check for pending exceptions.");
+  Label pending;
+  __ ld(R11_scratch1, thread_(pending_exception));
+  __ cmpdi(CCR0, R11_scratch1, 0);
+  __ bne(CCR0, pending);
+
+  __ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
+
+  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ false);
+
+  // Get the returned Method*.
+  __ get_vm_result_2(R19_method);
+
+  __ bctr();
+
+
+  // Pending exception after the safepoint.
+  __ BIND(pending);
+
+  RegisterSaver::restore_live_registers_and_pop_frame(masm, frame_size_in_bytes, /*restore_ctr*/ true);
+
+  // exception pending => remove activation and forward to exception handler
+
+  __ li(R11_scratch1, 0);
+  __ ld(R3_ARG1, thread_(pending_exception));
+  __ std(R11_scratch1, in_bytes(JavaThread::vm_result_offset()), R16_thread);
+  __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
+
+  // -------------
+  // Make sure all code is generated.
+  masm->flush();
+
+  // return the blob
+  // CodeBlob frame size is in words.
+  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_bytes/wordSize,
+                                       oop_maps, true);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/stubGenerator_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,2082 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "nativeInst_ppc.hpp"
+#include "oops/instanceOop.hpp"
+#include "oops/method.hpp"
+#include "oops/objArrayKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "utilities/top.hpp"
+#ifdef TARGET_OS_FAMILY_aix
+# include "thread_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+#define __ _masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+class StubGenerator: public StubCodeGenerator {
+ private:
+
+  // Call stubs are used to call Java from C
+  //
+  // Arguments:
+  //
+  //   R3  - call wrapper address     : address
+  //   R4  - result                   : intptr_t*
+  //   R5  - result type              : BasicType
+  //   R6  - method                   : Method
+  //   R7  - frame mgr entry point    : address
+  //   R8  - parameter block          : intptr_t*
+  //   R9  - parameter count in words : int
+  //   R10 - thread                   : Thread*
+  //
+  address generate_call_stub(address& return_address) {
+    // Setup a new c frame, copy java arguments, call frame manager or
+    // native_entry, and process result.
+
+    StubCodeMark mark(this, "StubRoutines", "call_stub");
+
+    address start = __ emit_fd();
+
+    // some sanity checks
+    assert((sizeof(frame::abi_48) % 16) == 0,                 "unaligned");
+    assert((sizeof(frame::abi_112) % 16) == 0,                "unaligned");
+    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
+    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
+    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");
+
+    Register r_arg_call_wrapper_addr        = R3;
+    Register r_arg_result_addr              = R4;
+    Register r_arg_result_type              = R5;
+    Register r_arg_method                   = R6;
+    Register r_arg_entry                    = R7;
+    Register r_arg_thread                   = R10;
+
+    Register r_temp                         = R24;
+    Register r_top_of_arguments_addr        = R25;
+    Register r_entryframe_fp                = R26;
+
+    {
+      // Stack on entry to call_stub:
+      //
+      //      F1      [C_FRAME]
+      //              ...
+
+      Register r_arg_argument_addr          = R8;
+      Register r_arg_argument_count         = R9;
+      Register r_frame_alignment_in_bytes   = R27;
+      Register r_argument_addr              = R28;
+      Register r_argumentcopy_addr          = R29;
+      Register r_argument_size_in_bytes     = R30;
+      Register r_frame_size                 = R23;
+
+      Label arguments_copied;
+
+      // Save LR/CR to caller's C_FRAME.
+      __ save_LR_CR(R0);
+
+      // Zero extend arg_argument_count.
+      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);
+
+      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
+      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+
+      // Keep copy of our frame pointer (caller's SP).
+      __ mr(r_entryframe_fp, R1_SP);
+
+      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
+      // Push ENTRY_FRAME including arguments:
+      //
+      //      F0      [TOP_IJAVA_FRAME_ABI]
+      //              alignment (optional)
+      //              [outgoing Java arguments]
+      //              [ENTRY_FRAME_LOCALS]
+      //      F1      [C_FRAME]
+      //              ...
+
+      // calculate frame size
+
+      // unaligned size of arguments
+      __ sldi(r_argument_size_in_bytes,
+                  r_arg_argument_count, Interpreter::logStackElementSize);
+      // arguments alignment (max 1 slot)
+      // FIXME: use round_to() here
+      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
+      __ sldi(r_frame_alignment_in_bytes,
+              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
+
+      // size = unaligned size of arguments + top abi's size
+      __ addi(r_frame_size, r_argument_size_in_bytes,
+              frame::top_ijava_frame_abi_size);
+      // size += arguments alignment
+      __ add(r_frame_size,
+             r_frame_size, r_frame_alignment_in_bytes);
+      // size += size of call_stub locals
+      __ addi(r_frame_size,
+              r_frame_size, frame::entry_frame_locals_size);
+
+      // push ENTRY_FRAME
+      __ push_frame(r_frame_size, r_temp);
+
+      // initialize call_stub locals (step 1)
+      __ std(r_arg_call_wrapper_addr,
+             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
+      __ std(r_arg_result_addr,
+             _entry_frame_locals_neg(result_address), r_entryframe_fp);
+      __ std(r_arg_result_type,
+             _entry_frame_locals_neg(result_type), r_entryframe_fp);
+      // we will save arguments_tos_address later
+
+
+      BLOCK_COMMENT("Copy Java arguments");
+      // copy Java arguments
+
+      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
+      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
+      __ addi(r_top_of_arguments_addr,
+              R1_SP, frame::top_ijava_frame_abi_size);
+      __ add(r_top_of_arguments_addr,
+             r_top_of_arguments_addr, r_frame_alignment_in_bytes);
+
+      // any arguments to copy?
+      __ cmpdi(CCR0, r_arg_argument_count, 0);
+      __ beq(CCR0, arguments_copied);
+
+      // prepare loop and copy arguments in reverse order
+      {
+        // init CTR with arg_argument_count
+        __ mtctr(r_arg_argument_count);
+
+        // Let r_argumentcopy_addr point to the last outgoing Java argument.
+        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);
+
+        // Let r_argument_addr point to the last incoming Java argument.
+        __ add(r_argument_addr,
+                   r_arg_argument_addr, r_argument_size_in_bytes);
+        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
+
+        // now loop while CTR > 0 and copy arguments
+        {
+          Label next_argument;
+          __ bind(next_argument);
+
+          __ ld(r_temp, 0, r_argument_addr);
+          // argument_addr--;
+          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
+          __ std(r_temp, 0, r_argumentcopy_addr);
+          // argumentcopy_addr++;
+          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);
+
+          __ bdnz(next_argument);
+        }
+      }
+
+      // Arguments copied, continue.
+      __ bind(arguments_copied);
+    }
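+    // The copy above is, schematically (C-like sketch):
+    //   src = incoming_args + count - 1;  // last incoming argument
+    //   dst = top_of_arguments;           // first outgoing slot
+    //   while (count-- > 0) *dst++ = *src--;
+    // i.e., arguments are copied in reverse memory order into the new frame.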
+
+    {
+      BLOCK_COMMENT("Call frame manager or native entry.");
+      // Call frame manager or native entry.
+      Register r_new_arg_entry = R14_state;
+      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
+                                 r_arg_method, r_arg_thread);
+
+      __ mr(r_new_arg_entry, r_arg_entry);
+
+      // Register state on entry to frame manager / native entry:
+      //
+      //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
+      //   R19_method  -  Method
+      //   R16_thread  -  JavaThread*
+
+      // Tos must point to last argument - element_size.
+      const Register tos = R17_tos;
+      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);
+
+      // initialize call_stub locals (step 2)
+      // now save tos as arguments_tos_address
+      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);
+
+      // load argument registers for call
+      __ mr(R19_method, r_arg_method);
+      __ mr(R16_thread, r_arg_thread);
+      assert(tos != r_arg_method, "trashed r_arg_method");
+      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");
+
+      // Set R15_prev_state to 0 for simplifying checks in callee.
+      __ li(R15_prev_state, 0);
+
+      // Stack on entry to frame manager / native entry:
+      //
+      //      F0      [TOP_IJAVA_FRAME_ABI]
+      //              alignment (optional)
+      //              [outgoing Java arguments]
+      //              [ENTRY_FRAME_LOCALS]
+      //      F1      [C_FRAME]
+      //              ...
+      //
+
+      // global toc register
+      __ load_const(R29, MacroAssembler::global_toc(), R11_scratch1);
+
+      // Load narrow oop base.
+      __ reinit_heapbase(R30, R11_scratch1);
+
+      // Remember the senderSP so the interpreter can pop c2i arguments off
+      // the stack when called via a c2i adapter.
+
+      // Pass initial_caller_sp to framemanager.
+      __ mr(R21_tmp1, R1_SP);
+
+      // Do a light-weight C-call here; r_new_arg_entry holds the address
+      // of the interpreter entry point (frame manager or native entry),
+      // and the runtime value of LR is saved in return_address.
+      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
+             "trashed r_new_arg_entry");
+      return_address = __ call_stub(r_new_arg_entry);
+    }
+
+    {
+      BLOCK_COMMENT("Returned from frame manager or native entry.");
+      // Returned from frame manager or native entry.
+      // Now pop frame, process result, and return to caller.
+
+      // Stack on exit from frame manager / native entry:
+      //
+      //      F0      [ABI]
+      //              ...
+      //              [ENTRY_FRAME_LOCALS]
+      //      F1      [C_FRAME]
+      //              ...
+      //
+      // Just pop the topmost frame ...
+      //
+
+      Label ret_is_object;
+      Label ret_is_long;
+      Label ret_is_float;
+      Label ret_is_double;
+
+      Register r_entryframe_fp = R30;
+      Register r_lr            = R7_ARG5;
+      Register r_cr            = R8_ARG6;
+
+      // Reload some volatile registers which we've spilled before the call
+      // to frame manager / native entry.
+      // Access all locals via frame pointer, because we know nothing about
+      // the topmost frame's size.
+      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
+      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
+      __ ld(r_arg_result_addr,
+            _entry_frame_locals_neg(result_address), r_entryframe_fp);
+      __ ld(r_arg_result_type,
+            _entry_frame_locals_neg(result_type), r_entryframe_fp);
+      __ ld(r_cr, _abi(cr), r_entryframe_fp);
+      __ ld(r_lr, _abi(lr), r_entryframe_fp);
+
+      // pop frame and restore non-volatiles, LR and CR
+      __ mr(R1_SP, r_entryframe_fp);
+      __ mtcr(r_cr);
+      __ mtlr(r_lr);
+
+      // Store result depending on type. Everything that is not
+      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
+      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
+      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
+      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
+      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
+
+      // restore non-volatile registers
+      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
+
+
+      // Stack on exit from call_stub:
+      //
+      //      0       [C_FRAME]
+      //              ...
+      //
+      //  no call_stub frames left.
+
+      // All non-volatiles have been restored at this point!!
+      assert(R3_RET == R3, "R3_RET should be R3");
+
+      __ beq(CCR0, ret_is_object);
+      __ beq(CCR1, ret_is_long);
+      __ beq(CCR5, ret_is_float);
+      __ beq(CCR6, ret_is_double);
+
+      // default:
+      __ stw(R3_RET, 0, r_arg_result_addr);
+      __ blr(); // return to caller
+
+      // case T_OBJECT:
+      __ bind(ret_is_object);
+      __ std(R3_RET, 0, r_arg_result_addr);
+      __ blr(); // return to caller
+
+      // case T_LONG:
+      __ bind(ret_is_long);
+      __ std(R3_RET, 0, r_arg_result_addr);
+      __ blr(); // return to caller
+
+      // case T_FLOAT:
+      __ bind(ret_is_float);
+      __ stfs(F1_RET, 0, r_arg_result_addr);
+      __ blr(); // return to caller
+
+      // case T_DOUBLE:
+      __ bind(ret_is_double);
+      __ stfd(F1_RET, 0, r_arg_result_addr);
+      __ blr(); // return to caller
+    }
+
+    return start;
+  }
+
+  // Return point for a Java call if there's an exception thrown in
+  // Java code.  The exception is caught and transformed into a
+  // pending exception stored in JavaThread that can be tested from
+  // within the VM.
+  //
+  address generate_catch_exception() {
+    StubCodeMark mark(this, "StubRoutines", "catch_exception");
+
+    address start = __ pc();
+
+    // Registers alive
+    //
+    //  R16_thread
+    //  R3_ARG1 - address of pending exception
+    //  R4_ARG2 - return address in call stub
+
+    const Register exception_file = R21_tmp1;
+    const Register exception_line = R22_tmp2;
+
+    __ load_const(exception_file, (void*)__FILE__);
+    __ load_const(exception_line, (void*)__LINE__);
+
+    __ std(R3_ARG1, thread_(pending_exception));
+    // store into `char *'
+    __ std(exception_file, thread_(exception_file));
+    // store into `int'
+    __ stw(exception_line, thread_(exception_line));
+
+    // complete return to VM
+    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
+
+    __ mtlr(R4_ARG2);
+    // continue in call stub
+    __ blr();
+
+    return start;
+  }
+
+  // Continuation point for runtime calls returning with a pending
+  // exception.  The pending exception check happened in the runtime
+  // or native call stub.  The pending exception in Thread is
+  // converted into a Java-level exception.
+  //
+  address generate_forward_exception() {
+    StubCodeMark mark(this, "StubRoutines", "forward_exception");
+    address start = __ pc();
+
+#if !defined(PRODUCT)
+    if (VerifyOops) {
+      // Get pending exception oop.
+      __ ld(R3_ARG1,
+                in_bytes(Thread::pending_exception_offset()),
+                R16_thread);
+      // Make sure that this code is only executed if there is a pending exception.
+      {
+        Label L;
+        __ cmpdi(CCR0, R3_ARG1, 0);
+        __ bne(CCR0, L);
+        __ stop("StubRoutines::forward exception: no pending exception (1)");
+        __ bind(L);
+      }
+      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
+    }
+#endif
+
+    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
+    __ save_LR_CR(R4_ARG2);
+    __ push_frame_abi112(0, R0);
+    // Find exception handler.
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
+                     SharedRuntime::exception_handler_for_return_address),
+                    R16_thread,
+                    R4_ARG2);
+    // Copy handler's address.
+    __ mtctr(R3_RET);
+    __ pop_frame();
+    __ restore_LR_CR(R0);
+
+    // Set up the arguments for the exception handler:
+    //  - R3_ARG1: exception oop
+    //  - R4_ARG2: exception pc.
+
+    // Load pending exception oop.
+    __ ld(R3_ARG1,
+              in_bytes(Thread::pending_exception_offset()),
+              R16_thread);
+
+    // The exception pc is the return address in the caller.
+    // Must load it into R4_ARG2.
+    __ mflr(R4_ARG2);
+
+#ifdef ASSERT
+    // Make sure exception is set.
+    {
+      Label L;
+      __ cmpdi(CCR0, R3_ARG1, 0);
+      __ bne(CCR0, L);
+      __ stop("StubRoutines::forward exception: no pending exception (2)");
+      __ bind(L);
+    }
+#endif
+
+    // Clear the pending exception.
+    __ li(R0, 0);
+    __ std(R0,
+               in_bytes(Thread::pending_exception_offset()),
+               R16_thread);
+    // Jump to exception handler.
+    __ bctr();
+
+    return start;
+  }
+
+#undef __
+#define __ masm->
+  // Continuation point for throwing of implicit exceptions that are
+  // not handled in the current activation. Fabricates an exception
+  // oop and initiates normal exception dispatching in this
+  // frame. Only callee-saved registers are preserved (through the
+  // normal register window / RegisterMap handling).  If the compiler
+  // needs all registers to be preserved between the fault point and
+  // the exception handler then it must assume responsibility for that
+  // in AbstractCompiler::continuation_for_implicit_null_exception or
+  // continuation_for_implicit_division_by_zero_exception. All other
+  // implicit exceptions (e.g., NullPointerException or
+  // AbstractMethodError on entry) are either at call sites or
+  // otherwise assume that stack unwinding will be initiated, so
+  // caller saved registers were assumed volatile in the compiler.
+  //
+  // Note that we generate only this stub into a RuntimeStub, because
+  // it needs to be properly traversed and ignored during GC, so we
+  // change the meaning of the "__" macro within this method.
+  //
+  // Note: the routine set_pc_not_at_call_for_caller in
+  // SharedRuntime.cpp requires that this code be generated into a
+  // RuntimeStub.
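+  //
+  // Illustrative sketch of the generated stub (the runtime entry
+  // fabricates the exception oop and leaves it pending in the thread):
+  //
+  //   save LR/CR; push ABI frame; set last_Java_frame;
+  //   runtime_entry(thread[, arg1[, arg2]]);
+  //   reset last_Java_frame; pop frame; restore LR/CR;
+  //   goto StubRoutines::forward_exception_entry();
+  //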
+  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
+                                   Register arg1 = noreg, Register arg2 = noreg) {
+    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
+    MacroAssembler* masm = new MacroAssembler(&code);
+
+    OopMapSet* oop_maps  = new OopMapSet();
+    int frame_size_in_bytes = frame::abi_112_size;
+    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
+
+    StubCodeMark mark(this, "StubRoutines", "throw_exception");
+
+    address start = __ pc();
+
+    __ save_LR_CR(R11_scratch1);
+
+    // Push a frame.
+    __ push_frame_abi112(0, R11_scratch1);
+
+    address frame_complete_pc = __ pc();
+
+    if (restore_saved_exception_pc) {
+      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
+    }
+
+    // Note that we always have a runtime stub frame on the top of
+    // stack by this point. Remember the offset of the instruction
+    // whose address will be moved to R11_scratch1.
+    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);
+
+    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);
+
+    __ mr(R3_ARG1, R16_thread);
+    if (arg1 != noreg) {
+      __ mr(R4_ARG2, arg1);
+    }
+    if (arg2 != noreg) {
+      __ mr(R5_ARG3, arg2);
+    }
+    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry),
+              relocInfo::none);
+
+    // Set an oopmap for the call site.
+    oop_maps->add_gc_map((int)(gc_map_pc - start), map);
+
+    __ reset_last_Java_frame();
+
+#ifdef ASSERT
+    // Make sure that this code is only executed if there is a pending
+    // exception.
+    {
+      Label L;
+      __ ld(R0,
+                in_bytes(Thread::pending_exception_offset()),
+                R16_thread);
+      __ cmpdi(CCR0, R0, 0);
+      __ bne(CCR0, L);
+      __ stop("StubRoutines::throw_exception: no pending exception");
+      __ bind(L);
+    }
+#endif
+
+    // Pop frame.
+    __ pop_frame();
+
+    __ restore_LR_CR(R11_scratch1);
+
+    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
+    __ mtctr(R11_scratch1);
+    __ bctr();
+
+    // Create runtime stub with OopMap.
+    RuntimeStub* stub =
+      RuntimeStub::new_runtime_stub(name, &code,
+                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
+                                    frame_size_in_bytes/wordSize,
+                                    oop_maps,
+                                    false);
+    return stub->entry_point();
+  }
+#undef __
+#define __ _masm->
+
+  //  Generate G1 pre-write barrier for array.
+  //
+  //  Input:
+  //     from     - register containing src address (only needed for spilling)
+  //     to       - register containing starting address
+  //     count    - register containing element count
+  //     tmp      - scratch register
+  //
+  //  Kills:
+  //     nothing
+  //
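+  //  With G1 this conceptually performs (a hedged sketch, not VM API):
+  //
+  //    if (!dest_uninitialized && SATB marking is active) {
+  //      // Record the oops about to be overwritten so concurrent
+  //      // marking does not lose them.
+  //      BarrierSet::static_write_ref_array_pre(to, count);
+  //    }
+  //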
+  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1) {
+    BarrierSet* const bs = Universe::heap()->barrier_set();
+    switch (bs->kind()) {
+      case BarrierSet::G1SATBCT:
+      case BarrierSet::G1SATBCTLogging:
+        // With G1, don't generate the call if we statically know that the target is uninitialized.
+        if (!dest_uninitialized) {
+          const int spill_slots = 4 * wordSize;
+          const int frame_size  = frame::abi_112_size + spill_slots;
+          Label filtered;
+
+          // Is marking active?
+          if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
+            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
+          } else {
+            guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
+            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()), R16_thread);
+          }
+          __ cmpdi(CCR0, Rtmp1, 0);
+          __ beq(CCR0, filtered);
+
+          __ save_LR_CR(R0);
+          __ push_frame_abi112(spill_slots, R0);
+          __ std(from,  frame_size - 1 * wordSize, R1_SP);
+          __ std(to,    frame_size - 2 * wordSize, R1_SP);
+          __ std(count, frame_size - 3 * wordSize, R1_SP);
+
+          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
+
+          __ ld(from,  frame_size - 1 * wordSize, R1_SP);
+          __ ld(to,    frame_size - 2 * wordSize, R1_SP);
+          __ ld(count, frame_size - 3 * wordSize, R1_SP);
+          __ pop_frame();
+          __ restore_LR_CR(R0);
+
+          __ bind(filtered);
+        }
+        break;
+      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableExtension:
+      case BarrierSet::ModRef:
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+
+  //  Generate CMS/G1 post-write barrier for array.
+  //
+  //  Input:
+  //     addr     - register containing starting address
+  //     count    - register containing element count
+  //     tmp      - scratch register
+  //
+  //  The input registers and R0 are overwritten.
+  //
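+  //  For the card table cases the generated code is roughly (a sketch;
+  //  assumes jbyte cards with dirty value 0 as in current HotSpot):
+  //
+  //    jbyte* card = ct->byte_map_base + (addr >> card_shift);
+  //    jbyte* last = ct->byte_map_base
+  //                  + ((addr + (count - 1) * BytesPerHeapOop) >> card_shift);
+  //    while (card <= last) *card++ = 0;  // dirty every spanned card
+  //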
+  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, bool branchToEnd) {
+    BarrierSet* const bs = Universe::heap()->barrier_set();
+
+    switch (bs->kind()) {
+      case BarrierSet::G1SATBCT:
+      case BarrierSet::G1SATBCTLogging:
+        {
+          if (branchToEnd) {
+            __ save_LR_CR(R0);
+            // We need this frame only to spill LR.
+            __ push_frame_abi112(0, R0);
+            __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
+            __ pop_frame();
+            __ restore_LR_CR(R0);
+          } else {
+            // Tail call: fake call from stub caller by branching without linking.
+            address entry_point = (address)CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
+            __ mr_if_needed(R3_ARG1, addr);
+            __ mr_if_needed(R4_ARG2, count);
+            __ load_const(R11, entry_point, R0);
+            __ call_c_and_return_to_caller(R11);
+          }
+        }
+        break;
+      case BarrierSet::CardTableModRef:
+      case BarrierSet::CardTableExtension:
+        {
+          Label Lskip_loop, Lstore_loop;
+          if (UseConcMarkSweepGC) {
+            // TODO PPC port: contribute optimization / requires shared changes
+            __ release();
+          }
+
+          CardTableModRefBS* const ct = (CardTableModRefBS*)bs;
+          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+          assert_different_registers(addr, count, tmp);
+
+          __ sldi(count, count, LogBytesPerHeapOop);
+          __ addi(count, count, -BytesPerHeapOop);
+          __ add(count, addr, count);
+          // Use two shifts to clear out those low order two bits! (Cannot be optimized into one.)
+          __ srdi(addr, addr, CardTableModRefBS::card_shift);
+          __ srdi(count, count, CardTableModRefBS::card_shift);
+          __ subf(count, addr, count);
+          assert_different_registers(R0, addr, count, tmp);
+          __ load_const(tmp, (address)ct->byte_map_base);
+          __ addic_(count, count, 1);
+          __ beq(CCR0, Lskip_loop);
+          __ li(R0, 0);
+          __ mtctr(count);
+          // Byte store loop
+          __ bind(Lstore_loop);
+          __ stbx(R0, tmp, addr);
+          __ addi(addr, addr, 1);
+          __ bdnz(Lstore_loop);
+          __ bind(Lskip_loop);
+
+          if (!branchToEnd) __ blr();
+        }
+        break;
+      case BarrierSet::ModRef:
+        if (!branchToEnd) __ blr();
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+  }
+
+  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
+  //
+  // Arguments:
+  //   to:    R3_ARG1, start address (must be 8-byte aligned)
+  //   count: R4_ARG2, number of dwords (8-byte words) to clear
+  //
+  // Destroys:
+  //   R3_ARG1-R7_ARG5, CTR, CCR0 and CCR1
+  //
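+  // Semantically (a minimal sketch) the routine is:
+  //
+  //   for (size_t i = 0; i < count; i++) ((julong*)to)[i] = 0;
+  //
+  // but large blocks are cleared a whole cache line at a time with dcbz.
+  //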
+  address generate_zero_words_aligned8() {
+    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");
+
+    // Implemented as in ClearArray.
+    address start = __ emit_fd();
+
+    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
+    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
+    Register tmp1_reg       = R5_ARG3;
+    Register tmp2_reg       = R6_ARG4;
+    Register zero_reg       = R7_ARG5;
+
+    // Procedure for large arrays (uses data cache block zero instruction).
+    Label dwloop, fast, fastloop, restloop, lastdword, done;
+    int cl_size=VM_Version::get_cache_line_size(), cl_dwords=cl_size>>3, cl_dwordaddr_bits=exact_log2(cl_dwords);
+    int min_dcbz=2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
+
+    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
+    __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
+    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
+    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
+    __ load_const_optimized(zero_reg, 0L);      // Use as zero register.
+
+    __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
+    __ beq(CCR0, lastdword);                    // size <= 1
+    __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
+    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
+    __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000
+
+    __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
+    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.
+
+    __ beq(CCR0, fast);                         // already 128byte aligned
+    __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
+    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)
+
+    // Clear in first cache line dword-by-dword if not already 128byte aligned.
+    __ bind(dwloop);
+      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
+      __ addi(base_ptr_reg, base_ptr_reg, 8);
+    __ bdnz(dwloop);
+
+    // clear 128byte blocks
+    __ bind(fast);
+    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
+    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even
+
+    __ mtctr(tmp1_reg);                         // load counter
+    __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
+    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords
+
+    __ bind(fastloop);
+      __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
+      __ addi(base_ptr_reg, base_ptr_reg, cl_size);
+    __ bdnz(fastloop);
+
+    //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
+    __ beq(CCR0, lastdword);                    // rest<=1
+    __ mtctr(tmp1_reg);                         // load counter
+
+    // Clear rest.
+    __ bind(restloop);
+      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
+      __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
+      __ addi(base_ptr_reg, base_ptr_reg, 16);
+    __ bdnz(restloop);
+
+    __ bind(lastdword);
+    __ beq(CCR1, done);
+    __ std(zero_reg, 0, base_ptr_reg);
+    __ bind(done);
+    __ blr();                                   // return
+
+    return start;
+  }
+
+  // The following routine generates a subroutine to throw an asynchronous
+  // UnknownError when an unsafe access gets a fault that could not be
+  // reasonably prevented by the programmer.  (Example: SIGBUS/OBJERR.)
+  //
+  address generate_handler_for_unsafe_access() {
+    StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
+    address start = __ emit_fd();
+    __ unimplemented("StubRoutines::handler_for_unsafe_access", 93);
+    return start;
+  }
+
+#if !defined(PRODUCT)
+  // Wrapper which calls oopDesc::is_oop_or_null()
+  // Only called by MacroAssembler::verify_oop
+  static void verify_oop_helper(const char* message, oop o) {
+    if (!o->is_oop_or_null()) {
+      fatal(message);
+    }
+    ++ StubRoutines::_verify_oop_count;
+  }
+#endif
+
+  // Return address of code to be called from code generated by
+  // MacroAssembler::verify_oop.
+  //
+  // Don't generate, rather use C++ code.
+  address generate_verify_oop() {
+    StubCodeMark mark(this, "StubRoutines", "verify_oop");
+
+    // this is actually a `FunctionDescriptor*'.
+    address start = 0;
+
+#if !defined(PRODUCT)
+    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
+#endif
+
+    return start;
+  }
+
+  // Fairer handling of safepoints for native methods.
+  //
+  // Generate code which reads from the polling page. This special handling is needed as the
+  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
+  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
+  // to read from the safepoint polling page.
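+  //
+  // Sketch of the intent (the stub below is still unimplemented): perform
+  // the poll read, e.g. a lwz from the polling page, at a known pc so the
+  // signal handler can recognize a safepoint poll by the faulting pc
+  // instead of relying on si_addr.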
+  address generate_load_from_poll() {
+    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
+    address start = __ emit_fd();
+    __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
+    return start;
+  }
+
+  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
+  //
+  // The code was ported from sparc because we believe it benefits JVM98; however,
+  // tracing (-XX:+TraceOptimizeFill) shows that the intrinsic replacement doesn't happen at all!
+  //
+  // The source code of is_range_check_if() shows that OptimizeFill relaxed the condition
+  // for enabling the loop predication optimization. Hence the behavior of "array range check"
+  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
+  //
+  // Generate stub for disjoint short fill. If "aligned" is true, the
+  // "to" address is assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //   to:    R3_ARG1
+  //   value: R4_ARG2
+  //   count: R5_ARG3 treated as signed
+  //
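+  // Behaviorally the stub is an element-wise fill (a minimal sketch that
+  // ignores the alignment handling and loop unrolling below):
+  //
+  //   for (int i = 0; i < count; i++) to[i] = value;  // element type t
+  //
+  // The fill value is first replicated across a 64-bit register so the
+  // main loop can store 32 bytes per iteration.
+  //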
+  address generate_fill(BasicType t, bool aligned, const char* name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    const Register to    = R3_ARG1;   // source array address
+    const Register value = R4_ARG2;   // fill value
+    const Register count = R5_ARG3;   // elements count
+    const Register temp  = R6_ARG4;   // temp register
+
+    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.
+
+    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
+    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;
+
+    int shift = -1;
+    switch (t) {
+      case T_BYTE:
+        shift = 2;
+        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
+        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
+        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
+        __ blt(CCR0, L_fill_elements);
+        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
+        break;
+      case T_SHORT:
+        shift = 1;
+        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
+        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
+        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
+        __ blt(CCR0, L_fill_elements);
+        break;
+      case T_INT:
+        shift = 0;
+        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
+        __ blt(CCR0, L_fill_4_bytes);
+        break;
+      default: ShouldNotReachHere();
+    }
+
+    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
+      // Align source address at 4 bytes address boundary.
+      if (t == T_BYTE) {
+        // One byte misalignment happens only for byte arrays.
+        __ andi_(temp, to, 1);
+        __ beq(CCR0, L_skip_align1);
+        __ stb(value, 0, to);
+        __ addi(to, to, 1);
+        __ addi(count, count, -1);
+        __ bind(L_skip_align1);
+      }
+      // Two bytes misalignment happens only for byte and short (char) arrays.
+      __ andi_(temp, to, 2);
+      __ beq(CCR0, L_skip_align2);
+      __ sth(value, 0, to);
+      __ addi(to, to, 2);
+      __ addi(count, count, -(1 << (shift - 1)));
+      __ bind(L_skip_align2);
+    }
+
+    if (!aligned) {
+      // Align to 8 bytes, we know we are 4 byte aligned to start.
+      __ andi_(temp, to, 7);
+      __ beq(CCR0, L_fill_32_bytes);
+      __ stw(value, 0, to);
+      __ addi(to, to, 4);
+      __ addi(count, count, -(1 << shift));
+      __ bind(L_fill_32_bytes);
+    }
+
+    __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
+    // Clone bytes int->long as above.
+    __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
+
+    Label L_check_fill_8_bytes;
+    // Fill 32-byte chunks.
+    __ subf_(count, temp, count);
+    __ blt(CCR0, L_check_fill_8_bytes);
+
+    Label L_fill_32_bytes_loop;
+    __ align(32);
+    __ bind(L_fill_32_bytes_loop);
+
+    __ std(value, 0, to);
+    __ std(value, 8, to);
+    __ subf_(count, temp, count);           // Update count.
+    __ std(value, 16, to);
+    __ std(value, 24, to);
+
+    __ addi(to, to, 32);
+    __ bge(CCR0, L_fill_32_bytes_loop);
+
+    __ bind(L_check_fill_8_bytes);
+    __ add_(count, temp, count);
+    __ beq(CCR0, L_exit);
+    __ addic_(count, count, -(2 << shift));
+    __ blt(CCR0, L_fill_4_bytes);
+
+    //
+    // Length is too short, just fill 8 bytes at a time.
+    //
+    Label L_fill_8_bytes_loop;
+    __ bind(L_fill_8_bytes_loop);
+    __ std(value, 0, to);
+    __ addic_(count, count, -(2 << shift));
+    __ addi(to, to, 8);
+    __ bge(CCR0, L_fill_8_bytes_loop);
+
+    // Fill trailing 4 bytes.
+    __ bind(L_fill_4_bytes);
+    __ andi_(temp, count, 1<<shift);
+    __ beq(CCR0, L_fill_2_bytes);
+
+    __ stw(value, 0, to);
+    if (t == T_BYTE || t == T_SHORT) {
+      __ addi(to, to, 4);
+      // Fill trailing 2 bytes.
+      __ bind(L_fill_2_bytes);
+      __ andi_(temp, count, 1<<(shift-1));
+      __ beq(CCR0, L_fill_byte);
+      __ sth(value, 0, to);
+      if (t == T_BYTE) {
+        __ addi(to, to, 2);
+        // Fill trailing byte.
+        __ bind(L_fill_byte);
+        __ andi_(count, count, 1);
+        __ beq(CCR0, L_exit);
+        __ stb(value, 0, to);
+      } else {
+        __ bind(L_fill_byte);
+      }
+    } else {
+      __ bind(L_fill_2_bytes);
+    }
+    __ bind(L_exit);
+    __ blr();
+
+    // Handle fills of less than 8 bytes. Int is handled elsewhere.
+    if (t == T_BYTE) {
+      __ bind(L_fill_elements);
+      Label L_fill_2, L_fill_4;
+      __ andi_(temp, count, 1);
+      __ beq(CCR0, L_fill_2);
+      __ stb(value, 0, to);
+      __ addi(to, to, 1);
+      __ bind(L_fill_2);
+      __ andi_(temp, count, 2);
+      __ beq(CCR0, L_fill_4);
+      __ stb(value, 0, to);
+      __ stb(value, 1, to);
+      __ addi(to, to, 2);
+      __ bind(L_fill_4);
+      __ andi_(temp, count, 4);
+      __ beq(CCR0, L_exit);
+      __ stb(value, 0, to);
+      __ stb(value, 1, to);
+      __ stb(value, 2, to);
+      __ stb(value, 3, to);
+      __ blr();
+    }
+
+    if (t == T_SHORT) {
+      Label L_fill_2;
+      __ bind(L_fill_elements);
+      __ andi_(temp, count, 1);
+      __ beq(CCR0, L_fill_2);
+      __ sth(value, 0, to);
+      __ addi(to, to, 2);
+      __ bind(L_fill_2);
+      __ andi_(temp, count, 2);
+      __ beq(CCR0, L_exit);
+      __ sth(value, 0, to);
+      __ sth(value, 2, to);
+      __ blr();
+    }
+    return start;
+  }
+
+
+  // Generate overlap test for array copy stubs.
+  //
+  // Input:
+  //   R3_ARG1    -  from
+  //   R4_ARG2    -  to
+  //   R5_ARG3    -  element count
+  //
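+  // Branches to the inline backward-copy continuation iff a forward copy
+  // would clobber not-yet-read source elements, i.e. (sketch):
+  //
+  //   if (from < to && (size_t)(to - from) < ((size_t)count << log2_elem_size))
+  //     goto l_overlap;            // copy backwards
+  //   goto no_overlap_target;      // copy forwards
+  //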
+  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+
+    Label l_overlap;
+#ifdef ASSERT
+    __ srdi_(tmp2, R5_ARG3, 31);
+    __ asm_assert_eq("missing zero extend", 0xAFFE);
+#endif
+
+    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
+    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
+    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
+    __ cmpld(CCR1, tmp1, tmp2);
+    __ crand(/*CCR0 lt*/0, /*CCR1 lt*/4+0, /*CCR0 lt*/0);
+    __ blt(CCR0, l_overlap); // Src before dst and distance smaller than size.
+
+    // need to copy forwards
+    if (__ is_within_range_of_b(no_overlap_target, __ pc())) {
+      __ b(no_overlap_target);
+    } else {
+      __ load_const(tmp1, no_overlap_target, tmp2);
+      __ mtctr(tmp1);
+      __ bctr();
+    }
+
+    __ bind(l_overlap);
+    // need to copy backwards
+  }
+
+  // The guideline in the implementations of generate_disjoint_xxx_copy
+  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
+  // single instructions, but to avoid alignment interrupts (see subsequent
+  // comment). Furthermore, we try to minimize misaligned access, even
+  // though they cause no alignment interrupt.
+  //
+  // In Big-Endian mode, the PowerPC architecture requires implementations to
+  // handle automatically misaligned integer halfword and word accesses,
+  // word-aligned integer doubleword accesses, and word-aligned floating-point
+  // accesses. Other accesses may or may not generate an Alignment interrupt
+  // depending on the implementation.
+  // Alignment interrupt handling may require on the order of hundreds of cycles,
+  // so every effort should be made to avoid misaligned memory values.
+  //
+  //
+  // Generate stub for disjoint byte copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
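+  // Semantics (sketch): for (int i = 0; i < count; i++) to[i] = from[i];
+  // The loops below merely do this 32, 4, or 1 element(s) at a time.
+  //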
+  address generate_disjoint_byte_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R9_ARG7;
+
+
+    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
+    // Don't try anything fancy if arrays don't have many elements.
+    __ li(tmp3, 0);
+    __ cmpwi(CCR0, R5_ARG3, 17);
+    __ ble(CCR0, l_6); // copy 4 at a time
+
+    if (!aligned) {
+      __ xorr(tmp1, R3_ARG1, R4_ARG2);
+      __ andi_(tmp1, tmp1, 3);
+      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
+
+      // Copy elements if necessary to align to 4 bytes.
+      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
+      __ andi_(tmp1, tmp1, 3);
+      __ beq(CCR0, l_2);
+
+      __ subf(R5_ARG3, tmp1, R5_ARG3);
+      __ bind(l_9);
+      __ lbz(tmp2, 0, R3_ARG1);
+      __ addic_(tmp1, tmp1, -1);
+      __ stb(tmp2, 0, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, 1);
+      __ addi(R4_ARG2, R4_ARG2, 1);
+      __ bne(CCR0, l_9);
+
+      __ bind(l_2);
+    }
+
+    // copy 8 elements at a time
+    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
+    __ andi_(tmp1, tmp2, 7);
+    __ bne(CCR0, l_7); // Not the same alignment mod 8 -> can't 8-byte align both; copy 8 at a time anyway.
+
+    // copy a 2-element word if necessary to align to 8 bytes
+    __ andi_(R0, R3_ARG1, 7);
+    __ beq(CCR0, l_7);
+
+    __ lwzx(tmp2, R3_ARG1, tmp3);
+    __ addi(R5_ARG3, R5_ARG3, -4);
+    __ stwx(tmp2, R4_ARG2, tmp3);
+    { // FasterArrayCopy
+      __ addi(R3_ARG1, R3_ARG1, 4);
+      __ addi(R4_ARG2, R4_ARG2, 4);
+    }
+    __ bind(l_7);
+
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 31);
+      __ ble(CCR0, l_6); // copy 2 at a time if less than 32 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 5);
+      __ andi_(R5_ARG3, R5_ARG3, 31);
+      __ mtctr(tmp1);
+
+      __ bind(l_8);
+      // Use unrolled version for mass copying (copy 32 elements a time)
+      // Load feeding store gets zero latency on Power6, however not on Power5.
+      // Therefore, the following sequence is made for the good of both.
+      __ ld(tmp1, 0, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ std(tmp1, 0, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp4, 24, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, 32);
+      __ addi(R4_ARG2, R4_ARG2, 32);
+      __ bdnz(l_8);
+    }
+
+    __ bind(l_6);
+
+    // copy 4 elements at a time
+    __ cmpwi(CCR0, R5_ARG3, 4);
+    __ blt(CCR0, l_1);
+    __ srdi(tmp1, R5_ARG3, 2);
+    __ mtctr(tmp1); // is > 0
+    __ andi_(R5_ARG3, R5_ARG3, 3);
+
+    { // FasterArrayCopy
+      __ addi(R3_ARG1, R3_ARG1, -4);
+      __ addi(R4_ARG2, R4_ARG2, -4);
+      __ bind(l_3);
+      __ lwzu(tmp2, 4, R3_ARG1);
+      __ stwu(tmp2, 4, R4_ARG2);
+      __ bdnz(l_3);
+      __ addi(R3_ARG1, R3_ARG1, 4);
+      __ addi(R4_ARG2, R4_ARG2, 4);
+    }
+
+    // do single element copy
+    __ bind(l_1);
+    __ cmpwi(CCR0, R5_ARG3, 0);
+    __ beq(CCR0, l_4);
+
+    { // FasterArrayCopy
+      __ mtctr(R5_ARG3);
+      __ addi(R3_ARG1, R3_ARG1, -1);
+      __ addi(R4_ARG2, R4_ARG2, -1);
+
+      __ bind(l_5);
+      __ lbzu(tmp2, 1, R3_ARG1);
+      __ stbu(tmp2, 1, R4_ARG2);
+      __ bdnz(l_5);
+    }
+
+    __ bind(l_4);
+    __ blr();
+
+    return start;
+  }
+
+  // Generate stub for conjoint byte copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_conjoint_byte_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+
+    address nooverlap_target = aligned ?
+      ((FunctionDescriptor*)StubRoutines::arrayof_jbyte_disjoint_arraycopy())->entry() :
+      ((FunctionDescriptor*)StubRoutines::jbyte_disjoint_arraycopy())->entry();
+
+    array_overlap_test(nooverlap_target, 0);
+    // Do reverse copy. We assume the case of actual overlap is rare enough
+    // that we don't have to optimize it.
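+    // Sketch: for (int i = count - 1; i >= 0; i--) to[i] = from[i];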
+    Label l_1, l_2;
+
+    __ b(l_2);
+    __ bind(l_1);
+    __ stbx(tmp1, R4_ARG2, R5_ARG3);
+    __ bind(l_2);
+    __ addic_(R5_ARG3, R5_ARG3, -1);
+    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
+    __ bge(CCR0, l_1);
+
+    __ blr();
+
+    return start;
+  }
+
+  // Generate stub for disjoint short copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //  elm.count: R5_ARG3 treated as signed
+  //
+  // Strategy for aligned==true:
+  //
+  //  If length <= 9:
+  //     1. copy 2 elements at a time (l_6)
+  //     2. copy last element if original element count was odd (l_1)
+  //
+  //  If length > 9:
+  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
+  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
+  //     3. copy last element if one was left in step 2. (l_1)
+  //
+  //
+  // Strategy for aligned==false:
+  //
+  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
+  //                  can be unaligned (see comment below)
+  //
+  //  If length > 9:
+  //     1. continue with step 6. if the alignment of from and to mod 4
+  //        is different.
+  //     2. align from and to to 4 bytes by copying 1 element if necessary
+  //     3. at l_2 from and to are 4 byte aligned; continue with
+  //        5. if they cannot be aligned to 8 bytes because they have
+  //        got different alignment mod 8.
+  //     4. at this point we know that both, from and to, have the same
+  //        alignment mod 8, now copy one element if necessary to get
+  //        8 byte alignment of from and to.
+  //     5. copy 4 elements at a time until less than 4 elements are
+  //        left; depending on step 3. all load/stores are aligned or
+  //        either all loads or all stores are unaligned.
+  //     6. copy 2 elements at a time until less than 2 elements are
+  //        left (l_6); arriving here from step 1., there is a chance
+  //        that all accesses are unaligned.
+  //     7. copy last element if one was left in step 6. (l_1)
+  //
+  //  There are unaligned data accesses using integer load/store
+  //  instructions in this stub. POWER allows such accesses.
+  //
+  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
+  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
+  //  integer load/stores have good performance. Only unaligned
+  //  floating point load/stores can have poor performance.
+  //
+  //  TODO:
+  //
+  //  1. check if aligning the backbranch target of loops is beneficial
+  //
+  address generate_disjoint_short_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R9_ARG7;
+
+    address start = __ emit_fd();
+
+    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;
+    // don't try anything fancy if arrays don't have many elements
+    __ li(tmp3, 0);
+    __ cmpwi(CCR0, R5_ARG3, 9);
+    __ ble(CCR0, l_6); // copy 2 at a time
+
+    if (!aligned) {
+      __ xorr(tmp1, R3_ARG1, R4_ARG2);
+      __ andi_(tmp1, tmp1, 3);
+      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
+
+      // At this point it is guaranteed that both, from and to have the same alignment mod 4.
+
+      // Copy 1 element if necessary to align to 4 bytes.
+      __ andi_(tmp1, R3_ARG1, 3);
+      __ beq(CCR0, l_2);
+
+      __ lhz(tmp2, 0, R3_ARG1);
+      __ addi(R3_ARG1, R3_ARG1, 2);
+      __ sth(tmp2, 0, R4_ARG2);
+      __ addi(R4_ARG2, R4_ARG2, 2);
+      __ addi(R5_ARG3, R5_ARG3, -1);
+      __ bind(l_2);
+
+      // At this point the positions of both, from and to, are at least 4 byte aligned.
+
+      // Copy 4 elements at a time.
+      // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
+      __ xorr(tmp2, R3_ARG1, R4_ARG2);
+      __ andi_(tmp1, tmp2, 7);
+      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
+
+      // Copy a 2-element word if necessary to align to 8 bytes.
+      __ andi_(R0, R3_ARG1, 7);
+      __ beq(CCR0, l_7);
+
+      __ lwzx(tmp2, R3_ARG1, tmp3);
+      __ addi(R5_ARG3, R5_ARG3, -2);
+      __ stwx(tmp2, R4_ARG2, tmp3);
+      { // FasterArrayCopy
+        __ addi(R3_ARG1, R3_ARG1, 4);
+        __ addi(R4_ARG2, R4_ARG2, 4);
+      }
+    }
+
+    __ bind(l_7);
+
+    // Copy 4 elements at a time; either the loads or the stores can
+    // be unaligned if aligned == false.
+
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 15);
+      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 4);
+      __ andi_(R5_ARG3, R5_ARG3, 15);
+      __ mtctr(tmp1);
+
+      __ bind(l_8);
+      // Use unrolled version for mass copying (copy 16 elements a time).
+      // Load feeding store gets zero latency on Power6, however not on Power5.
+      // Therefore, the following sequence is made for the good of both.
+      __ ld(tmp1, 0, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ std(tmp1, 0, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp4, 24, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, 32);
+      __ addi(R4_ARG2, R4_ARG2, 32);
+      __ bdnz(l_8);
+    }
+    __ bind(l_6);
+
+    // copy 2 elements at a time
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 2);
+      __ blt(CCR0, l_1);
+      __ srdi(tmp1, R5_ARG3, 1);
+      __ andi_(R5_ARG3, R5_ARG3, 1);
+
+      __ addi(R3_ARG1, R3_ARG1, -4);
+      __ addi(R4_ARG2, R4_ARG2, -4);
+      __ mtctr(tmp1);
+
+      __ bind(l_3);
+      __ lwzu(tmp2, 4, R3_ARG1);
+      __ stwu(tmp2, 4, R4_ARG2);
+      __ bdnz(l_3);
+
+      __ addi(R3_ARG1, R3_ARG1, 4);
+      __ addi(R4_ARG2, R4_ARG2, 4);
+    }
+
+    // do single element copy
+    __ bind(l_1);
+    __ cmpwi(CCR0, R5_ARG3, 0);
+    __ beq(CCR0, l_4);
+
+    { // FasterArrayCopy
+      __ mtctr(R5_ARG3);
+      __ addi(R3_ARG1, R3_ARG1, -2);
+      __ addi(R4_ARG2, R4_ARG2, -2);
+
+      __ bind(l_5);
+      __ lhzu(tmp2, 2, R3_ARG1);
+      __ sthu(tmp2, 2, R4_ARG2);
+      __ bdnz(l_5);
+    }
+    __ bind(l_4);
+    __ blr();
+
+    return start;
+  }
+
+  // Generate stub for conjoint short copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_conjoint_short_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+
+    address nooverlap_target = aligned ?
+        ((FunctionDescriptor*)StubRoutines::arrayof_jshort_disjoint_arraycopy())->entry() :
+        ((FunctionDescriptor*)StubRoutines::jshort_disjoint_arraycopy())->entry();
+
+    array_overlap_test(nooverlap_target, 1);
+
+    Label l_1, l_2;
+    __ sldi(tmp1, R5_ARG3, 1);
+    __ b(l_2);
+    __ bind(l_1);
+    __ sthx(tmp2, R4_ARG2, tmp1);
+    __ bind(l_2);
+    __ addic_(tmp1, tmp1, -2);
+    __ lhzx(tmp2, R3_ARG1, tmp1);
+    __ bge(CCR0, l_1);
+
+    __ blr();
+
+    return start;
+  }
+
+  // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
+  // is true, the "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  void generate_disjoint_int_copy_core(bool aligned) {
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R0;
+
+    Label l_1, l_2, l_3, l_4, l_5, l_6;
+    // for short arrays, just do single element copy
+    __ li(tmp3, 0);
+    __ cmpwi(CCR0, R5_ARG3, 5);
+    __ ble(CCR0, l_2);
+
+    if (!aligned) {
+      // Check if arrays have same alignment mod 8.
+      __ xorr(tmp1, R3_ARG1, R4_ARG2);
+      __ andi_(R0, tmp1, 7);
+      // Not the same alignment, but ld and std just need to be 4 byte aligned.
+      __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time
+
+      // Copy 1 element to align to and from on an 8 byte boundary.
+      __ andi_(R0, R3_ARG1, 7);
+      __ beq(CCR0, l_4);
+
+      __ lwzx(tmp2, R3_ARG1, tmp3);
+      __ addi(R5_ARG3, R5_ARG3, -1);
+      __ stwx(tmp2, R4_ARG2, tmp3);
+      { // FasterArrayCopy
+        __ addi(R3_ARG1, R3_ARG1, 4);
+        __ addi(R4_ARG2, R4_ARG2, 4);
+      }
+      __ bind(l_4);
+    }
+
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 7);
+      __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 3);
+      __ andi_(R5_ARG3, R5_ARG3, 7);
+      __ mtctr(tmp1);
+
+      __ bind(l_6);
+      // Use unrolled version for mass copying (copy 8 elements a time).
+      // Load feeding store gets zero latency on power6, however not on power 5.
+      // Therefore, the following sequence is made for the good of both.
+      __ ld(tmp1, 0, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ std(tmp1, 0, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp4, 24, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, 32);
+      __ addi(R4_ARG2, R4_ARG2, 32);
+      __ bdnz(l_6);
+    }
+
+    // copy 1 element at a time
+    __ bind(l_2);
+    __ cmpwi(CCR0, R5_ARG3, 0);
+    __ beq(CCR0, l_1);
+
+    { // FasterArrayCopy
+      __ mtctr(R5_ARG3);
+      __ addi(R3_ARG1, R3_ARG1, -4);
+      __ addi(R4_ARG2, R4_ARG2, -4);
+
+      __ bind(l_3);
+      __ lwzu(tmp2, 4, R3_ARG1);
+      __ stwu(tmp2, 4, R4_ARG2);
+      __ bdnz(l_3);
+    }
+
+    __ bind(l_1);
+    return;
+  }
+
+  // Generate stub for disjoint int copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_disjoint_int_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+    generate_disjoint_int_copy_core(aligned);
+    __ blr();
+    return start;
+  }
+
+  // Generate core code for conjoint int copy (and oop copy on
+  // 32-bit).  If "aligned" is true, the "from" and "to" addresses
+  // are assumed to be heapword aligned.
+  //
+  // Arguments:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  void generate_conjoint_int_copy_core(bool aligned) {
+    // Do reverse copy.  We assume the case of actual overlap is rare enough
+    // that we don't have to optimize it.
+
+    Label l_1, l_2, l_3, l_4, l_5, l_6;
+
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R0;
+
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 0);
+      __ beq(CCR0, l_6);
+
+      __ sldi(R5_ARG3, R5_ARG3, 2);
+      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
+      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
+      __ srdi(R5_ARG3, R5_ARG3, 2);
+
+      __ cmpwi(CCR0, R5_ARG3, 7);
+      __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 3);
+      __ andi(R5_ARG3, R5_ARG3, 7);
+      __ mtctr(tmp1);
+
+      __ bind(l_4);
+      // Use unrolled version for mass copying (copy 4 elements a time).
+      // Load feeding store gets zero latency on Power6, however not on Power5.
+      // Therefore, the following sequence is made for the good of both.
+      __ addi(R3_ARG1, R3_ARG1, -32);
+      __ addi(R4_ARG2, R4_ARG2, -32);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp1, 0, R3_ARG1);
+      __ std(tmp4, 24, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp1, 0, R4_ARG2);
+      __ bdnz(l_4);
+
+      __ cmpwi(CCR0, R5_ARG3, 0);
+      __ beq(CCR0, l_6);
+
+      __ bind(l_5);
+      __ mtctr(R5_ARG3);
+      __ bind(l_3);
+      __ lwz(R0, -4, R3_ARG1);
+      __ stw(R0, -4, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, -4);
+      __ addi(R4_ARG2, R4_ARG2, -4);
+      __ bdnz(l_3);
+
+      __ bind(l_6);
+    }
+  }
+
+  // Generate stub for conjoint int copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_conjoint_int_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    address nooverlap_target = aligned ?
+      ((FunctionDescriptor*)StubRoutines::arrayof_jint_disjoint_arraycopy())->entry() :
+      ((FunctionDescriptor*)StubRoutines::jint_disjoint_arraycopy())->entry();
+
+    array_overlap_test(nooverlap_target, 2);
+
+    generate_conjoint_int_copy_core(aligned);
+
+    __ blr();
+
+    return start;
+  }
+
+  // Generate core code for disjoint long copy (and oop copy on
+  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
+  // are assumed to be heapword aligned.
+  //
+  // Arguments:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  void generate_disjoint_long_copy_core(bool aligned) {
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R0;
+
+    Label l_1, l_2, l_3, l_4;
+
+    { // FasterArrayCopy
+      __ cmpwi(CCR0, R5_ARG3, 3);
+      __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 2);
+      __ andi_(R5_ARG3, R5_ARG3, 3);
+      __ mtctr(tmp1);
+
+      __ bind(l_4);
+      // Use unrolled version for mass copying (copy 4 elements a time).
+      // Load feeding store gets zero latency on Power6, however not on Power5.
+      // Therefore, the following sequence is made for the good of both.
+      __ ld(tmp1, 0, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ std(tmp1, 0, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp4, 24, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, 32);
+      __ addi(R4_ARG2, R4_ARG2, 32);
+      __ bdnz(l_4);
+    }
+
+    // copy 1 element at a time
+    __ bind(l_3);
+    __ cmpwi(CCR0, R5_ARG3, 0);
+    __ beq(CCR0, l_1);
+
+    { // FasterArrayCopy
+      __ mtctr(R5_ARG3);
+      __ addi(R3_ARG1, R3_ARG1, -8);
+      __ addi(R4_ARG2, R4_ARG2, -8);
+
+      __ bind(l_2);
+      __ ldu(R0, 8, R3_ARG1);
+      __ stdu(R0, 8, R4_ARG2);
+      __ bdnz(l_2);
+
+    }
+    __ bind(l_1);
+  }
+
+  // Generate stub for disjoint long copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_disjoint_long_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+    generate_disjoint_long_copy_core(aligned);
+    __ blr();
+
+    return start;
+  }
+
+  // Generate core code for conjoint long copy (and oop copy on
+  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
+  // are assumed to be heapword aligned.
+  //
+  // Arguments:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  void generate_conjoint_long_copy_core(bool aligned) {
+    Register tmp1 = R6_ARG4;
+    Register tmp2 = R7_ARG5;
+    Register tmp3 = R8_ARG6;
+    Register tmp4 = R0;
+
+    Label l_1, l_2, l_3, l_4, l_5;
+
+    __ cmpwi(CCR0, R5_ARG3, 0);
+    __ beq(CCR0, l_1);
+
+    { // FasterArrayCopy
+      __ sldi(R5_ARG3, R5_ARG3, 3);
+      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
+      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
+      __ srdi(R5_ARG3, R5_ARG3, 3);
+
+      __ cmpwi(CCR0, R5_ARG3, 3);
+      __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
+
+      __ srdi(tmp1, R5_ARG3, 2);
+      __ andi(R5_ARG3, R5_ARG3, 3);
+      __ mtctr(tmp1);
+
+      __ bind(l_4);
+      // Use unrolled version for mass copying (copy 4 elements a time).
+      // Load feeding store gets zero latency on Power6, however not on Power5.
+      // Therefore, the following sequence is made for the good of both.
+      __ addi(R3_ARG1, R3_ARG1, -32);
+      __ addi(R4_ARG2, R4_ARG2, -32);
+      __ ld(tmp4, 24, R3_ARG1);
+      __ ld(tmp3, 16, R3_ARG1);
+      __ ld(tmp2, 8, R3_ARG1);
+      __ ld(tmp1, 0, R3_ARG1);
+      __ std(tmp4, 24, R4_ARG2);
+      __ std(tmp3, 16, R4_ARG2);
+      __ std(tmp2, 8, R4_ARG2);
+      __ std(tmp1, 0, R4_ARG2);
+      __ bdnz(l_4);
+
+      __ cmpwi(CCR0, R5_ARG3, 0);
+      __ beq(CCR0, l_1);
+
+      __ bind(l_5);
+      __ mtctr(R5_ARG3);
+      __ bind(l_3);
+      __ ld(R0, -8, R3_ARG1);
+      __ std(R0, -8, R4_ARG2);
+      __ addi(R3_ARG1, R3_ARG1, -8);
+      __ addi(R4_ARG2, R4_ARG2, -8);
+      __ bdnz(l_3);
+
+    }
+    __ bind(l_1);
+  }
+
+  // Generate stub for conjoint long copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //
+  address generate_conjoint_long_copy(bool aligned, const char * name) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    address nooverlap_target = aligned ?
+      ((FunctionDescriptor*)StubRoutines::arrayof_jlong_disjoint_arraycopy())->entry() :
+      ((FunctionDescriptor*)StubRoutines::jlong_disjoint_arraycopy())->entry();
+
+    array_overlap_test(nooverlap_target, 3);
+    generate_conjoint_long_copy_core(aligned);
+
+    __ blr();
+
+    return start;
+  }
+
+  // Generate stub for conjoint oop copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //      dest_uninitialized: G1 support
+  //
+  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
+    StubCodeMark mark(this, "StubRoutines", name);
+
+    address start = __ emit_fd();
+
+    address nooverlap_target = aligned ?
+      ((FunctionDescriptor*)StubRoutines::arrayof_oop_disjoint_arraycopy())->entry() :
+      ((FunctionDescriptor*)StubRoutines::oop_disjoint_arraycopy())->entry();
+
+    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
+
+    // Save arguments.
+    __ mr(R9_ARG7, R4_ARG2);
+    __ mr(R10_ARG8, R5_ARG3);
+
+    if (UseCompressedOops) {
+      array_overlap_test(nooverlap_target, 2);
+      generate_conjoint_int_copy_core(aligned);
+    } else {
+      array_overlap_test(nooverlap_target, 3);
+      generate_conjoint_long_copy_core(aligned);
+    }
+
+    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
+    return start;
+  }
+
+  // Generate stub for disjoint oop copy.  If "aligned" is true, the
+  // "from" and "to" addresses are assumed to be heapword aligned.
+  //
+  // Arguments for generated stub:
+  //      from:  R3_ARG1
+  //      to:    R4_ARG2
+  //      count: R5_ARG3 treated as signed
+  //      dest_uninitialized: G1 support
+  //
+  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
+    StubCodeMark mark(this, "StubRoutines", name);
+    address start = __ emit_fd();
+
+    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
+
+    // save some arguments, disjoint_long_copy_core destroys them.
+    // needed for post barrier
+    __ mr(R9_ARG7, R4_ARG2);
+    __ mr(R10_ARG8, R5_ARG3);
+
+    if (UseCompressedOops) {
+      generate_disjoint_int_copy_core(aligned);
+    } else {
+      generate_disjoint_long_copy_core(aligned);
+    }
+
+    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1, /*branchToEnd*/ false);
+
+    return start;
+  }
+
+  void generate_arraycopy_stubs() {
+    // Note: the disjoint stubs must be generated first, some of
+    // the conjoint stubs use them.
+
+    // non-aligned disjoint versions
+    StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
+    StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
+    StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
+    StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
+    StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
+    StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
+
+    // aligned disjoint versions
+    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
+    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
+    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
+    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
+    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
+    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "oop_disjoint_arraycopy_uninit", true);
+
+    // non-aligned conjoint versions
+    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
+    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
+    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
+    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
+    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
+    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
+
+    // aligned conjoint versions
+    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
+    StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
+    StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
+    StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
+    StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
+    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", true);
+
+    // fill routines
+    StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
+    StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
+    StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
+    StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
+    StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
+    StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
+  }
+
+  // Safefetch stubs.
+  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
+    // safefetch signatures:
+    //   int      SafeFetch32(int*      adr, int      errValue);
+    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+    //
+    // arguments:
+    //   R3_ARG1 = adr
+    //   R4_ARG2 = errValue
+    //
+    // result:
+    //   R3_RET  = *adr or errValue
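+    //
+    // If the load at *fault_pc faults, the VM's signal handler is expected
+    // to resume execution at *continuation_pc; R4_ARG2 then still holds
+    // errValue, which the continuation copies to R3_RET.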
+
+    StubCodeMark mark(this, "StubRoutines", name);
+
+    // Entry point, pc or function descriptor.
+    *entry = __ emit_fd();
+
+    // Load *adr into R4_ARG2, may fault.
+    *fault_pc = __ pc();
+    switch (size) {
+      case 4:
+        // int32_t, signed extended
+        __ lwa(R4_ARG2, 0, R3_ARG1);
+        break;
+      case 8:
+        // int64_t
+        __ ld(R4_ARG2, 0, R3_ARG1);
+        break;
+      default:
+        ShouldNotReachHere();
+    }
+
+    // return errValue or *adr
+    *continuation_pc = __ pc();
+    __ mr(R3_RET, R4_ARG2);
+    __ blr();
+  }
+
+  // Initialization
+  void generate_initial() {
+    // Generates the initial stubs and initializes their entry points.
+
+    // Entry points that exist in all platforms.
+    // Note: This is code that could be shared among different platforms; however,
+    // the benefit seems to be smaller than the disadvantage of having a
+    // much more complicated generator structure. See also the comment in
+    // stubRoutines.hpp.
+
+    StubRoutines::_forward_exception_entry          = generate_forward_exception();
+    StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
+    StubRoutines::_catch_exception_entry            = generate_catch_exception();
+
+    // Build this early so it's available for the interpreter.
+    StubRoutines::_throw_StackOverflowError_entry   =
+      generate_throw_exception("StackOverflowError throw_exception",
+                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
+  }
+
+  void generate_all() {
+    // Generates all stubs and initializes the entry points
+
+    // These entry points require SharedInfo::stack0 to be set up in
+    // non-core builds
+    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
+    // Handle IncompatibleClassChangeError in itable stubs.
+    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
+    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
+
+    StubRoutines::_handler_for_unsafe_access_entry         = generate_handler_for_unsafe_access();
+
+    // support for verify_oop (must happen after universe_init)
+    StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
+
+    // arraycopy stubs used by compilers
+    generate_arraycopy_stubs();
+
+    if (UseAESIntrinsics) {
+      guarantee(!UseAESIntrinsics, "not yet implemented.");
+    }
+
+    // PPC uses stubs for safefetch.
+    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
+                                                       &StubRoutines::_safefetch32_fault_pc,
+                                                       &StubRoutines::_safefetch32_continuation_pc);
+    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+                                                       &StubRoutines::_safefetchN_fault_pc,
+                                                       &StubRoutines::_safefetchN_continuation_pc);
+  }
+
+ public:
+  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
+    // replace the standard masm with a special one:
+    _masm = new MacroAssembler(code);
+    if (all) {
+      generate_all();
+    } else {
+      generate_initial();
+    }
+  }
+};
+
+void StubGenerator_generate(CodeBuffer* code, bool all) {
+  StubGenerator g(code, all);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/stubRoutines_ppc_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifdef TARGET_OS_FAMILY_aix
+# include "thread_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+
+// Implementation of the platform-specific part of StubRoutines - for
+// a description of how to extend it, see the stubRoutines.hpp file.
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/stubRoutines_ppc_64.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
+#define CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
+
+// This file holds the platform specific parts of the StubRoutines
+// definition. See stubRoutines.hpp for a description on how to
+// extend it.
+
+static bool    returns_to_call_stub(address return_pc)   { return return_pc == _call_stub_return_address; }
+
+enum platform_dependent_constants {
+  code_size1 = 20000,          // simply increase if too small (the assembler will crash otherwise)
+  code_size2 = 20000           // simply increase if too small (the assembler will crash otherwise)
+};
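+
+// A sketch of how these constants are consumed (simplified; the actual
+// allocation happens in the shared stubRoutines.cpp):
+//
+//   BufferBlob* blob = BufferBlob::create("StubRoutines (1)", code_size1);
+//   CodeBuffer  buf(blob);
+//   StubGenerator_generate(&buf, false); // crashes in the assembler if code_size1 is too small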
+
+#endif // CPU_PPC_VM_STUBROUTINES_PPC_64_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vmStructs_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_VMSTRUCTS_PPC_HPP
+#define CPU_PPC_VM_VMSTRUCTS_PPC_HPP
+
+// These are the CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field)
+
+#define VM_TYPES_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type)
+
+#define VM_INT_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#define VM_LONG_CONSTANTS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#endif // CPU_PPC_VM_VMSTRUCTS_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vm_version_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,477 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "assembler_ppc.inline.hpp"
+#include "compiler/disassembler.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/java.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "utilities/defaultStream.hpp"
+#include "vm_version_ppc.hpp"
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_linux
+# include "os_linux.inline.hpp"
+#endif
+
+# include <sys/sysinfo.h>
+
+int VM_Version::_features = VM_Version::unknown_m;
+int VM_Version::_measured_cache_line_size = 128; // default value
+const char* VM_Version::_features_str = "";
+bool VM_Version::_is_determine_features_test_running = false;
+
+
+#define MSG(flag)   \
+  if (flag && !FLAG_IS_DEFAULT(flag))                                  \
+      jio_fprintf(defaultStream::error_stream(),                       \
+                  "warning: -XX:+" #flag " requires -XX:+UseSIGTRAP\n" \
+                  "         -XX:+" #flag " will be disabled!\n");
+
+void VM_Version::initialize() {
+
+  // Test which instructions are supported and measure cache line size.
+  determine_features();
+
+  // If PowerArchitecturePPC64 hasn't been specified explicitly, determine it from the detected features.
+  if (FLAG_IS_DEFAULT(PowerArchitecturePPC64)) {
+    if (VM_Version::has_popcntw()) {
+      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 7);
+    } else if (VM_Version::has_cmpb()) {
+      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 6);
+    } else if (VM_Version::has_popcntb()) {
+      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 5);
+    } else {
+      FLAG_SET_ERGO(uintx, PowerArchitecturePPC64, 0);
+    }
+  }
+  guarantee(PowerArchitecturePPC64 == 0 || PowerArchitecturePPC64 == 5 ||
+            PowerArchitecturePPC64 == 6 || PowerArchitecturePPC64 == 7,
+            "PowerArchitecturePPC64 should be 0, 5, 6 or 7");
+
+  if (!UseSIGTRAP) {
+    MSG(TrapBasedICMissChecks);
+    MSG(TrapBasedNotEntrantChecks);
+    MSG(TrapBasedNullChecks);
+    FLAG_SET_ERGO(bool, TrapBasedNotEntrantChecks, false);
+    FLAG_SET_ERGO(bool, TrapBasedNullChecks,       false);
+    FLAG_SET_ERGO(bool, TrapBasedICMissChecks,     false);
+  }
+
+#ifdef COMPILER2
+  if (!UseSIGTRAP) {
+    MSG(TrapBasedRangeChecks);
+    FLAG_SET_ERGO(bool, TrapBasedRangeChecks, false);
+  }
+
+  // On Power6, test for the section size.
+  if (PowerArchitecturePPC64 == 6) {
+    determine_section_size();
+  // TODO: PPC port } else {
+  // TODO: PPC port PdScheduling::power6SectorSize = 0x20;
+  }
+
+  MaxVectorSize = 8;
+#endif
+
+  // Create and print feature-string.
+  char buf[(num_features+1) * 16]; // Max 16 chars per feature.
+  jio_snprintf(buf, sizeof(buf),
+               "ppc64%s%s%s%s%s%s%s%s",
+               (has_fsqrt()   ? " fsqrt"   : ""),
+               (has_isel()    ? " isel"    : ""),
+               (has_lxarxeh() ? " lxarxeh" : ""),
+               (has_cmpb()    ? " cmpb"    : ""),
+               //(has_mftgpr()? " mftgpr"  : ""),
+               (has_popcntb() ? " popcntb" : ""),
+               (has_popcntw() ? " popcntw" : ""),
+               (has_fcfids()  ? " fcfids"  : ""),
+               (has_vand()    ? " vand"    : "")
+               // Make sure number of %s matches num_features!
+              );
+  _features_str = strdup(buf);
+  NOT_PRODUCT(if (Verbose) print_features(););
+
+  // PPC64 supports 8-byte compare-exchange operations (see
+  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
+  _supports_cx8 = true;
+
+  UseSSE = 0; // Only on x86 and x64
+
+  intx cache_line_size = _measured_cache_line_size;
+
+  if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) AllocatePrefetchStyle = 1;
+
+  if (AllocatePrefetchStyle == 4) {
+    AllocatePrefetchStepSize = cache_line_size; // Need exact value.
+    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 12; // Use larger blocks by default.
+    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 2*cache_line_size; // < 0 means no platform default; derive one.
+  } else {
+    if (cache_line_size > AllocatePrefetchStepSize) AllocatePrefetchStepSize = cache_line_size;
+    if (FLAG_IS_DEFAULT(AllocatePrefetchLines)) AllocatePrefetchLines = 3; // Optimistic value.
+    if (AllocatePrefetchDistance < 0) AllocatePrefetchDistance = 3*cache_line_size; // < 0 means no platform default; derive one.
+  }
+
+  assert(AllocatePrefetchLines > 0, "invalid value");
+  if (AllocatePrefetchLines < 1) // Set valid value in product VM.
+    AllocatePrefetchLines = 1; // Conservative value.
+
+  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
+    AllocatePrefetchStyle = 1; // Fall back if inappropriate.
+
+  assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be non-negative");
+}
+
+void VM_Version::print_features() {
+  tty->print_cr("Version: %s cache_line_size = %d", cpu_features(), get_cache_line_size());
+}
+
+#ifdef COMPILER2
+// Determine the section size on Power6: if the section size is 8
+// instructions, the two test loops below should differ in runtime by ~15 %.
+// If no difference is detected, the section size is assumed to be 32
+// instructions.
+void VM_Version::determine_section_size() {
+
+  int unroll = 80;
+
+  const int code_size = (2 * unroll * 32 + 100) * BytesPerInstWord;
+
+  // Allocate space for the code.
+  ResourceMark rm;
+  CodeBuffer cb("detect_section_size", code_size, 0);
+  MacroAssembler* a = new MacroAssembler(&cb);
+
+  uint32_t *code = (uint32_t *)a->pc();
+  // Emit code.
+  void (*test1)() = (void(*)())(void *)a->emit_fd();
+
+  Label l1;
+
+  a->li(R4, 1);
+  a->sldi(R4, R4, 28);
+  a->b(l1);
+  a->align(CodeEntryAlignment);
+
+  a->bind(l1);
+
+  for (int i = 0; i < unroll; i++) {
+    // Loop 1
+    // ------- sector 0 ------------
+    // ;; 0
+    a->nop();                   // 1
+    a->fpnop0();                // 2
+    a->fpnop1();                // 3
+    a->addi(R4, R4, -1);        // 4
+
+    // ;;  1
+    a->nop();                   // 5
+    a->fmr(F6, F6);             // 6
+    a->fmr(F7, F7);             // 7
+    a->endgroup();              // 8
+    // ------- sector 8 ------------
+
+    // ;;  2
+    a->nop();                   // 9
+    a->nop();                   // 10
+    a->fmr(F8, F8);             // 11
+    a->fmr(F9, F9);             // 12
+
+    // ;;  3
+    a->nop();                   // 13
+    a->fmr(F10, F10);           // 14
+    a->fmr(F11, F11);           // 15
+    a->endgroup();              // 16
+    // -------- sector 16 -------------
+
+    // ;;  4
+    a->nop();                   // 17
+    a->nop();                   // 18
+    a->fmr(F15, F15);           // 19
+    a->fmr(F16, F16);           // 20
+
+    // ;;  5
+    a->nop();                   // 21
+    a->fmr(F17, F17);           // 22
+    a->fmr(F18, F18);           // 23
+    a->endgroup();              // 24
+    // ------- sector 24  ------------
+
+    // ;;  6
+    a->nop();                   // 25
+    a->nop();                   // 26
+    a->fmr(F19, F19);           // 27
+    a->fmr(F20, F20);           // 28
+
+    // ;;  7
+    a->nop();                   // 29
+    a->fmr(F21, F21);           // 30
+    a->fmr(F22, F22);           // 31
+    a->brnop0();                // 32
+
+    // ------- sector 32 ------------
+  }
+
+  // ;; 8
+  a->cmpdi(CCR0, R4, unroll);   // 33
+  a->bge(CCR0, l1);             // 34
+  a->blr();
+
+  // Emit code.
+  void (*test2)() = (void(*)())(void *)a->emit_fd();
+  // uint32_t *code = (uint32_t *)a->pc();
+
+  Label l2;
+
+  a->li(R4, 1);
+  a->sldi(R4, R4, 28);
+  a->b(l2);
+  a->align(CodeEntryAlignment);
+
+  a->bind(l2);
+
+  for (int i = 0; i < unroll; i++) {
+    // Loop 2
+    // ------- sector 0 ------------
+    // ;; 0
+    a->brnop0();                  // 1
+    a->nop();                     // 2
+    //a->cmpdi(CCR0, R4, unroll);
+    a->fpnop0();                  // 3
+    a->fpnop1();                  // 4
+    a->addi(R4, R4, -1);          // 5
+
+    // ;; 1
+
+    a->nop();                     // 6
+    a->fmr(F6, F6);               // 7
+    a->fmr(F7, F7);               // 8
+    // ------- sector 8 ---------------
+
+    // ;; 2
+    a->endgroup();                // 9
+
+    // ;; 3
+    a->nop();                     // 10
+    a->nop();                     // 11
+    a->fmr(F8, F8);               // 12
+
+    // ;; 4
+    a->fmr(F9, F9);               // 13
+    a->nop();                     // 14
+    a->fmr(F10, F10);             // 15
+
+    // ;; 5
+    a->fmr(F11, F11);             // 16
+    // -------- sector 16 -------------
+
+    // ;; 6
+    a->endgroup();                // 17
+
+    // ;; 7
+    a->nop();                     // 18
+    a->nop();                     // 19
+    a->fmr(F15, F15);             // 20
+
+    // ;; 8
+    a->fmr(F16, F16);             // 21
+    a->nop();                     // 22
+    a->fmr(F17, F17);             // 23
+
+    // ;; 9
+    a->fmr(F18, F18);             // 24
+    // -------- sector 24 -------------
+
+    // ;; 10
+    a->endgroup();                // 25
+
+    // ;; 11
+    a->nop();                     // 26
+    a->nop();                     // 27
+    a->fmr(F19, F19);             // 28
+
+    // ;; 12
+    a->fmr(F20, F20);             // 29
+    a->nop();                     // 30
+    a->fmr(F21, F21);             // 31
+
+    // ;; 13
+    a->fmr(F22, F22);             // 32
+  }
+
+  // -------- sector 32 -------------
+  // ;; 14
+  a->cmpdi(CCR0, R4, unroll); // 33
+  a->bge(CCR0, l2);           // 34
+
+  a->blr();
+  uint32_t *code_end = (uint32_t *)a->pc();
+  a->flush();
+
+  double loop1_seconds, loop2_seconds, rel_diff;
+  uint64_t start1, stop1;
+
+  start1 = os::current_thread_cpu_time(false);
+  (*test1)();
+  stop1 = os::current_thread_cpu_time(false);
+  loop1_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);
+
+  start1 = os::current_thread_cpu_time(false);
+  (*test2)();
+  stop1 = os::current_thread_cpu_time(false);
+  loop2_seconds = (stop1 - start1) / (1000 * 1000 * 1000.0);
+
+  rel_diff = (loop2_seconds - loop1_seconds) / loop1_seconds * 100;
+
+  if (PrintAssembly) {
+    ttyLocker ttyl;
+    tty->print_cr("Decoding section size detection stub at " INTPTR_FORMAT " before execution:", code);
+    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
+    tty->print_cr("Time loop1 :%f", loop1_seconds);
+    tty->print_cr("Time loop2 :%f", loop2_seconds);
+    tty->print_cr("(time2 - time1) / time1 = %f %%", rel_diff);
+
+    if (rel_diff > 12.0) {
+      tty->print_cr("Section Size 8 Instructions");
+    } else {
+      tty->print_cr("Section Size 32 Instructions or Power5");
+    }
+  }
+
+#if 0 // TODO: PPC port
+  // Set sector size (if not set explicitly).
+  if (FLAG_IS_DEFAULT(Power6SectorSize128PPC64)) {
+    if (rel_diff > 12.0) {
+      PdScheduling::power6SectorSize = 0x20;
+    } else {
+      PdScheduling::power6SectorSize = 0x80;
+    }
+  } else if (Power6SectorSize128PPC64) {
+    PdScheduling::power6SectorSize = 0x80;
+  } else {
+    PdScheduling::power6SectorSize = 0x20;
+  }
+#endif
+  if (UsePower6SchedulerPPC64) Unimplemented();
+}
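+
+// Worked example for determine_section_size(): with loop1_seconds = 2.00 and
+// loop2_seconds = 2.31, rel_diff = (2.31 - 2.00) / 2.00 * 100 = 15.5, which
+// exceeds the 12.0 threshold, so an 8-instruction section size is detected.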
+#endif // COMPILER2
+
+void VM_Version::determine_features() {
+  // 7 InstWords for each call (function descriptor + blr instruction).
+  const int code_size = (num_features+1+2*7)*BytesPerInstWord;
+  int features = 0;
+
+  // create test area
+  enum { BUFFER_SIZE = 2*4*K }; // Needs to be >= 2 * the max cache line size (the cache line size can't exceed the min page size).
+  char test_area[BUFFER_SIZE];
+  char *mid_of_test_area = &test_area[BUFFER_SIZE>>1];
+
+  // Allocate space for the code.
+  ResourceMark rm;
+  CodeBuffer cb("detect_cpu_features", code_size, 0);
+  MacroAssembler* a = new MacroAssembler(&cb);
+
+  // Emit code.
+  void (*test)(address addr, uint64_t offset) = (void(*)(address addr, uint64_t offset))(void *)a->emit_fd();
+  uint32_t *code = (uint32_t *)a->pc();
+  // Don't use R0 in ldarx.
+  // Keep R3_ARG1 unmodified, it contains &field (see below).
+  // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
+  a->fsqrt(F3, F4);                            // code[0] -> fsqrt_m
+  a->isel(R7, R5, R6, 0);                      // code[1] -> isel_m
+  a->ldarx_unchecked(R7, R3_ARG1, R4_ARG2, 1); // code[2] -> lxarxeh_m
+  a->cmpb(R7, R5, R6);                         // code[3] -> cmpb_m
+  //a->mftgpr(R7, F3);                         // mftgpr (disabled, not emitted)
+  a->popcntb(R7, R5);                          // code[4] -> popcntb_m
+  a->popcntw(R7, R5);                          // code[5] -> popcntw_m
+  a->fcfids(F3, F4);                           // code[6] -> fcfids_m
+  a->vand(VR0, VR0, VR0);                      // code[7] -> vand_m
+  a->blr();
+
+  // Emit function to set one cache line to zero. Emit function descriptor and get pointer to it.
+  void (*zero_cacheline_func_ptr)(char*) = (void(*)(char*))(void *)a->emit_fd();
+  a->dcbz(R3_ARG1); // R3_ARG1 = addr
+  a->blr();
+
+  uint32_t *code_end = (uint32_t *)a->pc();
+  a->flush();
+
+  // Print the detection code.
+  if (PrintAssembly) {
+    ttyLocker ttyl;
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " before execution:", code);
+    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
+  }
+
+  // Measure cache line size.
+  memset(test_area, 0xFF, BUFFER_SIZE); // Fill test area with 0xFF.
+  (*zero_cacheline_func_ptr)(mid_of_test_area); // Call the function which executes dcbz on the middle of the test area.
+  int count = 0; // count zeroed bytes
+  for (int i = 0; i < BUFFER_SIZE; i++) if (test_area[i] == 0) count++;
+  guarantee(is_power_of_2(count), "cache line size needs to be a power of 2");
+  _measured_cache_line_size = count;
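+  // E.g., on a machine with 128-byte cache lines the dcbz above zeroes
+  // exactly 128 of the 0xFF bytes, so count == 128 here.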
+
+  // Execute code. Illegal instructions will be replaced by 0 in the signal handler.
+  VM_Version::_is_determine_features_test_running = true;
+  (*test)((address)mid_of_test_area, (uint64_t)0);
+  VM_Version::_is_determine_features_test_running = false;
+
+  // Determine which instructions are legal.
+  int feature_cntr = 0;
+  if (code[feature_cntr++]) features |= fsqrt_m;
+  if (code[feature_cntr++]) features |= isel_m;
+  if (code[feature_cntr++]) features |= lxarxeh_m;
+  if (code[feature_cntr++]) features |= cmpb_m;
+  //if (code[feature_cntr++]) features |= mftgpr_m;
+  if (code[feature_cntr++]) features |= popcntb_m;
+  if (code[feature_cntr++]) features |= popcntw_m;
+  if (code[feature_cntr++]) features |= fcfids_m;
+  if (code[feature_cntr++]) features |= vand_m;
+
+  // Print the detection code.
+  if (PrintAssembly) {
+    ttyLocker ttyl;
+    tty->print_cr("Decoding cpu-feature detection stub at " INTPTR_FORMAT " after execution:", code);
+    Disassembler::decode((u_char*)code, (u_char*)code_end, tty);
+  }
+
+  _features = features;
+}
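+
+// A sketch of the signal-handler half of the protocol above (an outline only,
+// assuming the platform handler checks the test flag; the real code lives in
+// the OS-specific signal handling, and 'pc' here stands for the faulting
+// address):
+//
+//   if (VM_Version::is_determine_features_test_running()) {
+//     *(uint32_t*)pc = 0; // zero the illegal instruction and resume;
+//                         // determine_features() reads a zeroed code word
+//                         // as "instruction not supported"
+//     return true;        // signal handled
+//   }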
+
+
+static int saved_features = 0;
+
+void VM_Version::allow_all() {
+  saved_features = _features;
+  _features      = all_features_m;
+}
+
+void VM_Version::revert() {
+  _features = saved_features;
+}
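+
+// Typical use of the pair above when testing the assembler (a sketch):
+//
+//   VM_Version::allow_all(); // pretend every feature is implemented
+//   ... emit and decode instructions guarded by has_xxx() ...
+//   VM_Version::revert();    // restore the measured feature set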
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vm_version_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_VM_VERSION_PPC_HPP
+#define CPU_PPC_VM_VM_VERSION_PPC_HPP
+
+#include "runtime/globals_extension.hpp"
+#include "runtime/vm_version.hpp"
+
+class VM_Version: public Abstract_VM_Version {
+protected:
+  enum Feature_Flag {
+    fsqrt,
+    isel,
+    lxarxeh,
+    cmpb,
+    popcntb,
+    popcntw,
+    fcfids,
+    vand,
+    dcba,
+    num_features // last entry to count features
+  };
+  enum Feature_Flag_Set {
+    unknown_m             = 0,
+    fsqrt_m               = (1 << fsqrt  ),
+    isel_m                = (1 << isel   ),
+    lxarxeh_m             = (1 << lxarxeh),
+    cmpb_m                = (1 << cmpb   ),
+    popcntb_m             = (1 << popcntb),
+    popcntw_m             = (1 << popcntw),
+    fcfids_m              = (1 << fcfids ),
+    vand_m                = (1 << vand   ),
+    dcba_m                = (1 << dcba   ),
+    all_features_m        = -1
+  };
+  static int  _features;
+  static int  _measured_cache_line_size;
+  static const char* _features_str;
+  static bool _is_determine_features_test_running;
+
+  static void print_features();
+  static void determine_features(); // also measures cache line size
+  static void determine_section_size();
+  static void power6_micro_bench();
+public:
+  // Initialization
+  static void initialize();
+
+  static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
+  // CPU instruction support
+  static bool has_fsqrt()   { return (_features & fsqrt_m) != 0; }
+  static bool has_isel()    { return (_features & isel_m) != 0; }
+  static bool has_lxarxeh() { return (_features & lxarxeh_m) != 0; }
+  static bool has_cmpb()    { return (_features & cmpb_m) != 0; }
+  static bool has_popcntb() { return (_features & popcntb_m) != 0; }
+  static bool has_popcntw() { return (_features & popcntw_m) != 0; }
+  static bool has_fcfids()  { return (_features & fcfids_m) != 0; }
+  static bool has_vand()    { return (_features & vand_m) != 0; }
+  static bool has_dcba()    { return (_features & dcba_m) != 0; }
+
+  static const char* cpu_features() { return _features_str; }
+
+  static int get_cache_line_size()  { return _measured_cache_line_size; }
+
+  // Assembler testing
+  static void allow_all();
+  static void revert();
+};
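+
+// Illustrative example: a detection run that leaves all of the emitted test
+// instructions intact yields
+//   _features == fsqrt_m | isel_m | lxarxeh_m | cmpb_m | popcntb_m |
+//                popcntw_m | fcfids_m | vand_m
+// in which case has_popcntw() is true and initialize() ergonomically selects
+// PowerArchitecturePPC64 = 7.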
+
+#endif // CPU_PPC_VM_VM_VERSION_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vmreg_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "code/vmreg.hpp"
+
+void VMRegImpl::set_regName() {
+  Register reg = ::as_Register(0);
+  int i;
+  for (i = 0; i < ConcreteRegisterImpl::max_gpr; ) {
+    regName[i++] = reg->name();
+    regName[i++] = reg->name();
+    if (reg->encoding() < RegisterImpl::number_of_registers-1)
+      reg = reg->successor();
+  }
+
+  FloatRegister freg = ::as_FloatRegister(0);
+  for ( ; i < ConcreteRegisterImpl::max_fpr; ) {
+    regName[i++] = freg->name();
+    regName[i++] = freg->name();
+    if (freg->encoding() < FloatRegisterImpl::number_of_registers-1)
+      freg = freg->successor();
+  }
+  for ( ; i < ConcreteRegisterImpl::number_of_registers; i++) {
+    regName[i] = "NON-GPR-FPR";
+  }
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vmreg_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_VMREG_PPC_HPP
+#define CPU_PPC_VM_VMREG_PPC_HPP
+
+  bool is_Register();
+  Register as_Register();
+
+  bool is_FloatRegister();
+  FloatRegister as_FloatRegister();
+
+#endif // CPU_PPC_VM_VMREG_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vmreg_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_VM_VMREG_PPC_INLINE_HPP
+#define CPU_PPC_VM_VMREG_PPC_INLINE_HPP
+
+inline VMReg RegisterImpl::as_VMReg() {
+  if (this == noreg) return VMRegImpl::Bad();
+  return VMRegImpl::as_VMReg(encoding() << 1);
+}
+
+// Since we don't have two halves here, don't multiply by 2.
+inline VMReg ConditionRegisterImpl::as_VMReg() {
+  return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_fpr);
+}
+
+inline VMReg FloatRegisterImpl::as_VMReg() {
+  return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
+}
+
+inline VMReg SpecialRegisterImpl::as_VMReg() {
+  return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_cnd);
+}
+
+inline bool VMRegImpl::is_Register() {
+  return (unsigned int)value() < (unsigned int)ConcreteRegisterImpl::max_gpr;
+}
+
+inline bool VMRegImpl::is_FloatRegister() {
+  return value() >= ConcreteRegisterImpl::max_gpr &&
+         value() < ConcreteRegisterImpl::max_fpr;
+}
+
+inline Register VMRegImpl::as_Register() {
+  assert(is_Register() && is_even(value()), "even-aligned GPR name");
+  return ::as_Register(value()>>1);
+}
+
+inline FloatRegister VMRegImpl::as_FloatRegister() {
+  assert(is_FloatRegister() && is_even(value()), "must be");
+  return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
+}
+
+inline bool VMRegImpl::is_concrete() {
+  assert(is_reg(), "must be");
+  return is_even(value());
+}
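+
+// Concrete instances of the mapping above (values follow directly from the
+// shifts and offsets in this file):
+//   R5->as_VMReg()  == VMReg(10)                                // 5 << 1
+//   F3->as_VMReg()  == VMReg(6 + ConcreteRegisterImpl::max_gpr) // (3 << 1) + max_gpr
+//   VMReg(10)->as_Register() == R5                              // even, 10 >> 1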
+
+#endif // CPU_PPC_VM_VMREG_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/ppc/vm/vtableStubs_ppc_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "code/vtableStubs.hpp"
+#include "interp_masm_ppc_64.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/klassVtable.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "vmreg_ppc.inline.hpp"
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) // nothing
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+#ifndef PRODUCT
+extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
+#endif
+
+// Used by the compiler only; may use only caller-saved, non-argument
+// registers.
+VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
+  // PPC port: use fixed size.
+  const int code_length = VtableStub::pd_code_size_limit(true);
+  VtableStub* s = new (code_length) VtableStub(true, vtable_index);
+  ResourceMark rm;
+  CodeBuffer cb(s->entry_point(), code_length);
+  MacroAssembler* masm = new MacroAssembler(&cb);
+  address start_pc;
+
+#ifndef PRODUCT
+  if (CountCompiledCalls) {
+    __ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
+    __ lwz(R12_scratch2, 0, R11_scratch1);
+    __ addi(R12_scratch2, R12_scratch2, 1);
+    __ stw(R12_scratch2, 0, R11_scratch1);
+  }
+#endif
+
+  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");
+
+  // Get receiver klass.
+  const Register rcvr_klass = R11_scratch1;
+
+  // We might take an implicit NULL fault here.
+  address npe_addr = __ pc(); // npe = null pointer exception
+  __ load_klass_with_trap_null_check(rcvr_klass, R3);
+
+  // Set the Method* (in case of an interpreted method) and the destination address.
+  int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
+
+#ifndef PRODUCT
+  if (DebugVtables) {
+    Label L;
+    // Check offset vs vtable length.
+    const Register vtable_len = R12_scratch2;
+    __ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
+    __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
+    __ bge(CCR0, L);
+    __ li(R12_scratch2, vtable_index);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
+    __ bind(L);
+  }
+#endif
+
+  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
+
+  __ ld(R19_method, v_off, rcvr_klass);
+
+#ifndef PRODUCT
+  if (DebugVtables) {
+    Label L;
+    __ cmpdi(CCR0, R19_method, 0);
+    __ bne(CCR0, L);
+    __ stop("Vtable entry is ZERO", 102);
+    __ bind(L);
+  }
+#endif
+
+  // If the vtable entry is null, the method is abstract.
+  address ame_addr = __ pc(); // ame = abstract method error
+
+  __ load_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
+  __ mtctr(R12_scratch2);
+  __ bctr();
+  masm->flush();
+
+  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
+  s->set_exception_points(npe_addr, ame_addr);
+
+  return s;
+}
+
+VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
+  // PPC port: use fixed size.
+  const int code_length = VtableStub::pd_code_size_limit(false);
+  VtableStub* s = new (code_length) VtableStub(false, vtable_index);
+  ResourceMark rm;
+  CodeBuffer cb(s->entry_point(), code_length);
+  MacroAssembler* masm = new MacroAssembler(&cb);
+  address start_pc;
+
+#ifndef PRODUCT
+  if (CountCompiledCalls) {
+    __ load_const(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr());
+    __ lwz(R12_scratch2, 0, R11_scratch1);
+    __ addi(R12_scratch2, R12_scratch2, 1);
+    __ stw(R12_scratch2, 0, R11_scratch1);
+  }
+#endif
+
+  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");
+
+  // Entry arguments:
+  //  R19_method: Interface
+  //  R3_ARG1:    Receiver
+  //
+
+  const Register rcvr_klass = R11_scratch1;
+  const Register vtable_len = R12_scratch2;
+  const Register itable_entry_addr = R21_tmp1;
+  const Register itable_interface = R22_tmp2;
+
+  // Get receiver klass.
+
+  // We might take an implicit NULL fault here.
+  address npe_addr = __ pc(); // npe = null pointer exception
+  __ load_klass_with_trap_null_check(rcvr_klass, R3_ARG1);
+
+  //__ ld(rcvr_klass, oopDesc::klass_offset_in_bytes(), R3_ARG1);
+
+  BLOCK_COMMENT("Load start of itable entries into itable_entry.");
+  __ lwz(vtable_len, InstanceKlass::vtable_length_offset() * wordSize, rcvr_klass);
+  __ slwi(vtable_len, vtable_len, exact_log2(vtableEntry::size() * wordSize));
+  __ add(itable_entry_addr, vtable_len, rcvr_klass);
+
+  // Loop over all itable entries until the desired interface (in R19_method) is found.
+  BLOCK_COMMENT("Increment itable_entry_addr in loop.");
+  const int vtable_base_offset = InstanceKlass::vtable_start_offset() * wordSize;
+  __ addi(itable_entry_addr, itable_entry_addr, vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes());
+
+  const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
+  Label search;
+  __ bind(search);
+  __ ld(itable_interface, 0, itable_entry_addr);
+
+  // Handle IncompatibleClassChangeError in itable stubs.
+  // If the entry is NULL then we've reached the end of the table
+  // without finding the expected interface, so throw an exception.
+  BLOCK_COMMENT("Handle IncompatibleClassChangeError in itable stubs.");
+  Label throw_icce;
+  __ cmpdi(CCR1, itable_interface, 0);
+  __ cmpd(CCR0, itable_interface, R19_method);
+  __ addi(itable_entry_addr, itable_entry_addr, itable_offset_search_inc);
+  __ beq(CCR1, throw_icce);
+  __ bne(CCR0, search);
+
+  // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
+
+  const Register vtable_offset = R12_scratch2;
+  const Register itable_method = R11_scratch1;
+
+  const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
+                                    itableOffsetEntry::interface_offset_in_bytes()) -
+                                   itable_offset_search_inc;
+  __ lwz(vtable_offset, vtable_offset_offset, itable_entry_addr);
+
+  // Compute the itableMethodEntry and get the Method* and its entry point for the compiler.
+  const int method_offset = (itableMethodEntry::size() * wordSize * vtable_index) +
+    itableMethodEntry::method_offset_in_bytes();
+
+  __ add(itable_method, rcvr_klass, vtable_offset);
+  __ ld(R19_method, method_offset, itable_method);
+
+#ifndef PRODUCT
+  if (DebugVtables) {
+    Label ok;
+    __ cmpdi(CCR0, R19_method, 0);
+    __ bne(CCR0, ok);
+    __ stop("methodOop is null", 103);
+    __ bind(ok);
+  }
+#endif
+
+  // If the vtable entry is null, the method is abstract.
+  address ame_addr = __ pc(); // ame = abstract method error
+
+  // Must do an explicit check if implicit checks are disabled.
+  assert(!MacroAssembler::needs_explicit_null_check(in_bytes(Method::from_compiled_offset())), "sanity");
+  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
+    if (TrapBasedNullChecks) {
+      __ trap_null_check(R19_method);
+    } else {
+      __ cmpdi(CCR0, R19_method, 0);
+      __ beq(CCR0, throw_icce);
+    }
+  }
+  __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
+  __ mtctr(R12_scratch2);
+  __ bctr();
+
+  // Handle IncompatibleClassChangeError in itable stubs.
+  // More detailed error message.
+  // We force resolving of the call site by jumping to the "handle
+  // wrong method" stub, and so let the interpreter runtime do all the
+  // dirty work.
+  __ bind(throw_icce);
+  __ load_const(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub());
+  __ mtctr(R11_scratch1);
+  __ bctr();
+
+  masm->flush();
+
+  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
+  s->set_exception_points(npe_addr, ame_addr);
+  return s;
+}
+
+int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
+  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) {
+    return 1000;
+  } else {
+    int decode_klass_size = MacroAssembler::instr_size_for_decode_klass_not_null();
+    if (is_vtable_stub) {
+      return 20 + decode_klass_size +  8 + 8;   // Plain + cOops + Traps + safety
+    } else {
+      return 96 + decode_klass_size + 12 + 8;   // Plain + cOops + Traps + safety
+    }
+  }
+}
+
+int VtableStub::pd_code_alignment() {
+  const unsigned int icache_line_size = 32;
+  return icache_line_size;
+}
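+
+// The itable search emitted in create_itable_stub corresponds roughly to this
+// C-style sketch (accessor names simplified; see klassVtable.hpp for the real
+// itableOffsetEntry/itableMethodEntry layout):
+//
+//   itableOffsetEntry* e = (itableOffsetEntry*)
+//       ((address)klass + InstanceKlass::vtable_start_offset() * wordSize
+//                       + vtable_length * vtableEntry::size() * wordSize);
+//   while (true) {
+//     if (e->interface() == NULL) throw_IncompatibleClassChangeError();
+//     if (e->interface() == interface) break;
+//     e++;
+//   }
+//   Method* m = *(Method**)((address)klass + e->offset()
+//                           + vtable_index * itableMethodEntry::size() * wordSize
+//                           + itableMethodEntry::method_offset_in_bytes());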
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,6 +88,7 @@
     orncc_op3    = 0x16,
     xnorcc_op3   = 0x17,
     addccc_op3   = 0x18,
+    aes4_op3     = 0x19,
     umulcc_op3   = 0x1a,
     smulcc_op3   = 0x1b,
     subccc_op3   = 0x1c,
@@ -121,6 +122,8 @@
     fpop1_op3    = 0x34,
     fpop2_op3    = 0x35,
     impdep1_op3  = 0x36,
+    aes3_op3     = 0x36,
+    flog3_op3    = 0x36,
     impdep2_op3  = 0x37,
     jmpl_op3     = 0x38,
     rett_op3     = 0x39,
@@ -172,41 +175,56 @@
 
   enum opfs {
     // selected opfs
-    fmovs_opf   = 0x01,
-    fmovd_opf   = 0x02,
+    fmovs_opf          = 0x01,
+    fmovd_opf          = 0x02,
 
-    fnegs_opf   = 0x05,
-    fnegd_opf   = 0x06,
+    fnegs_opf          = 0x05,
+    fnegd_opf          = 0x06,
 
-    fadds_opf   = 0x41,
-    faddd_opf   = 0x42,
-    fsubs_opf   = 0x45,
-    fsubd_opf   = 0x46,
+    fadds_opf          = 0x41,
+    faddd_opf          = 0x42,
+    fsubs_opf          = 0x45,
+    fsubd_opf          = 0x46,
 
-    fmuls_opf   = 0x49,
-    fmuld_opf   = 0x4a,
-    fdivs_opf   = 0x4d,
-    fdivd_opf   = 0x4e,
+    fmuls_opf          = 0x49,
+    fmuld_opf          = 0x4a,
+    fdivs_opf          = 0x4d,
+    fdivd_opf          = 0x4e,
+
+    fcmps_opf          = 0x51,
+    fcmpd_opf          = 0x52,
 
-    fcmps_opf   = 0x51,
-    fcmpd_opf   = 0x52,
+    fstox_opf          = 0x81,
+    fdtox_opf          = 0x82,
+    fxtos_opf          = 0x84,
+    fxtod_opf          = 0x88,
+    fitos_opf          = 0xc4,
+    fdtos_opf          = 0xc6,
+    fitod_opf          = 0xc8,
+    fstod_opf          = 0xc9,
+    fstoi_opf          = 0xd1,
+    fdtoi_opf          = 0xd2,
 
-    fstox_opf   = 0x81,
-    fdtox_opf   = 0x82,
-    fxtos_opf   = 0x84,
-    fxtod_opf   = 0x88,
-    fitos_opf   = 0xc4,
-    fdtos_opf   = 0xc6,
-    fitod_opf   = 0xc8,
-    fstod_opf   = 0xc9,
-    fstoi_opf   = 0xd1,
-    fdtoi_opf   = 0xd2,
+    mdtox_opf          = 0x110,
+    mstouw_opf         = 0x111,
+    mstosw_opf         = 0x113,
+    mxtod_opf          = 0x118,
+    mwtos_opf          = 0x119,
+
+    aes_kexpand0_opf   = 0x130,
+    aes_kexpand2_opf   = 0x131
+  };
 
-    mdtox_opf   = 0x110,
-    mstouw_opf  = 0x111,
-    mstosw_opf  = 0x113,
-    mxtod_opf   = 0x118,
-    mwtos_opf   = 0x119
+  enum op5s {
+    aes_eround01_op5     = 0x00,
+    aes_eround23_op5     = 0x01,
+    aes_dround01_op5     = 0x02,
+    aes_dround23_op5     = 0x03,
+    aes_eround01_l_op5   = 0x04,
+    aes_eround23_l_op5   = 0x05,
+    aes_dround01_l_op5   = 0x06,
+    aes_dround23_l_op5   = 0x07,
+    aes_kexpand1_op5     = 0x08
   };
 
   enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez  };
@@ -427,6 +445,7 @@
   static int immed(    bool        i)  { return  u_field(i ? 1 : 0,     13, 13); }
   static int opf_low6( int         w)  { return  u_field(w,             10,  5); }
   static int opf_low5( int         w)  { return  u_field(w,              9,  5); }
+  static int op5(      int         x)  { return  u_field(x,              8,  5); }
   static int trapcc(   CC         cc)  { return  u_field(cc,            12, 11); }
   static int sx(       int         i)  { return  u_field(i,             12, 12); } // shift x=1 means 64-bit
   static int opf(      int         x)  { return  u_field(x,             13,  5); }
@@ -451,6 +470,7 @@
   static int fd( FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
   static int fs1(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
   static int fs2(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa),  4,  0); };
+  static int fs3(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13,  9); };
 
   // some float instructions use this encoding on the op3 field
   static int alt_op3(int op, FloatRegisterImpl::Width w) {
@@ -559,6 +579,12 @@
     return x & ((1 << 10) - 1);
   }
 
+  // AES crypto instructions supported only on certain processors
+  static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instruction support"); }
+
+  // instruction only in VIS1
+  static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
+
   // instruction only in VIS3
   static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
 
@@ -682,6 +708,24 @@
   void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
 
 
+  // 4-operand AES instructions
+
+  void aes_eround01(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_eround23(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_dround01(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_dround23(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_eround01_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_eround23_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_dround01_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_dround23_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_kexpand1(  FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
+
+
+  // 3-operand AES instructions
+
+  void aes_kexpand0(  FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
+  void aes_kexpand2(  FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
+
   // pp 136
 
   inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
@@ -784,6 +828,10 @@
   void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw,  FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
   void fdiv( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x4c + w)         | fs2(s2, w)); }
 
+  // FXORs/FXORd instructions
+
+  void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
+
   // pp 164
 
   void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/bytecodeInterpreter_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/bytecodeInterpreter_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -83,7 +83,7 @@
 #define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
 #define LOCALS_INT(offset)     (*((jint*)&locals[-(offset)]))
 #define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  ((oop)locals[-(offset)])
+#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
 #define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
 #define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
 #define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1315,7 +1315,7 @@
 }
 
 Address LIR_Assembler::as_Address(LIR_Address* addr) {
-  Register reg = addr->base()->as_register();
+  Register reg = addr->base()->as_pointer_register();
   LIR_Opr index = addr->index();
   if (index->is_illegal()) {
     return Address(reg, addr->disp());
@@ -3100,7 +3100,145 @@
 }
 
 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
-  fatal("Type profiling not implemented on this platform");
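+  // The MDO cell at mdo_addr holds a Klass* with flag bits folded into its
+  // low bits (null_seen, type_unknown; see TypeEntries in methodData.hpp).
+  // The code below merges the observed receiver type into that cell.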
+  Register obj = op->obj()->as_register();
+  Register tmp1 = op->tmp()->as_pointer_register();
+  Register tmp2 = G1;
+  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
+  ciKlass* exact_klass = op->exact_klass();
+  intptr_t current_klass = op->current_klass();
+  bool not_null = op->not_null();
+  bool no_conflict = op->no_conflict();
+
+  Label update, next, none;
+
+  bool do_null = !not_null;
+  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
+  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
+
+  assert(do_null || do_update, "why are we here?");
+  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
+
+  __ verify_oop(obj);
+
+  if (tmp1 != obj) {
+    __ mov(obj, tmp1);
+  }
+  if (do_null) {
+    __ br_notnull_short(tmp1, Assembler::pt, update);
+    if (!TypeEntries::was_null_seen(current_klass)) {
+      __ ld_ptr(mdo_addr, tmp1);
+      __ or3(tmp1, TypeEntries::null_seen, tmp1);
+      __ st_ptr(tmp1, mdo_addr);
+    }
+    if (do_update) {
+      __ ba(next);
+      __ delayed()->nop();
+    }
+#ifdef ASSERT
+  } else {
+    __ br_notnull_short(tmp1, Assembler::pt, update);
+    __ stop("unexpect null obj");
+#endif
+  }
+
+  __ bind(update);
+
+  if (do_update) {
+#ifdef ASSERT
+    if (exact_klass != NULL) {
+      Label ok;
+      __ load_klass(tmp1, tmp1);
+      metadata2reg(exact_klass->constant_encoding(), tmp2);
+      __ cmp_and_br_short(tmp1, tmp2, Assembler::equal, Assembler::pt, ok);
+      __ stop("exact klass and actual klass differ");
+      __ bind(ok);
+    }
+#endif
+
+    Label do_update;
+    __ ld_ptr(mdo_addr, tmp2);
+
+    if (!no_conflict) {
+      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
+        if (exact_klass != NULL) {
+          metadata2reg(exact_klass->constant_encoding(), tmp1);
+        } else {
+          __ load_klass(tmp1, tmp1);
+        }
+
+        __ xor3(tmp1, tmp2, tmp1);
+        __ btst(TypeEntries::type_klass_mask, tmp1);
+        // klass seen before, nothing to do. The unknown bit may have been
+        // set already but no need to check.
+        __ brx(Assembler::zero, false, Assembler::pt, next);
+        __ delayed()->btst(TypeEntries::type_unknown, tmp1);
+        // already unknown. Nothing to do anymore.
+        __ brx(Assembler::notZero, false, Assembler::pt, next);
+
+        if (TypeEntries::is_type_none(current_klass)) {
+          __ delayed()->btst(TypeEntries::type_mask, tmp2);
+          __ brx(Assembler::zero, true, Assembler::pt, do_update);
+          // first time here. Set profile type.
+          __ delayed()->or3(tmp2, tmp1, tmp2);
+        } else {
+          __ delayed()->nop();
+        }
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
+
+        __ btst(TypeEntries::type_unknown, tmp2);
+        // already unknown. Nothing to do anymore.
+        __ brx(Assembler::notZero, false, Assembler::pt, next);
+        __ delayed()->nop();
+      }
+
+      // different than before. Cannot keep accurate profile.
+      __ or3(tmp2, TypeEntries::type_unknown, tmp2);
+    } else {
+      // There's a single possible klass at this profile point
+      assert(exact_klass != NULL, "should be");
+      if (TypeEntries::is_type_none(current_klass)) {
+        metadata2reg(exact_klass->constant_encoding(), tmp1);
+        __ xor3(tmp1, tmp2, tmp1);
+        __ btst(TypeEntries::type_klass_mask, tmp1);
+        __ brx(Assembler::zero, false, Assembler::pt, next);
+#ifdef ASSERT
+        {
+          Label ok;
+          __ delayed()->btst(TypeEntries::type_mask, tmp2);
+          __ brx(Assembler::zero, true, Assembler::pt, ok);
+          __ delayed()->nop();
+
+          __ stop("unexpected profiling mismatch");
+          __ bind(ok);
+        }
+        // first time here. Set profile type.
+        __ or3(tmp2, tmp1, tmp2);
+#else
+        // first time here. Set profile type.
+        __ delayed()->or3(tmp2, tmp1, tmp2);
+#endif
+
+      } else {
+        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
+               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
+
+        // already unknown. Nothing to do anymore.
+        __ btst(TypeEntries::type_unknown, tmp2);
+        __ brx(Assembler::notZero, false, Assembler::pt, next);
+        __ delayed()->or3(tmp2, TypeEntries::type_unknown, tmp2);
+      }
+    }
+
+    __ bind(do_update);
+    __ st_ptr(tmp2, mdo_addr);
+
+    __ bind(next);
+  }
 }
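
A note on the shape of the code above: the three booleans computed up front (do_null, do_update, exact_klass_set) encode what the compiler already knows statically, and every runtime branch that can be skipped is skipped. A minimal C++ sketch of that planning step, using a hypothetical ProfileKnowledge struct in place of the LIR_OpProfileType accessors:

    #include <cassert>

    // Hypothetical stand-in for what LIR_OpProfileType knows at compile time.
    struct ProfileKnowledge {
      bool not_null;         // op->not_null(): null can never reach this site
      bool type_unknown;     // TypeEntries::is_type_unknown(current_klass)
      bool exact_klass_set;  // profile already records the only possible klass
    };

    void plan_checks(const ProfileKnowledge& k, bool& do_null, bool& do_update) {
      do_null   = !k.not_null;                           // must still handle null
      do_update = !k.type_unknown && !k.exact_klass_set; // klass word may change
      assert(do_null || do_update);                      // "why are we here?"
    }
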
 
 void LIR_Assembler::align_backward_branch_target() {
@@ -3320,9 +3458,14 @@
 
 void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
   LIR_Address* addr = addr_opr->as_address_ptr();
-  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
-
-  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
+  assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1, "can't handle complex addresses yet");
+
+  if (Assembler::is_simm13(addr->disp())) {
+    __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
+  } else {
+    __ set(addr->disp(), G3_scratch);
+    __ add(addr->base()->as_pointer_register(), G3_scratch, dest->as_pointer_register());
+  }
 }
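
For reference, the range test that selects between the two paths above: SPARC arithmetic immediates are 13-bit signed values, so any displacement outside [-4096, 4095] has to be materialized in a scratch register first. A minimal sketch of the predicate (Assembler::is_simm13 behaves equivalently):

    #include <cstdint>

    // 13-bit signed immediate range, as used by SPARC add/ld/st encodings.
    inline bool is_simm13(intptr_t x) { return -4096 <= x && x <= 4095; }

    // is_simm13(4095) -> true : one ADD base, disp, dest
    // is_simm13(4096) -> false: SET disp, G3_scratch; ADD base, G3_scratch, dest
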
 
 
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c1_globals_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,6 +92,8 @@
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
+define_pd_global(bool,  TrapBasedRangeChecks,        false); // Not needed on sparc.
+
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
 
--- a/src/cpu/sparc/vm/c2_init_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/c2_init_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/disassembler_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/disassembler_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/frame_sparc.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/frame_sparc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -237,6 +237,10 @@
 inline ConstantPoolCache** frame::interpreter_frame_cache_addr() const {
   return (ConstantPoolCache**)sp_addr_at( LcpoolCache->sp_offset_in_saved_window());
 }
+
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
+}
 #endif // CC_INTERP
 
 
--- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,12 @@
 
 const int StackAlignmentInBytes = (2*wordSize);
 
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values are properly extended to 64 bits.
+// If set, SharedRuntime::c_calling_convention() must adapt
+// signatures accordingly.
+const bool CCallingConventionRequiresIntsAsLongs = false;
+
 #define SUPPORTS_NATIVE_CX8
 
 #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
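
An illustration of what the new constant is about (not SPARC behaviour, since the flag is false here): on an ABI that does require widening, a 32-bit int argument must be explicitly sign-extended to 64 bits before a native call, which is the adaptation c_calling_convention() would have to perform. Sketch:

    #include <cstdint>

    int64_t widen_for_native_call(int32_t arg) {
      // -1 becomes 0xFFFFFFFFFFFFFFFF; passing the raw zero-extended bit
      // pattern (0x00000000FFFFFFFF) would read as 4294967295 to the callee.
      return (int64_t)arg;
    }
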
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,8 @@
 define_pd_global(bool, NeedsDeoptSuspend,           true); // register window machines need this
 
 define_pd_global(bool, ImplicitNullChecks,          true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, TrapBasedNullChecks,         false); // Not needed on sparc.
+define_pd_global(bool, UncommonNullCast,            true);  // Uncommon-trap NULLs passed to check cast
 
 define_pd_global(intx, CodeEntryAlignment,    32);
 // The default setting 16/16 seems to work best.
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1942,6 +1942,220 @@
   }
 }
 
+void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr, Register tmp) {
+  Label not_null, do_nothing, do_update;
+
+  assert_different_registers(obj, mdo_addr.base(), tmp);
+
+  verify_oop(obj);
+
+  ld_ptr(mdo_addr, tmp);
+
+  br_notnull_short(obj, pt, not_null);
+  or3(tmp, TypeEntries::null_seen, tmp);
+  ba_short(do_update);
+
+  bind(not_null);
+  load_klass(obj, obj);
+
+  xor3(obj, tmp, obj);
+  btst(TypeEntries::type_klass_mask, obj);
+  // klass seen before, nothing to do. The unknown bit may have been
+  // set already but no need to check.
+  brx(zero, false, pt, do_nothing);
+  delayed()->btst(TypeEntries::type_unknown, obj);
+  // already unknown. Nothing to do anymore.
+  brx(notZero, false, pt, do_nothing);
+  delayed()->btst(TypeEntries::type_mask, tmp);
+  brx(zero, true, pt, do_update);
+  // first time here. Set profile type.
+  delayed()->or3(tmp, obj, tmp);
+
+  // different than before. Cannot keep accurate profile.
+  or3(tmp, TypeEntries::type_unknown, tmp);
+
+  bind(do_update);
+  // update profile
+  st_ptr(tmp, mdo_addr);
+
+  bind(do_nothing);
+}
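
The branches above implement a small state machine over a single MDO cell holding a klass pointer plus two low flag bits. A C++ sketch of the same update rule, with illustrative constants standing in for TypeEntries (the real values live in methodData.hpp):

    #include <cstdint>

    // Illustrative flag layout; mirrors TypeEntries rather than copying it.
    const intptr_t null_seen       = 1;            // a null was observed here
    const intptr_t type_unknown    = 2;            // conflicting klasses seen
    const intptr_t type_klass_mask = ~(intptr_t)3; // the klass pointer bits
    const intptr_t type_mask       = ~(intptr_t)1; // klass bits + unknown bit

    intptr_t update_cell(intptr_t cell, intptr_t klass /* 0 for a null obj */) {
      if (klass == 0)                              return cell | null_seen;
      if (((cell ^ klass) & type_klass_mask) == 0) return cell;         // same klass
      if (cell & type_unknown)                     return cell;         // already polluted
      if ((cell & type_mask) == 0)                 return cell | klass; // first klass
      return cell | type_unknown;                  // conflict: give up on the type
    }
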
+
+void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  assert_different_registers(callee, tmp1, tmp2, ImethodDataPtr);
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    ldub(ImethodDataPtr, in_bytes(DataLayout::tag_offset()) - off_to_start, tmp1);
+    cmp_and_br_short(tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag, notEqual, pn, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(ImethodDataPtr, off_to_args, ImethodDataPtr);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile
+          ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
+          sub(tmp1, i*TypeStackSlotEntries::per_arg_count(), tmp1);
+          cmp_and_br_short(tmp1, TypeStackSlotEntries::per_arg_count(), less, pn, done);
+        }
+        ld_ptr(Address(callee, Method::const_offset()), tmp1);
+        lduh(Address(tmp1, ConstMethod::size_of_parameters_offset()), tmp1);
+        // stack offset o (zero-based) from the start of the argument
+        // list, for n arguments, translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, tmp2);
+        sub(tmp1, tmp2, tmp1);
+
+        // Can't use MacroAssembler::argument_address() which needs Gargs to be set up
+        sll(tmp1, Interpreter::logStackElementSize, tmp1);
+        ld_ptr(Lesp, tmp1, tmp1);
+
+        Address mdo_arg_addr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
+        profile_obj_type(tmp1, mdo_arg_addr, tmp2);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        add(ImethodDataPtr, to_add, ImethodDataPtr);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld_ptr(ImethodDataPtr, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, tmp1);
+        sub(tmp1, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count(), tmp1);
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sll(tmp1, exact_log2(DataLayout::cell_size), tmp1);
+        add(ImethodDataPtr, tmp1, ImethodDataPtr);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(ReturnTypeEntry::size()));
+    }
+
+    // mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+
+    bind(profile_continue);
+  }
+}
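
A worked instance of the slot arithmetic from the comment above, with illustrative numbers: for a call with n = 3 arguments, argument o = 0 sits n - o = 3 stack elements above Lesp, so the byte offset passed to ld_ptr is:

    // n = size_of_parameters, o = the recorded stack_slot for this argument;
    // log_elem = Interpreter::logStackElementSize (3 on a 64-bit VM).
    int lesp_byte_offset(int n, int o, int log_elem) {
      return (n - o) << log_elem;  // e.g. (3 - 0) << 3 = 24 bytes above Lesp
    }
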
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
+      Label do_profile;
+      ldub(Lbcp, 0, tmp1);
+      cmp_and_br_short(tmp1, Bytecodes::_invokedynamic, equal, pn, do_profile);
+      cmp(tmp1, Bytecodes::_invokehandle);
+      br(equal, false, pn, do_profile);
+      delayed()->ldub(Lmethod, Method::intrinsic_id_offset_in_bytes(), tmp1);
+      cmp_and_br_short(tmp1, vmIntrinsics::_compiledLambdaForm, notEqual, pt, profile_continue);
+
+      bind(do_profile);
+    }
+
+    Address mdo_ret_addr(ImethodDataPtr, -in_bytes(ReturnTypeEntry::size()));
+    mov(ret, tmp1);
+    profile_obj_type(tmp1, mdo_ret_addr, tmp2);
+
+    bind(profile_continue);
+  }
+}
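
When only jsr292 call sites are profiled, the filter above accepts exactly three cases. Restated as a C++ sketch (the bytecode and intrinsic-id constants here are illustrative, not HotSpot's):

    enum Bc { bc_invokedynamic = 186, bc_invokehandle = 233 /* illustrative */ };

    bool should_profile_return(int bc, int intrinsic_id, int compiled_lambda_form) {
      return bc == bc_invokedynamic
          || bc == bc_invokehandle
          || intrinsic_id == compiled_lambda_form;  // method is a LambdaForm
    }
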
+
+void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters.
+    lduw(ImethodDataPtr, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), tmp1);
+    cmp_and_br_short(tmp1, 0, less, pn, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameters. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+
+    // Pointer to the parameter area in the MDO
+    Register mdp = tmp1;
+    add(ImethodDataPtr, tmp1, mdp);
+
+    // offset of the current profile entry to update
+    Register entry_offset = tmp2;
+    // entry_offset = array len in number of cells
+    ld_ptr(mdp, ArrayData::array_len_offset(), entry_offset);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
+
+    // entry_offset (number of cells)  = array len - size of 1 entry + offset of the stack slot field
+    sub(entry_offset, TypeStackSlotEntries::per_arg_count() - (off_base / DataLayout::cell_size), entry_offset);
+    // entry_offset in bytes
+    sll(entry_offset, exact_log2(DataLayout::cell_size), entry_offset);
+
+    Label loop;
+    bind(loop);
+
+    // load offset on the stack from the slot for this parameter
+    ld_ptr(mdp, entry_offset, tmp3);
+    sll(tmp3, Interpreter::logStackElementSize, tmp3);
+    neg(tmp3);
+    // read the parameter from the local area
+    ld_ptr(Llocals, tmp3, tmp3);
+
+    // make entry_offset now point to the type field for this parameter
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    assert(type_base > off_base, "unexpected");
+    add(entry_offset, type_base - off_base, entry_offset);
+
+    // profile the parameter
+    Address arg_type(mdp, entry_offset);
+    profile_obj_type(tmp3, arg_type, tmp4);
+
+    // go to next parameter
+    sub(entry_offset, TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base), entry_offset);
+    cmp_and_br_short(entry_offset, off_base, greaterEqual, pt, loop);
+
+    bind(profile_continue);
+  }
+}
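
The loop above walks the ParametersTypeData entries from the last parameter down to the first, pairing each recorded stack slot with its type cell. The same traversal in plain C++ (cell offsets illustrative; update_cell is the rule sketched earlier, klass_of an assumed helper):

    extern intptr_t update_cell(intptr_t cell, intptr_t klass);
    extern intptr_t klass_of(intptr_t value);  // assumed: klass of an oop, 0 for null

    void profile_parameters(intptr_t* entries, const intptr_t* locals,
                            int len_cells, int per_arg, int slot_off, int type_off) {
      for (int e = len_cells - per_arg; e >= 0; e -= per_arg) {
        intptr_t slot = entries[e + slot_off];  // stack_slot field of this entry
        intptr_t val  = locals[-slot];          // parameter, counted down from Llocals
        entries[e + type_off] = update_cell(entries[e + type_off], klass_of(val));
      }
    }
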
+
 // add an InterpMonitorElem to the stack (see frame_sparc.hpp)
 
 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
--- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -330,6 +330,11 @@
                            Register scratch2,
                            Register scratch3);
 
+  void profile_obj_type(Register obj, const Address& mdo_addr, Register tmp);
+  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
+  void profile_return_type(Register ret, Register tmp1, Register tmp2);
+  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
   // Debugging
   void interp_verify_oop(Register reg, TosState state, const char * file, int line);    // only if +VerifyOops && state == atos
   void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
--- a/src/cpu/sparc/vm/jni_sparc.h	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/jni_sparc.h	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/nativeInst_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/register_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/register_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1113,7 +1113,9 @@
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
+                                         VMRegPair *regs2,
                                          int total_args_passed) {
+    assert(regs2 == NULL, "not needed on sparc");
 
     // Return the number of VMReg stack_slots needed for the args.
     // This value does not include an abi space (like register window
@@ -2116,7 +2118,7 @@
   // the 1st six register arguments). It's weird; see int_stk_helper.
   //
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   if (is_critical_native) {
     // Critical natives may have to call out so they need a save area
@@ -2863,7 +2865,7 @@
   // the 1st six register arguments). It's weird; see int_stk_helper.
   //
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Calculate the total number of stack slots we will need.
 
--- a/src/cpu/sparc/vm/sparc.ad	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -757,7 +757,7 @@
 #endif
 
 
-void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
+void emit_form3_mem_reg(CodeBuffer &cbuf, PhaseRegAlloc* ra, const MachNode* n, int primary, int tertiary,
                         int src1_enc, int disp32, int src2_enc, int dst_enc) {
 
 #ifdef ASSERT
@@ -912,8 +912,14 @@
   uint index = src2_enc;
   int disp = disp32;
 
-  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
+  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc) {
     disp += STACK_BIAS;
+    // Quick fix for JDK-8029668: check that stack offset fits, bailout if not
+    if (!Assembler::is_simm13(disp)) {
+      ra->C->record_method_not_compilable("unable to handle large constant offsets");
+      return;
+    }
+  }
 
   // We should have a compiler bailout here rather than a guarantee.
   // Better yet would be some mechanism to handle variable-size matches correctly.
@@ -1034,6 +1040,11 @@
   }
 }
 
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  ShouldNotReachHere();
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   Compile* C = ra_->C;
   Compile::ConstantTable& constant_table = C->constant_table();
@@ -1279,20 +1290,15 @@
   return rc_float;
 }
 
-static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
-  if( cbuf ) {
-    // Better yet would be some mechanism to handle variable-size matches correctly
-    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
-      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
-    } else {
-      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
-    }
+static int impl_helper(const MachNode* mach, CodeBuffer* cbuf, PhaseRegAlloc* ra, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
+  if (cbuf) {
+    emit_form3_mem_reg(*cbuf, ra, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
   }
 #ifndef PRODUCT
-  else if( !do_size ) {
-    if( size != 0 ) st->print("\n\t");
-    if( is_load ) st->print("%s   [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
-    else          st->print("%s   R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
+  else if (!do_size) {
+    if (size != 0) st->print("\n\t");
+    if (is_load) st->print("%s   [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
+    else         st->print("%s   R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
   }
 #endif
   return size+4;
@@ -1847,6 +1853,12 @@
   return false;
 }
 
+// Current (2013) SPARC platforms need to read the original key
+// to construct the decryption expanded key
+const bool Matcher::pass_original_key_for_aes() {
+  return true;
+}
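
Our reading of this hook, hedged accordingly: SunJCE expands the decryption key into a layout the SPARC aes_kexpand* instructions cannot reuse, so the decryption stubs further down take the original key as an extra argument (O3/I5) and re-run key expansion themselves. Illustrative only:

    // Which key material an AES stub needs on this platform (sketch).
    struct AesStubArgs {
      const void* expanded_key;  // SunJCE layout -- sufficient for encryption
      const void* original_key;  // decrypt-only: re-expanded via aes_kexpand0/1/2
    };
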
+
 // USII supports fxtof through the whole range of number, USIII doesn't
 const bool Matcher::convL2FSupported(void) {
   return VM_Version::has_fast_fxtof();
@@ -1884,6 +1896,9 @@
   return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
 }
 
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
 // Should the Matcher clone shifts on addressing modes, expecting them to
 // be subsumed into complex addressing expressions or compute them into
 // registers?  True for Intel but false for most RISCs
@@ -2022,19 +2037,6 @@
   return L7_REGP_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return G1_REGI_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  return G1_REGL_mask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
-
 %}
 
 
@@ -2087,22 +2089,22 @@
   %}
 
   enc_class form3_mem_reg( memory mem, iRegI dst ) %{
-    emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, $tertiary,
                        $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
   %}
 
   enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
-    emit_form3_mem_reg(cbuf, this, $primary, -1,
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                        $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
   %}
 
   enc_class form3_mem_prefetch_read( memory mem ) %{
-    emit_form3_mem_reg(cbuf, this, $primary, -1,
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                        $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
   %}
 
   enc_class form3_mem_prefetch_write( memory mem ) %{
-    emit_form3_mem_reg(cbuf, this, $primary, -1,
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1,
                        $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
   %}
 
@@ -2110,8 +2112,8 @@
     assert(Assembler::is_simm13($mem$$disp  ), "need disp and disp+4");
     assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
     guarantee($mem$$index == R_G0_enc, "double index?");
-    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
-    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg );
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg );
     emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
     emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
   %}
@@ -2121,14 +2123,14 @@
     assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
     guarantee($mem$$index == R_G0_enc, "double index?");
     // Load long with 2 instructions
-    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
-    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
   %}
 
   //%%% form3_mem_plus_4_reg is a hack--get rid of it
   enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
     guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
-    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
+    emit_form3_mem_reg(cbuf, ra_, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
   %}
 
   enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
@@ -3241,7 +3243,7 @@
   // C.
   c_calling_convention %{
     // This is obviously always outgoing
-    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
   %}
 
   // Location of native (C/C++) and interpreter return values.  This is specified to
@@ -3354,8 +3356,8 @@
   interface(CONST_INTER);
 %}
 
-// Unsigned (positive) Integer Immediate: 13-bit
-operand immU13() %{
+// Unsigned Integer Immediate: 12-bit (non-negative value that fits in simm13)
+operand immU12() %{
   predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
   match(ConI);
   op_cost(0);
@@ -3391,6 +3393,17 @@
   interface(CONST_INTER);
 %}
 
+// Int Immediate non-negative
+operand immU31()
+%{
+  predicate(n->get_int() >= 0);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
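
Why the rename to immU12: simm13 spans [-4096, 4095], so the non-negative constants that still fit one instruction are exactly 0..4095, i.e. 12 value bits; immU31 likewise admits any non-negative int. The boundary, as a compile-time check:

    static_assert(4095 == (1 << 12) - 1, "largest non-negative simm13 value");
    static_assert(-4096 == -(1 << 12), "smallest simm13 value");
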
+
 // Integer Immediate: 0-bit
 operand immI0() %{
   predicate(n->get_int() == 0);
@@ -5719,7 +5732,6 @@
   effect(TEMP dst, TEMP tmp);
   ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
 
-  size((3+1)*4);  // set may use two instructions.
   format %{ "LDUH   $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
             "SET    $mask,$tmp\n\t"
             "AND    $dst,$tmp,$dst" %}
@@ -5841,13 +5853,13 @@
   ins_pipe(iload_mem);
 %}
 
-// Load Integer with a 13-bit mask into a Long Register
-instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
+// Load Integer with a 12-bit mask into a Long Register
+instruct loadI2L_immU12(iRegL dst, memory mem, immU12 mask) %{
   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
   ins_cost(MEMORY_REF_COST + DEFAULT_COST);
 
   size(2*4);
-  format %{ "LDUW   $mem,$dst\t! int & 13-bit mask -> long\n\t"
+  format %{ "LDUW   $mem,$dst\t! int & 12-bit mask -> long\n\t"
             "AND    $dst,$mask,$dst" %}
   ins_encode %{
     Register Rdst = $dst$$Register;
@@ -5857,14 +5869,13 @@
   ins_pipe(iload_mem);
 %}
 
-// Load Integer with a 32-bit mask into a Long Register
-instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
+// Load Integer with a 31-bit mask into a Long Register
+instruct loadI2L_immU31(iRegL dst, memory mem, immU31 mask, iRegL tmp) %{
   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
   effect(TEMP dst, TEMP tmp);
   ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
 
-  size((3+1)*4);  // set may use two instructions.
-  format %{ "LDUW   $mem,$dst\t! int & 32-bit mask -> long\n\t"
+  format %{ "LDUW   $mem,$dst\t! int & 31-bit mask -> long\n\t"
             "SET    $mask,$tmp\n\t"
             "AND    $dst,$tmp,$dst" %}
   ins_encode %{
@@ -6643,6 +6654,7 @@
 
 instruct membar_acquire() %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(4*MEMORY_REF_COST);
 
   size(0);
@@ -6663,6 +6675,7 @@
 
 instruct membar_release() %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(4*MEMORY_REF_COST);
 
   size(0);
@@ -8959,7 +8972,7 @@
   ins_pipe(ialu_cconly_reg_reg);
 %}
 
-instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{
+instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU12 op2 ) %{
   match(Set icc (CmpU op1 op2));
 
   size(4);
@@ -9164,7 +9177,7 @@
   size(4);
   ins_cost(BRANCH_COST);
   format %{ "BA     $labl\t! short branch" %}
-  ins_encode %{ 
+  ins_encode %{
     Label* L = $labl$$label;
     assert(__ use_cbcond(*L), "back to back cbcond");
     __ ba_short(*L);
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3304,6 +3304,775 @@
     }
   }
 
+  address generate_aescrypt_encryptBlock() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
+    Label L_doLast128bit, L_storeOutput;
+    address start = __ pc();
+    Register from = O0; // source byte array
+    Register to = O1;   // destination byte array
+    Register key = O2;  // expanded key array
+    const Register keylen = O4;  // reg for storing expanded key array length
+
+    // read expanded key length
+    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+    // load input into F54-F56; F30-F31 used as temp
+    __ ldf(FloatRegisterImpl::S, from, 0, F30);
+    __ ldf(FloatRegisterImpl::S, from, 4, F31);
+    __ fmov(FloatRegisterImpl::D, F30, F54);
+    __ ldf(FloatRegisterImpl::S, from, 8, F30);
+    __ ldf(FloatRegisterImpl::S, from, 12, F31);
+    __ fmov(FloatRegisterImpl::D, F30, F56);
+
+    // load expanded key
+    for ( int i = 0;  i <= 38; i += 2 ) {
+      __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
+    }
+
+    // perform cipher transformation
+    __ fxor(FloatRegisterImpl::D, F0, F54, F54);
+    __ fxor(FloatRegisterImpl::D, F2, F56, F56);
+    // rounds 1 through 8
+    for ( int i = 4;  i <= 28; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
+      __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
+      __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
+      __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
+    }
+    __ aes_eround01(F36, F54, F56, F58); //round 9
+    __ aes_eround23(F38, F54, F56, F60);
+
+    // 128-bit original key size
+    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
+
+    for ( int i = 40;  i <= 50; i += 2 ) {
+      __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
+    }
+    __ aes_eround01(F40, F58, F60, F54); //round 10
+    __ aes_eround23(F42, F58, F60, F56);
+    __ aes_eround01(F44, F54, F56, F58); //round 11
+    __ aes_eround23(F46, F54, F56, F60);
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
+
+    __ ldf(FloatRegisterImpl::D, key, 208, F52);
+    __ aes_eround01(F48, F58, F60, F54); //round 12
+    __ aes_eround23(F50, F58, F60, F56);
+    __ ldf(FloatRegisterImpl::D, key, 216, F46);
+    __ ldf(FloatRegisterImpl::D, key, 224, F48);
+    __ ldf(FloatRegisterImpl::D, key, 232, F50);
+    __ aes_eround01(F52, F54, F56, F58); //round 13
+    __ aes_eround23(F46, F54, F56, F60);
+    __ br(Assembler::always, false, Assembler::pt, L_storeOutput);
+    __ delayed()->nop();
+
+    __ BIND(L_doLast128bit);
+    __ ldf(FloatRegisterImpl::D, key, 160, F48);
+    __ ldf(FloatRegisterImpl::D, key, 168, F50);
+
+    __ BIND(L_storeOutput);
+    // perform last round of encryption common for all key sizes
+    __ aes_eround01_l(F48, F58, F60, F54); //last round
+    __ aes_eround23_l(F50, F58, F60, F56);
+
+    // store output into the destination array, F0-F1 used as temp
+    __ fmov(FloatRegisterImpl::D, F54, F0);
+    __ stf(FloatRegisterImpl::S, F0, to, 0);
+    __ stf(FloatRegisterImpl::S, F1, to, 4);
+    __ fmov(FloatRegisterImpl::D, F56, F0);
+    __ stf(FloatRegisterImpl::S, F0, to, 8);
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+
+    return start;
+  }
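
The keylen comparisons above (44, then 52) decode the SunJCE expanded-key length into the AES variant: an expanded key holds 4*(rounds+1) ints, so a 128-bit key gives 44 ints (10 rounds), 192-bit gives 52 (12 rounds), and 256-bit gives 60 (14 rounds). That mapping as a sketch:

    // SunJCE expanded-key length (in ints) -> number of AES rounds.
    int rounds_for(int expanded_ints) {
      switch (expanded_ints) {
        case 44: return 10;  // AES-128
        case 52: return 12;  // AES-192
        case 60: return 14;  // AES-256
        default: return -1;  // unexpected key length
      }
    }
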
+
+  address generate_aescrypt_decryptBlock() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
+    address start = __ pc();
+    Label L_expand192bit, L_expand256bit, L_common_transform;
+    Register from = O0; // source byte array
+    Register to = O1;   // destination byte array
+    Register key = O2;  // expanded key array
+    Register original_key = O3;  // original key array only required during decryption
+    const Register keylen = O4;  // reg for storing expanded key array length
+
+    // read expanded key array length
+    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+    // load input into F52-F54; F30,F31 used as temp
+    __ ldf(FloatRegisterImpl::S, from, 0, F30);
+    __ ldf(FloatRegisterImpl::S, from, 4, F31);
+    __ fmov(FloatRegisterImpl::D, F30, F52);
+    __ ldf(FloatRegisterImpl::S, from, 8, F30);
+    __ ldf(FloatRegisterImpl::S, from, 12, F31);
+    __ fmov(FloatRegisterImpl::D, F30, F54);
+
+    // load original key from SunJCE expanded decryption key
+    for ( int i = 0;  i <= 3; i++ ) {
+      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+    }
+
+    // 256-bit original key size
+    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
+
+    // 128-bit original key size
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 36; i += 4 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
+    }
+
+    // perform 128-bit key specific inverse cipher transformation
+    __ fxor(FloatRegisterImpl::D, F42, F54, F54);
+    __ fxor(FloatRegisterImpl::D, F40, F52, F52);
+    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
+    __ delayed()->nop();
+
+    __ BIND(L_expand192bit);
+
+    // start loading rest of the 192-bit key
+    __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
+    __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
+
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 36; i += 6 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
+      __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
+    }
+    __ aes_kexpand1(F42, F46, 7, F48);
+    __ aes_kexpand2(F44, F48, F50);
+
+    // perform 192-bit key specific inverse cipher transformation
+    __ fxor(FloatRegisterImpl::D, F50, F54, F54);
+    __ fxor(FloatRegisterImpl::D, F48, F52, F52);
+    __ aes_dround23(F46, F52, F54, F58);
+    __ aes_dround01(F44, F52, F54, F56);
+    __ aes_dround23(F42, F56, F58, F54);
+    __ aes_dround01(F40, F56, F58, F52);
+    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
+    __ delayed()->nop();
+
+    __ BIND(L_expand256bit);
+
+    // load rest of the 256-bit key
+    for ( int i = 4;  i <= 7; i++ ) {
+      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+    }
+
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 40; i += 8 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
+      __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
+      __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
+    }
+    __ aes_kexpand1(F48, F54, 6, F56);
+    __ aes_kexpand2(F50, F56, F58);
+
+    for ( int i = 0;  i <= 6; i += 2 ) {
+      __ fmov(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
+    }
+
+    // load input into F52-F54
+    __ ldf(FloatRegisterImpl::D, from, 0, F52);
+    __ ldf(FloatRegisterImpl::D, from, 8, F54);
+
+    // perform 256-bit key specific inverse cipher transformation
+    __ fxor(FloatRegisterImpl::D, F0, F54, F54);
+    __ fxor(FloatRegisterImpl::D, F2, F52, F52);
+    __ aes_dround23(F4, F52, F54, F58);
+    __ aes_dround01(F6, F52, F54, F56);
+    __ aes_dround23(F50, F56, F58, F54);
+    __ aes_dround01(F48, F56, F58, F52);
+    __ aes_dround23(F46, F52, F54, F58);
+    __ aes_dround01(F44, F52, F54, F56);
+    __ aes_dround23(F42, F56, F58, F54);
+    __ aes_dround01(F40, F56, F58, F52);
+
+    for ( int i = 0;  i <= 7; i++ ) {
+      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+    }
+
+    // perform inverse cipher transformations common for all key sizes
+    __ BIND(L_common_transform);
+    for ( int i = 38;  i >= 6; i -= 8 ) {
+      __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
+      __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
+      if ( i != 6) {
+        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
+        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
+      } else {
+        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
+        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
+      }
+    }
+
+    // store output to destination array, F0-F1 used as temp
+    __ fmov(FloatRegisterImpl::D, F52, F0);
+    __ stf(FloatRegisterImpl::S, F0, to, 0);
+    __ stf(FloatRegisterImpl::S, F1, to, 4);
+    __ fmov(FloatRegisterImpl::D, F54, F0);
+    __ stf(FloatRegisterImpl::S, F0, to, 8);
+    __ retl();
+    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
+
+    return start;
+  }
+
+  address generate_cipherBlockChaining_encryptAESCrypt() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
+    Label L_cbcenc128, L_cbcenc192, L_cbcenc256;
+    address start = __ pc();
+    Register from = O0; // source byte array
+    Register to = O1;   // destination byte array
+    Register key = O2;  // expanded key array
+    Register rvec = O3; // init vector
+    const Register len_reg = O4; // cipher length
+    const Register keylen = O5;  // reg for storing expanded key array length
+
+    // save cipher len to return at the end
+    __ mov(len_reg, L1);
+
+    // read expanded key length
+    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+    // load init vector
+    __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
+    __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
+    __ ldx(key,0,G1);
+    __ ldx(key,8,G2);
+
+    // start loading expanded key
+    for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
+      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+    }
+
+    // 128-bit original key size
+    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
+
+    for ( int i = 40, j = 176;  i <= 46; i += 2, j += 8 ) {
+      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+    }
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
+
+    for ( int i = 48, j = 208;  i <= 54; i += 2, j += 8 ) {
+      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
+    }
+
+    // 256-bit original key size
+    __ br(Assembler::always, false, Assembler::pt, L_cbcenc256);
+    __ delayed()->nop();
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc128);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // TEN_EROUNDS
+    for ( int i = 0;  i <= 32; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 32 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ add(to, 16, to);
+    __ subcc(len_reg, 16, len_reg);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
+    __ delayed()->nop();
+    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ retl();
+    __ delayed()->mov(L1, O0);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc192);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // TWELVE_EROUNDS
+    for ( int i = 0;  i <= 40; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 40 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ subcc(len_reg, 16, len_reg);
+    __ add(to, 16, to);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
+    __ delayed()->nop();
+    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ retl();
+    __ delayed()->mov(L1, O0);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_cbcenc256);
+    __ ldx(from,0,G3);
+    __ ldx(from,8,G4);
+    __ xor3(G1,G3,G3);
+    __ xor3(G2,G4,G4);
+    __ movxtod(G3,F56);
+    __ movxtod(G4,F58);
+    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
+    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
+
+    // FOURTEEN_EROUNDS
+    for ( int i = 0;  i <= 48; i += 8 ) {
+      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
+      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
+      if (i != 48 ) {
+        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
+      } else {
+        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
+        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
+      }
+    }
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+    __ add(from, 16, from);
+    __ subcc(len_reg, 16, len_reg);
+    __ add(to, 16, to);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
+    __ delayed()->nop();
+    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
+    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
+    __ retl();
+    __ delayed()->mov(L1, O0);
+
+    return start;
+  }
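
The three loops above are textbook CBC encryption, one 16-byte block per iteration: C[i] = E_k(P[i] XOR C[i-1]) with C[-1] = IV, which is why encryption is inherently serial. A reference sketch with the AES block primitive left abstract (the TEN/TWELVE/FOURTEEN_EROUNDS sequences stand in for it):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Assumed primitive: one-block AES encryption (the stub's round loops).
    void aes_encrypt_block(const uint8_t in[16], uint8_t out[16] /*, keys */);

    size_t cbc_encrypt(const uint8_t* in, uint8_t* out, size_t len, uint8_t iv[16]) {
      uint8_t chain[16];
      std::memcpy(chain, iv, 16);
      for (size_t off = 0; off < len; off += 16) {
        uint8_t x[16];
        for (int i = 0; i < 16; i++) x[i] = in[off + i] ^ chain[i];
        aes_encrypt_block(x, out + off);   // the unrolled round sequences
        std::memcpy(chain, out + off, 16);
      }
      std::memcpy(iv, chain, 16);  // write back rvec, as the stub does
      return len;                  // the stub returns the cipher length in O0
    }

Decryption, by contrast, computes P[i] = D_k(C[i]) XOR C[i-1] from ciphertext that is already in hand, which is what lets the decrypt stub below peel off two blocks per iteration.
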
+
+  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
+    __ align(CodeEntryAlignment);
+    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
+    Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
+    Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
+    address start = __ pc();
+    Register from = I0; // source byte array
+    Register to = I1;   // destination byte array
+    Register key = I2;  // expanded key array
+    Register rvec = I3; // init vector
+    const Register len_reg = I4; // cipher length
+    const Register original_key = I5;  // original key array only required during decryption
+    const Register keylen = L6;  // reg for storing expanded key array length
+
+    // save cipher len before save_frame, to return at the end
+    __ mov(O4, L0);
+    __ save_frame(0);  // args are read from I* registers since we save the frame at the beginning
+
+    // load original key from SunJCE expanded decryption key
+    for ( int i = 0;  i <= 3; i++ ) {
+      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+    }
+
+    // load initial vector
+    __ ldx(rvec,0,L0);
+    __ ldx(rvec,8,L1);
+
+    // read expanded key array length
+    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
+
+    // 256-bit original key size
+    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
+
+    // 128-bit original key size
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 36; i += 4 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
+    }
+
+    // load expanded key[last-1] and key[last] elements
+    __ movdtox(F40,L2);
+    __ movdtox(F42,L3);
+
+    __ and3(len_reg, 16, L4);
+    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks128);
+    __ delayed()->nop();
+
+    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
+    __ delayed()->nop();
+
+    __ BIND(L_expand192bit);
+    // load rest of the 192-bit key
+    __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
+    __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
+
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 36; i += 6 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
+      __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
+    }
+    __ aes_kexpand1(F42, F46, 7, F48);
+    __ aes_kexpand2(F44, F48, F50);
+
+    // load expanded key[last-1] and key[last] elements
+    __ movdtox(F48,L2);
+    __ movdtox(F50,L3);
+
+    __ and3(len_reg, 16, L4);
+    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks192);
+    __ delayed()->nop();
+
+    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
+    __ delayed()->nop();
+
+    __ BIND(L_expand256bit);
+    // load rest of the 256-bit key
+    for ( int i = 4;  i <= 7; i++ ) {
+      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
+    }
+
+    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
+    for ( int i = 0;  i <= 40; i += 8 ) {
+      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
+      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
+      __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
+      __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
+    }
+    __ aes_kexpand1(F48, F54, 6, F56);
+    __ aes_kexpand2(F50, F56, F58);
+
+    // load expanded key[last-1] and key[last] elements
+    __ movdtox(F56,L2);
+    __ movdtox(F58,L3);
+
+    __ and3(len_reg, 16, L4);
+    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks256);
+    __ delayed()->nop();
+
+    __ BIND(L_dec_first_block_start);
+    __ ldx(from,0,L4);
+    __ ldx(from,8,L5);
+    __ xor3(L2,L4,G1);
+    __ movxtod(G1,F60);
+    __ xor3(L3,L5,G1);
+    __ movxtod(G1,F62);
+
+    // 128-bit original key size
+    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
+
+    __ aes_dround23(F54, F60, F62, F58);
+    __ aes_dround01(F52, F60, F62, F56);
+    __ aes_dround23(F50, F56, F58, F62);
+    __ aes_dround01(F48, F56, F58, F60);
+
+    __ BIND(L_dec_first_block192);
+    __ aes_dround23(F46, F60, F62, F58);
+    __ aes_dround01(F44, F60, F62, F56);
+    __ aes_dround23(F42, F56, F58, F62);
+    __ aes_dround01(F40, F56, F58, F60);
+
+    __ BIND(L_dec_first_block128);
+    for ( int i = 38;  i >= 6; i -= 8 ) {
+      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+      if ( i != 6) {
+        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+      } else {
+        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+      }
+    }
+
+    __ movxtod(L0,F56);
+    __ movxtod(L1,F58);
+    __ mov(L4,L0);
+    __ mov(L5,L1);
+    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+    __ stf(FloatRegisterImpl::D, F60, to, 0);
+    __ stf(FloatRegisterImpl::D, F62, to, 8);
+
+    __ add(from, 16, from);
+    __ add(to, 16, to);
+    __ subcc(len_reg, 16, len_reg);
+    __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
+    __ delayed()->nop();
+
+    // 256-bit original key size
+    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
+
+    // 192-bit original key size
+    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_dec_next2_blocks128);
+    __ nop();
+
+    // F40:F42 used for first 16-bytes
+    __ ldx(from,0,G4);
+    __ ldx(from,8,G5);
+    __ xor3(L2,G4,G1);
+    __ movxtod(G1,F40);
+    __ xor3(L3,G5,G1);
+    __ movxtod(G1,F42);
+
+    // F60:F62 used for next 16-bytes
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ xor3(L2,L4,G1);
+    __ movxtod(G1,F60);
+    __ xor3(L3,L5,G1);
+    __ movxtod(G1,F62);
+
+    for ( int i = 38;  i >= 6; i -= 8 ) {
+      __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
+      __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
+      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+      if (i != 6 ) {
+        __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
+        __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
+        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+      } else {
+        __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
+        __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
+        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+      }
+    }
+
+    __ movxtod(L0,F46);
+    __ movxtod(L1,F44);
+    __ fxor(FloatRegisterImpl::D, F46, F40, F40);
+    __ fxor(FloatRegisterImpl::D, F44, F42, F42);
+
+    __ stf(FloatRegisterImpl::D, F40, to, 0);
+    __ stf(FloatRegisterImpl::D, F42, to, 8);
+
+    __ movxtod(G4,F56);
+    __ movxtod(G5,F58);
+    __ mov(L4,L0);
+    __ mov(L5,L1);
+    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+    __ stf(FloatRegisterImpl::D, F60, to, 16);
+    __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+    __ add(from, 32, from);
+    __ add(to, 32, to);
+    __ subcc(len_reg, 32, len_reg);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
+    __ delayed()->nop();
+    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
+    __ delayed()->nop();
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_dec_next2_blocks192);
+    __ nop();
+
+    // F48:F50 used for first 16-bytes
+    __ ldx(from,0,G4);
+    __ ldx(from,8,G5);
+    __ xor3(L2,G4,G1);
+    __ movxtod(G1,F48);
+    __ xor3(L3,G5,G1);
+    __ movxtod(G1,F50);
+
+    // F60:F62 used for next 16-bytes
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ xor3(L2,L4,G1);
+    __ movxtod(G1,F60);
+    __ xor3(L3,L5,G1);
+    __ movxtod(G1,F62);
+
+    for ( int i = 46;  i >= 6; i -= 8 ) {
+      __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
+      __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
+      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+      if (i != 6 ) {
+        __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
+        __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
+        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+      } else {
+        __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
+        __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
+        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
+        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
+      }
+    }
+
+    __ movxtod(L0,F54);
+    __ movxtod(L1,F52);
+    __ fxor(FloatRegisterImpl::D, F54, F48, F48);
+    __ fxor(FloatRegisterImpl::D, F52, F50, F50);
+
+    __ stf(FloatRegisterImpl::D, F48, to, 0);
+    __ stf(FloatRegisterImpl::D, F50, to, 8);
+
+    __ movxtod(G4,F56);
+    __ movxtod(G5,F58);
+    __ mov(L4,L0);
+    __ mov(L5,L1);
+    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+    __ stf(FloatRegisterImpl::D, F60, to, 16);
+    __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+    __ add(from, 32, from);
+    __ add(to, 32, to);
+    __ subcc(len_reg, 32, len_reg);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
+    __ delayed()->nop();
+    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
+    __ delayed()->nop();
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_dec_next2_blocks256);
+    __ nop();
+
+    // F0:F2 used for first 16 bytes
+    __ ldx(from,0,G4);
+    __ ldx(from,8,G5);
+    __ xor3(L2,G4,G1);
+    __ movxtod(G1,F0);
+    __ xor3(L3,G5,G1);
+    __ movxtod(G1,F2);
+
+    // F60:F62 used for next 16 bytes
+    __ ldx(from,16,L4);
+    __ ldx(from,24,L5);
+    __ xor3(L2,L4,G1);
+    __ movxtod(G1,F60);
+    __ xor3(L3,L5,G1);
+    __ movxtod(G1,F62);
+
+    __ aes_dround23(F54, F0, F2, F4);
+    __ aes_dround01(F52, F0, F2, F6);
+    __ aes_dround23(F54, F60, F62, F58);
+    __ aes_dround01(F52, F60, F62, F56);
+    __ aes_dround23(F50, F6, F4, F2);
+    __ aes_dround01(F48, F6, F4, F0);
+    __ aes_dround23(F50, F56, F58, F62);
+    __ aes_dround01(F48, F56, F58, F60);
+    // save F48:F54 in temp registers
+    __ movdtox(F54,G2);
+    __ movdtox(F52,G3);
+    __ movdtox(F50,G6);
+    __ movdtox(F48,G1);
+    for (int i = 46; i >= 14; i -= 8) {
+      __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
+      __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
+      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
+      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
+      __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
+      __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
+      __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
+      __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
+    }
+    // load F48:F54 with the first 32 bytes of the original key (for the final rounds)
+    __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
+    __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
+    __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
+    __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
+    __ aes_dround23(F54, F0, F2, F4);
+    __ aes_dround01(F52, F0, F2, F6);
+    __ aes_dround23(F54, F60, F62, F58);
+    __ aes_dround01(F52, F60, F62, F56);
+    __ aes_dround23_l(F50, F6, F4, F2);
+    __ aes_dround01_l(F48, F6, F4, F0);
+    __ aes_dround23_l(F50, F56, F58, F62);
+    __ aes_dround01_l(F48, F56, F58, F60);
+    // re-init F48:F54 with their original values
+    __ movxtod(G2,F54);
+    __ movxtod(G3,F52);
+    __ movxtod(G6,F50);
+    __ movxtod(G1,F48);
+
+    __ movxtod(L0,F6);
+    __ movxtod(L1,F4);
+    __ fxor(FloatRegisterImpl::D, F6, F0, F0);
+    __ fxor(FloatRegisterImpl::D, F4, F2, F2);
+
+    __ stf(FloatRegisterImpl::D, F0, to, 0);
+    __ stf(FloatRegisterImpl::D, F2, to, 8);
+
+    __ movxtod(G4,F56);
+    __ movxtod(G5,F58);
+    __ mov(L4,L0);
+    __ mov(L5,L1);
+    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
+    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
+
+    __ stf(FloatRegisterImpl::D, F60, to, 16);
+    __ stf(FloatRegisterImpl::D, F62, to, 24);
+
+    __ add(from, 32, from);
+    __ add(to, 32, to);
+    __ subcc(len_reg, 32, len_reg);
+    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
+    __ delayed()->nop();
+
+    __ BIND(L_cbcdec_end);
+    __ stx(L0, rvec, 0);
+    __ stx(L1, rvec, 8);
+    __ restore();
+    __ mov(L0, O0);
+    __ retl();
+    __ delayed()->nop();
+
+    return start;
+  }
+
   void generate_initial() {
     // Generates all stubs and initializes the entry points
 
@@ -3369,6 +4138,14 @@
     generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                        &StubRoutines::_safefetchN_fault_pc,
                                                        &StubRoutines::_safefetchN_continuation_pc);
+
+    // generate AES intrinsics code
+    if (UseAESIntrinsics) {
+      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
+      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
+      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
+      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
+    }
   }
 
 
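[Editor's note] The L_dec_next2_blocks* loops above are easier to audit against a
scalar CBC reference. A minimal sketch, assuming 16-byte blocks, len % 32 == 0,
non-overlapping in/out buffers, and a hypothetical software aes_decrypt_block()
helper (the stub instead keeps the expanded key in F0..F54 and issues the SPARC
aes_dround instructions with two blocks in flight per iteration):

    #include <cstddef>
    #include <cstring>
    #include <cstdint>

    // Hypothetical primitive: one-block AES decryption with an expanded key.
    void aes_decrypt_block(const uint8_t in[16], const uint32_t* xk, uint8_t out[16]);

    void cbc_decrypt_two_blocks_per_iter(const uint8_t* in, uint8_t* out,
                                         std::size_t len, uint8_t iv[16],
                                         const uint32_t* xk) {
      uint8_t prev[16];
      std::memcpy(prev, iv, 16);                    // stub: rvec preloaded into L0/L1
      for (std::size_t off = 0; off < len; off += 32) {
        uint8_t c0[16], c1[16], p0[16], p1[16];
        std::memcpy(c0, in + off, 16);              // stub: ldx into G4/G5
        std::memcpy(c1, in + off + 16, 16);         // stub: ldx into L4/L5
        aes_decrypt_block(c0, xk, p0);              // the two decryptions are independent,
        aes_decrypt_block(c1, xk, p1);              // hence the interleaved dround pairs
        for (int i = 0; i < 16; i++) p0[i] ^= prev[i];  // P0 = D(C0) ^ prev ct (or IV)
        for (int i = 0; i < 16; i++) p1[i] ^= c0[i];    // P1 = D(C1) ^ C0
        std::memcpy(out + off, p0, 16);
        std::memcpy(out + off + 16, p1, 16);
        std::memcpy(prev, c1, 16);                  // stub: mov L4/L5 -> L0/L1
      }
      std::memcpy(iv, prev, 16);                    // stub: stx L0/L1 back to rvec
    }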
--- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -156,6 +156,10 @@
 address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
   address entry = __ pc();
 
+  if (state == atos) {
+    __ profile_return_type(O0, G3_scratch, G1_scratch);
+  }
+
 #if !defined(_LP64) && defined(COMPILER2)
   // All return values are where we want them, except for Longs.  C2 returns
   // longs in G1 in the 32-bit build whereas the interpreter wants them in O0/O1.
@@ -1346,6 +1350,7 @@
   __ movbool(true, G3_scratch);
   __ stbool(G3_scratch, do_not_unlock_if_synchronized);
 
+  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
   // increment invocation counter and check for overflow
   //
   // Note: checking for negative value instead of overflow
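[Editor's note] The two interpreter hooks added here are the entry/exit halves of
the same MethodData type-profiling scheme: profile_parameters_type() records the
incoming parameter types once per activation, and profile_return_type() records
the dynamic type of a reference (atos) return value at the return entry. Shape of
the return-side guard, as added above:

    if (state == atos) {   // only reference returns carry a type worth recording
      __ profile_return_type(O0 /* result */, G3_scratch, G1_scratch);
    }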
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2942,7 +2942,6 @@
 
 
 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
-  Register Rtemp = G4_scratch;
   Register Rcall = Rindex;
   assert_different_registers(Rcall, G5_method, Gargs, Rret);
 
@@ -2951,6 +2950,7 @@
 #ifdef GRAAL
   __ profile_called_method(G5_method, Rtemp);
 #endif
+  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
   __ call_from_interpreter(Rcall, Gargs, Rret);
 }
 
@@ -3025,6 +3025,7 @@
   __ null_check(O0);
 
   __ profile_final_call(O4);
+  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
 
   // get return address
   AddressLiteral table(Interpreter::invoke_return_entry_table());
@@ -3054,6 +3055,7 @@
 
   // do the call
   __ profile_call(O4);
+  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
   __ call_from_interpreter(Rscratch, Gargs, Rret);
 }
 
@@ -3069,6 +3071,7 @@
 
   // do the call
   __ profile_call(O4);
+  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
   __ call_from_interpreter(Rscratch, Gargs, Rret);
 }
 
@@ -3094,6 +3097,7 @@
   // do the call - the index (f2) contains the Method*
   assert_different_registers(G5_method, Gargs, Rcall);
   __ mov(Rindex, G5_method);
+  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
   __ call_from_interpreter(Rcall, Gargs, Rret);
   __ bind(notFinal);
 
@@ -3204,6 +3208,7 @@
   __ profile_called_method(G5_method, Rscratch);
 #endif
 
+  __ profile_arguments_type(G5_method, Rcall, Gargs, true);
   __ call_from_interpreter(Rcall, Gargs, Rret);
 }
 
@@ -3233,6 +3238,7 @@
   // do the call
   __ verify_oop(G4_mtype);
   __ profile_final_call(O4);  // FIXME: profile the LambdaForm also
+  __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
   __ call_from_interpreter(Rscratch, Gargs, Rret);
 }
 
@@ -3269,6 +3275,7 @@
 
   // do the call
   __ verify_oop(G4_callsite);
+  __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
   __ call_from_interpreter(Rscratch, Gargs, Rret);
 }
 
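[Editor's note] Every invoke flavor above gains the same hook immediately before
dispatch: profile_arguments_type() records the observed classes of the outgoing
arguments into the MethodData at this call site (the final boolean selects the
virtual-call-site profile layout), which the compilers can later use for type
speculation. The recurring shape, with <scratch>/<target> standing in for the
register each generator happens to use:

    __ profile_arguments_type(G5_method, <scratch>, Gargs, <is_virtual>);
    __ call_from_interpreter(<target>, Gargs, Rret);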
--- a/src/cpu/sparc/vm/vmStructs_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/vmStructs_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -234,7 +234,7 @@
   assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
 
   char buf[512];
-  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
                (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
                (has_hardware_popc() ? ", popc" : ""),
                (has_vis1() ? ", vis1" : ""),
@@ -242,6 +242,7 @@
                (has_vis3() ? ", vis3" : ""),
                (has_blk_init() ? ", blk_init" : ""),
                (has_cbcond() ? ", cbcond" : ""),
+               (has_aes() ? ", aes" : ""),
                (is_ultra3() ? ", ultra3" : ""),
                (is_sun4v() ? ", sun4v" : ""),
                (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
@@ -265,6 +266,41 @@
   if (!has_vis1()) // Drop to 0 if no VIS1 support
     UseVIS = 0;
 
+  // T2 and above should have support for AES instructions
+  if (has_aes()) {
+    if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
+      if (FLAG_IS_DEFAULT(UseAES)) {
+        FLAG_SET_DEFAULT(UseAES, true);
+      }
+      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+      }
+      // we disable both the AES flags if either of them is disabled on the command line
+      if (!UseAES || !UseAESIntrinsics) {
+        FLAG_SET_DEFAULT(UseAES, false);
+        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+      }
+    } else {
+      if (UseAES || UseAESIntrinsics) {
+        warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
+        if (UseAES) {
+          FLAG_SET_DEFAULT(UseAES, false);
+        }
+        if (UseAESIntrinsics) {
+          FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+        }
+      }
+    }
+  } else if (UseAES || UseAESIntrinsics) {
+    warning("AES instructions are not available on this CPU");
+    if (UseAES) {
+      FLAG_SET_DEFAULT(UseAES, false);
+    }
+    if (UseAESIntrinsics) {
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    }
+  }
+
   if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
     ContendedPaddingWidth = cache_line_size;
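[Editor's note] The clamping above collapses to a small decision table; a sketch
of the final flag states (defaults assumed unless the command-line column says
otherwise):

    // has_aes()  UseVIS>0  command line               UseAES / UseAESIntrinsics
    // ---------  --------  ------------------------   -------------------------
    // yes        yes       neither flag given         true   / true
    // yes        yes       either explicitly off      false  / false (both cleared)
    // yes        no        either turned on           false  / false + VIS1 warning
    // no         n/a       either turned on           false  / false + CPU warning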
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,7 +48,9 @@
     sparc64_family       = 14,
     M_family             = 15,
     T_family             = 16,
-    T1_model             = 17
+    T1_model             = 17,
+    sparc5_instructions  = 18,
+    aes_instructions     = 19
   };
 
   enum Feature_Flag_Set {
@@ -73,6 +75,8 @@
     M_family_m              = 1 << M_family,
     T_family_m              = 1 << T_family,
     T1_model_m              = 1 << T1_model,
+    sparc5_instructions_m   = 1 << sparc5_instructions,
+    aes_instructions_m      = 1 << aes_instructions,
 
     generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
     generic_v9_m        = generic_v8_m | v9_instructions_m,
@@ -123,6 +127,8 @@
   static bool has_vis3()                { return (_features & vis3_instructions_m) != 0; }
   static bool has_blk_init()            { return (_features & blk_init_instructions_m) != 0; }
   static bool has_cbcond()              { return (_features & cbcond_instructions_m) != 0; }
+  static bool has_sparc5_instr()        { return (_features & sparc5_instructions_m) != 0; }
+  static bool has_aes()                 { return (_features & aes_instructions_m) != 0; }
 
   static bool supports_compare_and_exchange()
                                         { return has_v9(); }
@@ -133,6 +139,7 @@
 
   static bool is_M_series()             { return is_M_family(_features); }
   static bool is_T4()                   { return is_T_family(_features) && has_cbcond(); }
+  static bool is_T7()                   { return is_T_family(_features) && has_sparc5_instr(); }
 
   // Fujitsu SPARC64
   static bool is_sparc64()              { return (_features & sparc64_family_m) != 0; }
@@ -152,7 +159,7 @@
   static const char* cpu_features()     { return _features_str; }
 
   static intx prefetch_data_size()  {
-    return is_T4() ? 32 : 64;  // default prefetch block size on sparc
+    return is_T4() && !is_T7() ? 32 : 64;  // default prefetch block size on sparc
   }
 
   // Prefetch
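[Editor's note] The sparc5/aes additions above follow this file's fixed three-step
recipe for exposing a CPU feature; the same recipe for a hypothetical feature
"foo" would be:

    enum Feature_Flag     { foo_instructions   = 20 };                   // next free bit
    enum Feature_Flag_Set { foo_instructions_m = 1 << foo_instructions };
    static bool has_foo() { return (_features & foo_instructions_m) != 0; }

The prefetch_data_size() change then reads naturally: plain T4 keeps the 32-byte
prefetch block, while T7 (T_family plus the sparc5 instruction bit) uses 64 bytes.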
--- a/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/bytecodeInterpreter_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/bytecodeInterpreter_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -94,7 +94,7 @@
 #define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
 #define LOCALS_INT(offset)     ((jint)(locals[-(offset)]))
 #define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  ((oop)locals[-(offset)])
+#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
 #define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
 #define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
 #define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
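[Editor's note] The LOCALS_OBJECT change replaces a raw C cast with cast_to_oop().
The likely motivation: in CHECK_UNHANDLED_OOPS builds, oop is a class type rather
than a pointer typedef, so "(oop)locals[-offset]" from an intptr_t slot no longer
compiles; cast_to_oop() hides the difference between the two build modes. The
helper's shape (paraphrased from oopsHierarchy.hpp) is roughly:

    template <class T> inline oop cast_to_oop(T value) {
      return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void*))(value));
    }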
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,6 +38,7 @@
 #include "nativeInst_x86.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "vmreg_x86.inline.hpp"
 
 
 // These masks are used to provide 128-bit aligned bitmasks to the XMM
@@ -1006,6 +1007,9 @@
     if (UseCompressedOops && !wide) {
       __ movptr(compressed_src, src->as_register());
       __ encode_heap_oop(compressed_src);
+      if (patch_code != lir_patch_none) {
+        info->oop_map()->set_narrowoop(compressed_src->as_VMReg());
+      }
     }
 #endif
   }
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -941,6 +941,8 @@
     case vmIntrinsics::_updateCRC32: {
       LIRItem crc(x->argument_at(0), this);
       LIRItem val(x->argument_at(1), this);
+      // val is destroyed by update_crc32
+      val.set_destroys_register();
       crc.load_item();
       val.load_item();
       __ update_crc32(crc.result(), val.result(), result);
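[Editor's note] set_destroys_register() marks a LIRItem whose register the emitted
code will clobber, so C1's allocator loads the value into a fresh copy rather than
handing out its canonical register. Without it, update_crc32 would corrupt val for
any subsequent use of the same value. The intended call order:

    LIRItem val(x->argument_at(1), this);
    val.set_destroys_register();   // must precede load_item()
    val.load_item();               // now held in a register that is safe to clobber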
--- a/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_LinearScan_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,6 +94,8 @@
 define_pd_global(uintx, CodeCacheMinBlockLength,     4);
 define_pd_global(uintx, CodeCacheMinimumUseSpace,    400*K);
 
+define_pd_global(bool,  TrapBasedRangeChecks,        false); // Not needed on x86.
+
 // Heap related flags
 define_pd_global(uintx,MetaspaceSize,    ScaleForWordSize(16*M));
 
--- a/src/cpu/x86/vm/frame_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/frame_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/frame_x86.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,6 +247,10 @@
   }
 }
 
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+  return (oop *)(fp() + interpreter_frame_oop_temp_offset);
+}
+
 #endif /* CC_INTERP */
 
 inline int frame::pd_oop_map_offset_adjustment() const {
--- a/src/cpu/x86/vm/globalDefinitions_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/globalDefinitions_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,12 @@
 
 const int StackAlignmentInBytes  = 16;
 
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values be properly extended to 64 bits.
+// If set, SharedRuntime::c_calling_convention() must adapt
+// signatures accordingly.
+const bool CCallingConventionRequiresIntsAsLongs = false;
+
 #define SUPPORTS_NATIVE_CX8
 
 #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
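[Editor's note] On x86 the native ABIs already pass ints as 32-bit values, so this
constant is false and no adaptation is performed. On a platform where it is true,
SharedRuntime::c_calling_convention() would widen T_INT slots; a hedged sketch of
what that adaptation means (illustrative only, not code from this changeset):

    if (CCallingConventionRequiresIntsAsLongs && sig_bt[i] == T_INT) {
      sig_bt[i] = T_LONG;   // pass the jint sign-extended in a full 64-bit slot
    }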
--- a/src/cpu/x86/vm/globals_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/globals_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,7 +37,8 @@
 define_pd_global(bool, NeedsDeoptSuspend,        false); // only register window machines need this
 
 define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
-define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs past to check cast
+define_pd_global(bool, TrapBasedNullChecks,      false); // Not needed on x86.
+define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap NULLs passed to check cast
 
 // See 4827828 for this change. There is no globals_core_i486.hpp. I can't
 // assign a different value for C2 without touching a number of files. Use
--- a/src/cpu/x86/vm/interp_masm_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/interp_masm_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -127,7 +127,7 @@
 
       if (MethodData::profile_return()) {
         // We're right after the type profile for the last
-        // argument. tmp is the number of cell left in the
+        // argument. tmp is the number of cells left in the
         // CallTypeData/VirtualCallTypeData to reach its end. Non null
         // if there's a return to profile.
         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
@@ -198,7 +198,7 @@
     // parameters. Collect profiling from last parameter down.
     // mdo start + parameters offset + array length - 1
     addptr(mdp, tmp1);
-    movptr(tmp1, Address(mdp, in_bytes(ArrayData::array_len_offset())));
+    movptr(tmp1, Address(mdp, ArrayData::array_len_offset()));
     decrement(tmp1, TypeStackSlotEntries::per_arg_count());
 
     Label loop;
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -98,217 +98,6 @@
   return Address::make_array(adr);
 }
 
-int MacroAssembler::biased_locking_enter(Register lock_reg,
-                                         Register obj_reg,
-                                         Register swap_reg,
-                                         Register tmp_reg,
-                                         bool swap_reg_contains_mark,
-                                         Label& done,
-                                         Label* slow_case,
-                                         BiasedLockingCounters* counters) {
-  assert(UseBiasedLocking, "why call this otherwise?");
-  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
-  assert_different_registers(lock_reg, obj_reg, swap_reg);
-
-  if (PrintBiasedLockingStatistics && counters == NULL)
-    counters = BiasedLocking::counters();
-
-  bool need_tmp_reg = false;
-  if (tmp_reg == noreg) {
-    need_tmp_reg = true;
-    tmp_reg = lock_reg;
-  } else {
-    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
-  }
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
-  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
-  Address saved_mark_addr(lock_reg, 0);
-
-  // Biased locking
-  // See whether the lock is currently biased toward our thread and
-  // whether the epoch is still valid
-  // Note that the runtime guarantees sufficient alignment of JavaThread
-  // pointers to allow age to be placed into low bits
-  // First check to see whether biasing is even enabled for this object
-  Label cas_label;
-  int null_check_offset = -1;
-  if (!swap_reg_contains_mark) {
-    null_check_offset = offset();
-    movl(swap_reg, mark_addr);
-  }
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  movl(tmp_reg, swap_reg);
-  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  jcc(Assembler::notEqual, cas_label);
-  // The bias pattern is present in the object's header. Need to check
-  // whether the bias owner and the epoch are both still current.
-  // Note that because there is no current thread register on x86 we
-  // need to store off the mark word we read out of the object to
-  // avoid reloading it and needing to recheck invariants below. This
-  // store is unfortunate but it makes the overall code shorter and
-  // simpler.
-  movl(saved_mark_addr, swap_reg);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  xorl(swap_reg, tmp_reg);
-  if (swap_reg_contains_mark) {
-    null_check_offset = offset();
-  }
-  movl(tmp_reg, klass_addr);
-  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
-  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
-  }
-  jcc(Assembler::equal, done);
-
-  Label try_revoke_bias;
-  Label try_rebias;
-
-  // At this point we know that the header has the bias pattern and
-  // that we are not the bias owner in the current epoch. We need to
-  // figure out more details about the state of the header in order to
-  // know what operations can be legally performed on the object's
-  // header.
-
-  // If the low three bits in the xor result aren't clear, that means
-  // the prototype header is no longer biased and we have to revoke
-  // the bias on this object.
-  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
-  jcc(Assembler::notZero, try_revoke_bias);
-
-  // Biasing is still enabled for this data type. See whether the
-  // epoch of the current bias is still valid, meaning that the epoch
-  // bits of the mark word are equal to the epoch bits of the
-  // prototype header. (Note that the prototype header's epoch bits
-  // only change at a safepoint.) If not, attempt to rebias the object
-  // toward the current thread. Note that we must be absolutely sure
-  // that the current epoch is invalid in order to do this because
-  // otherwise the manipulations it performs on the mark word are
-  // illegal.
-  testl(swap_reg, markOopDesc::epoch_mask_in_place);
-  jcc(Assembler::notZero, try_rebias);
-
-  // The epoch of the current bias is still valid but we know nothing
-  // about the owner; it might be set or it might be clear. Try to
-  // acquire the bias of the object using an atomic operation. If this
-  // fails we will go in to the runtime to revoke the object's bias.
-  // Note that we first construct the presumed unbiased header so we
-  // don't accidentally blow away another thread's valid bias.
-  movl(swap_reg, saved_mark_addr);
-  andl(swap_reg,
-       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  orl(tmp_reg, swap_reg);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // If the biasing toward our thread failed, this means that
-  // another thread succeeded in biasing it toward itself and we
-  // need to revoke that bias. The revocation will occur in the
-  // interpreter runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_rebias);
-  // At this point we know the epoch has expired, meaning that the
-  // current "bias owner", if any, is actually invalid. Under these
-  // circumstances _only_, we are allowed to use the current header's
-  // value as the comparison value when doing the cas to acquire the
-  // bias in the current epoch. In other words, we allow transfer of
-  // the bias from one thread to another directly in this situation.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  movl(swap_reg, klass_addr);
-  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
-  movl(swap_reg, saved_mark_addr);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // If the biasing toward our thread failed, then another thread
-  // succeeded in biasing it toward itself and we need to revoke that
-  // bias. The revocation will occur in the runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_revoke_bias);
-  // The prototype mark in the klass doesn't have the bias bit set any
-  // more, indicating that objects of this data type are not supposed
-  // to be biased any more. We are going to try to reset the mark of
-  // this object to the prototype value and fall through to the
-  // CAS-based locking scheme. Note that if our CAS fails, it means
-  // that another thread raced us for the privilege of revoking the
-  // bias of this particular object, so it's okay to continue in the
-  // normal locking code.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  movl(swap_reg, saved_mark_addr);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  movl(tmp_reg, klass_addr);
-  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // Fall through to the normal CAS-based lock, because no matter what
-  // the result of the above CAS, some thread must have succeeded in
-  // removing the bias bit from the object's header.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
-  }
-
-  bind(cas_label);
-
-  return null_check_offset;
-}
 void MacroAssembler::call_VM_leaf_base(address entry_point,
                                        int number_of_arguments) {
   call(RuntimeAddress(entry_point));
@@ -726,165 +515,6 @@
   return array;
 }
 
-int MacroAssembler::biased_locking_enter(Register lock_reg,
-                                         Register obj_reg,
-                                         Register swap_reg,
-                                         Register tmp_reg,
-                                         bool swap_reg_contains_mark,
-                                         Label& done,
-                                         Label* slow_case,
-                                         BiasedLockingCounters* counters) {
-  assert(UseBiasedLocking, "why call this otherwise?");
-  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
-  assert(tmp_reg != noreg, "tmp_reg must be supplied");
-  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
-  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address saved_mark_addr(lock_reg, 0);
-
-  if (PrintBiasedLockingStatistics && counters == NULL)
-    counters = BiasedLocking::counters();
-
-  // Biased locking
-  // See whether the lock is currently biased toward our thread and
-  // whether the epoch is still valid
-  // Note that the runtime guarantees sufficient alignment of JavaThread
-  // pointers to allow age to be placed into low bits
-  // First check to see whether biasing is even enabled for this object
-  Label cas_label;
-  int null_check_offset = -1;
-  if (!swap_reg_contains_mark) {
-    null_check_offset = offset();
-    movq(swap_reg, mark_addr);
-  }
-  movq(tmp_reg, swap_reg);
-  andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
-  jcc(Assembler::notEqual, cas_label);
-  // The bias pattern is present in the object's header. Need to check
-  // whether the bias owner and the epoch are both still current.
-  load_prototype_header(tmp_reg, obj_reg);
-  orq(tmp_reg, r15_thread);
-  xorq(tmp_reg, swap_reg);
-  andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  jcc(Assembler::equal, done);
-
-  Label try_revoke_bias;
-  Label try_rebias;
-
-  // At this point we know that the header has the bias pattern and
-  // that we are not the bias owner in the current epoch. We need to
-  // figure out more details about the state of the header in order to
-  // know what operations can be legally performed on the object's
-  // header.
-
-  // If the low three bits in the xor result aren't clear, that means
-  // the prototype header is no longer biased and we have to revoke
-  // the bias on this object.
-  testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  jcc(Assembler::notZero, try_revoke_bias);
-
-  // Biasing is still enabled for this data type. See whether the
-  // epoch of the current bias is still valid, meaning that the epoch
-  // bits of the mark word are equal to the epoch bits of the
-  // prototype header. (Note that the prototype header's epoch bits
-  // only change at a safepoint.) If not, attempt to rebias the object
-  // toward the current thread. Note that we must be absolutely sure
-  // that the current epoch is invalid in order to do this because
-  // otherwise the manipulations it performs on the mark word are
-  // illegal.
-  testq(tmp_reg, markOopDesc::epoch_mask_in_place);
-  jcc(Assembler::notZero, try_rebias);
-
-  // The epoch of the current bias is still valid but we know nothing
-  // about the owner; it might be set or it might be clear. Try to
-  // acquire the bias of the object using an atomic operation. If this
-  // fails we will go in to the runtime to revoke the object's bias.
-  // Note that we first construct the presumed unbiased header so we
-  // don't accidentally blow away another thread's valid bias.
-  andq(swap_reg,
-       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
-  movq(tmp_reg, swap_reg);
-  orq(tmp_reg, r15_thread);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // If the biasing toward our thread failed, this means that
-  // another thread succeeded in biasing it toward itself and we
-  // need to revoke that bias. The revocation will occur in the
-  // interpreter runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_rebias);
-  // At this point we know the epoch has expired, meaning that the
-  // current "bias owner", if any, is actually invalid. Under these
-  // circumstances _only_, we are allowed to use the current header's
-  // value as the comparison value when doing the cas to acquire the
-  // bias in the current epoch. In other words, we allow transfer of
-  // the bias from one thread to another directly in this situation.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  load_prototype_header(tmp_reg, obj_reg);
-  orq(tmp_reg, r15_thread);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // If the biasing toward our thread failed, then another thread
-  // succeeded in biasing it toward itself and we need to revoke that
-  // bias. The revocation will occur in the runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_revoke_bias);
-  // The prototype mark in the klass doesn't have the bias bit set any
-  // more, indicating that objects of this data type are not supposed
-  // to be biased any more. We are going to try to reset the mark of
-  // this object to the prototype value and fall through to the
-  // CAS-based locking scheme. Note that if our CAS fails, it means
-  // that another thread raced us for the privilege of revoking the
-  // bias of this particular object, so it's okay to continue in the
-  // normal locking code.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  load_prototype_header(tmp_reg, obj_reg);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // Fall through to the normal CAS-based lock, because no matter what
-  // the result of the above CAS, some thread must have succeeded in
-  // removing the bias bit from the object's header.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
-  }
-
-  bind(cas_label);
-
-  return null_check_offset;
-}
-
 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
   Label L, E;
 
@@ -1360,9 +990,16 @@
 
 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
   pushf();
-  if (os::is_MP())
-    lock();
-  incrementl(counter_addr);
+  if (reachable(counter_addr)) {
+    if (os::is_MP())
+      lock();
+    incrementl(as_Address(counter_addr));
+  } else {
+    lea(rscratch1, counter_addr);
+    if (os::is_MP())
+      lock();
+    incrementl(Address(rscratch1, 0));
+  }
   popf();
 }
 
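[Editor's note] The atomic_incl() rewrite handles counters that sit outside the
+/-2GB rip-relative range on x86_64: a reachable AddressLiteral is incremented
directly through a disp32, anything else is first materialized in a scratch
register (rscratch1 is r10 on x86_64). The shape of the pattern:

    if (reachable(counter_addr)) {          // target fits in a rip-relative disp32
      if (os::is_MP()) lock();
      incrementl(as_Address(counter_addr));
    } else {                                // needs a 64-bit absolute address
      lea(rscratch1, counter_addr);
      if (os::is_MP()) lock();
      incrementl(Address(rscratch1, 0));
    }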
@@ -1393,6 +1030,234 @@
   }
 }
 
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+                                         Register obj_reg,
+                                         Register swap_reg,
+                                         Register tmp_reg,
+                                         bool swap_reg_contains_mark,
+                                         Label& done,
+                                         Label* slow_case,
+                                         BiasedLockingCounters* counters) {
+  assert(UseBiasedLocking, "why call this otherwise?");
+  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
+  LP64_ONLY( assert(tmp_reg != noreg, "tmp_reg must be supplied"); )
+  bool need_tmp_reg = false;
+  if (tmp_reg == noreg) {
+    need_tmp_reg = true;
+    tmp_reg = lock_reg;
+    assert_different_registers(lock_reg, obj_reg, swap_reg);
+  } else {
+    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
+  }
+  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
+  Address saved_mark_addr(lock_reg, 0);
+
+  if (PrintBiasedLockingStatistics && counters == NULL) {
+    counters = BiasedLocking::counters();
+  }
+  // Biased locking
+  // See whether the lock is currently biased toward our thread and
+  // whether the epoch is still valid
+  // Note that the runtime guarantees sufficient alignment of JavaThread
+  // pointers to allow age to be placed into low bits
+  // First check to see whether biasing is even enabled for this object
+  Label cas_label;
+  int null_check_offset = -1;
+  if (!swap_reg_contains_mark) {
+    null_check_offset = offset();
+    movptr(swap_reg, mark_addr);
+  }
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  movptr(tmp_reg, swap_reg);
+  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  jcc(Assembler::notEqual, cas_label);
+  // The bias pattern is present in the object's header. Need to check
+  // whether the bias owner and the epoch are both still current.
+#ifndef _LP64
+  // Note that because there is no current thread register on x86_32 we
+  // need to store off the mark word we read out of the object to
+  // avoid reloading it and needing to recheck invariants below. This
+  // store is unfortunate but it makes the overall code shorter and
+  // simpler.
+  movptr(saved_mark_addr, swap_reg);
+#endif
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  if (swap_reg_contains_mark) {
+    null_check_offset = offset();
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+#ifdef _LP64
+  orptr(tmp_reg, r15_thread);
+  xorptr(tmp_reg, swap_reg);
+  Register header_reg = tmp_reg;
+#else
+  xorptr(tmp_reg, swap_reg);
+  get_thread(swap_reg);
+  xorptr(swap_reg, tmp_reg);
+  Register header_reg = swap_reg;
+#endif
+  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
+  }
+  jcc(Assembler::equal, done);
+
+  Label try_revoke_bias;
+  Label try_rebias;
+
+  // At this point we know that the header has the bias pattern and
+  // that we are not the bias owner in the current epoch. We need to
+  // figure out more details about the state of the header in order to
+  // know what operations can be legally performed on the object's
+  // header.
+
+  // If the low three bits in the xor result aren't clear, that means
+  // the prototype header is no longer biased and we have to revoke
+  // the bias on this object.
+  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
+  jccb(Assembler::notZero, try_revoke_bias);
+
+  // Biasing is still enabled for this data type. See whether the
+  // epoch of the current bias is still valid, meaning that the epoch
+  // bits of the mark word are equal to the epoch bits of the
+  // prototype header. (Note that the prototype header's epoch bits
+  // only change at a safepoint.) If not, attempt to rebias the object
+  // toward the current thread. Note that we must be absolutely sure
+  // that the current epoch is invalid in order to do this because
+  // otherwise the manipulations it performs on the mark word are
+  // illegal.
+  testptr(header_reg, markOopDesc::epoch_mask_in_place);
+  jccb(Assembler::notZero, try_rebias);
+
+  // The epoch of the current bias is still valid but we know nothing
+  // about the owner; it might be set or it might be clear. Try to
+  // acquire the bias of the object using an atomic operation. If this
+  // fails we will go in to the runtime to revoke the object's bias.
+  // Note that we first construct the presumed unbiased header so we
+  // don't accidentally blow away another thread's valid bias.
+  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
+  andptr(swap_reg,
+         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+#ifdef _LP64
+  movptr(tmp_reg, swap_reg);
+  orptr(tmp_reg, r15_thread);
+#else
+  get_thread(tmp_reg);
+  orptr(tmp_reg, swap_reg);
+#endif
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // If the biasing toward our thread failed, this means that
+  // another thread succeeded in biasing it toward itself and we
+  // need to revoke that bias. The revocation will occur in the
+  // interpreter runtime in the slow case.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+  }
+  if (slow_case != NULL) {
+    jcc(Assembler::notZero, *slow_case);
+  }
+  jmp(done);
+
+  bind(try_rebias);
+  // At this point we know the epoch has expired, meaning that the
+  // current "bias owner", if any, is actually invalid. Under these
+  // circumstances _only_, we are allowed to use the current header's
+  // value as the comparison value when doing the cas to acquire the
+  // bias in the current epoch. In other words, we allow transfer of
+  // the bias from one thread to another directly in this situation.
+  //
+  // FIXME: due to a lack of registers we currently blow away the age
+  // bits in this situation. Should attempt to preserve them.
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+#ifdef _LP64
+  orptr(tmp_reg, r15_thread);
+#else
+  get_thread(swap_reg);
+  orptr(tmp_reg, swap_reg);
+  movptr(swap_reg, saved_mark_addr);
+#endif
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // If the biasing toward our thread failed, then another thread
+  // succeeded in biasing it toward itself and we need to revoke that
+  // bias. The revocation will occur in the runtime in the slow case.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
+  }
+  if (slow_case != NULL) {
+    jcc(Assembler::notZero, *slow_case);
+  }
+  jmp(done);
+
+  bind(try_revoke_bias);
+  // The prototype mark in the klass doesn't have the bias bit set any
+  // more, indicating that objects of this data type are not supposed
+  // to be biased any more. We are going to try to reset the mark of
+  // this object to the prototype value and fall through to the
+  // CAS-based locking scheme. Note that if our CAS fails, it means
+  // that another thread raced us for the privilege of revoking the
+  // bias of this particular object, so it's okay to continue in the
+  // normal locking code.
+  //
+  // FIXME: due to a lack of registers we currently blow away the age
+  // bits in this situation. Should attempt to preserve them.
+  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // Fall through to the normal CAS-based lock, because no matter what
+  // the result of the above CAS, some thread must have succeeded in
+  // removing the bias bit from the object's header.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
+  }
+
+  bind(cas_label);
+
+  return null_check_offset;
+}
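+
+// Editor's summary (a sketch of the dispatch above, in terms of header_reg =
+// (mark ^ (prototype_header | thread)) with the age bits masked off):
+//   header_reg == 0   -> already biased to this thread: done
+//   bias bits set     -> klass no longer biases: try_revoke_bias (CAS the
+//                        prototype header in, fall through to the CAS lock)
+//   epoch bits set    -> bias epoch expired: try_rebias (CAS ourselves in)
+//   otherwise         -> anonymously biased: CAS our thread into the mark word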
+
 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
   assert(UseBiasedLocking, "why call this otherwise?");
 
@@ -1408,6 +1273,620 @@
   jcc(Assembler::equal, done);
 }
 
+#ifdef COMPILER2
+// Fast_Lock and Fast_Unlock used by C2
+
+// Because the transitions from emitted code to the runtime
+// monitorenter/exit helper stubs are so slow it's critical that
+// we inline both the stack-locking fast-path and the inflated fast path.
+//
+// See also: cmpFastLock and cmpFastUnlock.
+//
+// What follows is a specialized inline transliteration of the code
+// in slow_enter() and slow_exit().  If we're concerned about I$ bloat
+// another option would be to emit TrySlowEnter and TrySlowExit methods
+// at startup-time.  These methods would accept arguments as
+// (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
+// indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
+// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
+// In practice, however, the # of lock sites is bounded and is usually small.
+// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
+// if the processor uses simple bimodal branch predictors keyed by EIP
+// Since the helper routines would be called from multiple synchronization
+// sites.
+//
+// An even better approach would be write "MonitorEnter()" and "MonitorExit()"
+// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
+// to those specialized methods.  That'd give us a mostly platform-independent
+// implementation that the JITs could optimize and inline at their pleasure.
+// Done correctly, the only time we'd need to cross to native code would be
+// to park() or unpark() threads.  We'd also need a few more unsafe operators
+// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
+// (b) provide explicit barriers or fence operations.
+//
+// TODO:
+//
+// *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
+//    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
+//    Given TLAB allocation, Self is usually manifested in a register, so passing it into
+//    the lock operators would typically be faster than reifying Self.
+//
+// *  Ideally I'd define the primitives as:
+//       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
+//       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
+//    Unfortunately ADLC bugs prevent us from expressing the ideal form.
+//    Instead, we're stuck with the rather awkward and brittle register assignments below.
+//    Furthermore the register assignments are overconstrained, possibly resulting in
+//    sub-optimal code near the synchronization site.
+//
+// *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
+//    Alternately, use a better sp-proximity test.
+//
+// *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
+//    Either one is sufficient to uniquely identify a thread.
+//    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
+//
+// *  Intrinsify notify() and notifyAll() for the common cases where the
+//    object is locked by the calling thread but the waitlist is empty.
+//    This avoids the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
+//
+// *  use jccb and jmpb instead of jcc and jmp to improve code density.
+//    But beware of excessive branch density on AMD Opterons.
+//
+// *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
+//    or failure of the fast-path.  If the fast-path fails then we pass
+//    control to the slow-path, typically in C.  In Fast_Lock and
+//    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
+//    will emit a conditional branch immediately after the node.
+//    So we have branches to branches and lots of ICC.ZF games.
+//    Instead, it might be better to have C2 pass a "FailureLabel"
+//    into Fast_Lock and Fast_Unlock.  In the case of success, control
+//    will drop through the node.  ICC.ZF is undefined at exit.
+//    In the case of failure, the node will branch directly to the
+//    FailureLabel
+
+
+// obj: object to lock
+// box: on-stack box address (displaced header location) - KILLED
+// rax: tmp -- KILLED
+// scr: tmp -- KILLED
+void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, BiasedLockingCounters* counters) {
+  // Ensure the register assignments are disjoint
+  guarantee (objReg != boxReg, "");
+  guarantee (objReg != tmpReg, "");
+  guarantee (objReg != scrReg, "");
+  guarantee (boxReg != tmpReg, "");
+  guarantee (boxReg != scrReg, "");
+  guarantee (tmpReg == rax, "");
+
+  if (counters != NULL) {
+    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()));
+  }
+  if (EmitSync & 1) {
+      // set box->dhw = unused_mark (3)
+      // Force all sync thru slow-path: slow_enter() and slow_exit()
+      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+      cmpptr (rsp, (int32_t)NULL_WORD);
+  } else
+  if (EmitSync & 2) {
+      Label DONE_LABEL ;
+      if (UseBiasedLocking) {
+         // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
+         biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
+      }
+
+      movptr(tmpReg, Address(objReg, 0));           // fetch markword
+      orptr (tmpReg, 0x1);
+      movptr(Address(boxReg, 0), tmpReg);           // Anticipate successful CAS
+      if (os::is_MP()) {
+        lock();
+      }
+      cmpxchgptr(boxReg, Address(objReg, 0));       // Updates tmpReg
+      jccb(Assembler::equal, DONE_LABEL);
+      // Recursive locking
+      subptr(tmpReg, rsp);
+      andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+      movptr(Address(boxReg, 0), tmpReg);
+      bind(DONE_LABEL);
+  } else {
+    // Possible cases that we'll encounter in fast_lock
+    // ------------------------------------------------
+    // * Inflated
+    //    -- unlocked
+    //    -- Locked
+    //       = by self
+    //       = by other
+    // * biased
+    //    -- by Self
+    //    -- by other
+    // * neutral
+    // * stack-locked
+    //    -- by self
+    //       = sp-proximity test hits
+    //       = sp-proximity test generates false-negative
+    //    -- by other
+    //
+
+    Label IsInflated, DONE_LABEL;
+
+    // It's stack-locked, biased or neutral.
+    // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
+    // order to reduce the number of conditional branches in the most common cases.
+    // Beware -- there's a subtle invariant that fetch of the markword
+    // at [FETCH], below, will never observe a biased encoding (*101b).
+    // If this invariant is not held we risk exclusion (safety) failure.
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+    }
+
+    movptr(tmpReg, Address(objReg, 0));          // [FETCH]
+    testl (tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+    jccb  (Assembler::notZero, IsInflated);
+
+    // Attempt stack-locking ...
+    orptr (tmpReg, 0x1);
+    movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(boxReg, Address(objReg, 0));      // Updates tmpReg
+    if (counters != NULL) {
+      cond_inc32(Assembler::equal,
+                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
+    }
+    jccb(Assembler::equal, DONE_LABEL);
+
+    // Recursive locking
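+    // The failed CAS left obj's markword in tmpReg.  If that markword is an
+    // address within our own stack we already hold a stack-lock on obj, so
+    // we store 0 as the displaced header (dhw == 0 denotes a recursive
+    // stack-lock).  The subtract-and-mask below is the sp-proximity test.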
+    subptr(tmpReg, rsp);
+    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+    movptr(Address(boxReg, 0), tmpReg);
+    if (counters != NULL) {
+      cond_inc32(Assembler::equal,
+                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
+    }
+    jmpb(DONE_LABEL);
+
+    bind(IsInflated);
+#ifndef _LP64
+    // The object is inflated.
+    //
+    // TODO-FIXME: eliminate the ugly use of manifest constants:
+    //   Use markOopDesc::monitor_value instead of "2".
+    //   use markOop::unused_mark() instead of "3".
+    // The tmpReg value is an objectMonitor reference ORed with
+    // markOopDesc::monitor_value (2).   We can either convert tmpReg to an
+    // objectmonitor pointer by masking off the "2" bit or we can just
+    // use tmpReg as an objectmonitor pointer but bias the objectmonitor
+    // field offsets with "-2" to compensate for and annul the low-order tag bit.
+    //
+    // I use the latter as it avoids AGI stalls.
+    // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
+    // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
+    //
+    #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
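+    // (For what it's worth, the code below spells the -2 skew inline, e.g.
+    //  ObjectMonitor::owner_offset_in_bytes()-2, rather than using this macro.)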
+
+    // boxReg refers to the on-stack BasicLock in the current frame.
+    // We'd like to write:
+    //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
+    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
+    // additional latency as we have another ST in the store buffer that must drain.
+
+    if (EmitSync & 8192) {
+       movptr(Address(boxReg, 0), 3);            // results in ST-before-CAS penalty
+       get_thread (scrReg);
+       movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2]
+       movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    } else
+    if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
+       movptr(scrReg, boxReg);
+       movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
+
+       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
+       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+          // prefetchw [eax + Offset(_owner)-2]
+          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       }
+
+       if ((EmitSync & 64) == 0) {
+         // Optimistic form: consider XORL tmpReg,tmpReg
+         movptr(tmpReg, NULL_WORD);
+       } else {
+         // Can suffer RTS->RTO upgrades on shared or cold $ lines
+         // Test-And-CAS instead of CAS
+         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         testptr(tmpReg, tmpReg);                   // Locked ?
+         jccb  (Assembler::notZero, DONE_LABEL);
+       }
+
+       // Appears unlocked - try to swing _owner from null to non-null.
+       // Ideally, I'd manifest "Self" with get_thread and then attempt
+       // to CAS the register containing Self into m->Owner.
+       // But we don't have enough registers, so instead we can either try to CAS
+       // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
+       // we later store "Self" into m->Owner.  Transiently storing a stack address
+       // (rsp or the address of the box) into m->owner is harmless.
+       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
+       jccb  (Assembler::notZero, DONE_LABEL);
+       get_thread (scrReg);                    // beware: clobbers ICCs
+       movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
+       xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
+
+       // If the CAS fails we can either retry or pass control to the slow-path.
+       // We use the latter tactic.
+       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
+       // If the CAS was successful ...
+       //   Self has acquired the lock
+       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
+       // Intentional fall-through into DONE_LABEL ...
+    } else {
+       movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
+       movptr(boxReg, tmpReg);
+
+       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
+       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+          // prefetchw [eax + Offset(_owner)-2]
+          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       }
+
+       if ((EmitSync & 64) == 0) {
+         // Optimistic form
+         xorptr  (tmpReg, tmpReg);
+       } else {
+         // Can suffer RTS->RTO upgrades on shared or cold $ lines
+         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         testptr(tmpReg, tmpReg);                   // Locked ?
+         jccb  (Assembler::notZero, DONE_LABEL);
+       }
+
+       // Appears unlocked - try to swing _owner from null to non-null.
+       // Use either "Self" (in scr) or rsp as thread identity in _owner.
+       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
+       get_thread (scrReg);
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+
+       // If the CAS fails we can either retry or pass control to the slow-path.
+       // We use the latter tactic.
+       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
+       // If the CAS was successful ...
+       //   Self has acquired the lock
+       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
+       // Intentional fall-through into DONE_LABEL ...
+    }
+#else // _LP64
+    // It's inflated
+
+    // TODO: someday avoid the ST-before-CAS penalty by
+    // relocating (deferring) the following ST.
+    // We should also think about trying a CAS without having
+    // fetched _owner.  If the CAS is successful we may
+    // avoid an RTO->RTS upgrade on the $line.
+
+    // Without cast to int32_t a movptr will destroy r10 which is typically obj
+    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+
+    mov    (boxReg, tmpReg);
+    movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    testptr(tmpReg, tmpReg);
+    jccb   (Assembler::notZero, DONE_LABEL);
+
+    // It's inflated and appears unlocked
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    // Intentional fall-through into DONE_LABEL ...
+
+#endif
+
+    // DONE_LABEL is a hot target - we'd really like to place it at the
+    // start of cache line by padding with NOPs.
+    // See the AMD and Intel software optimization manuals for the
+    // most efficient "long" NOP encodings.
+    // Unfortunately none of our alignment mechanisms suffice.
+    bind(DONE_LABEL);
+
+    // At DONE_LABEL the icc ZFlag is set as follows ...
+    // Fast_Unlock uses the same protocol.
+    // ZFlag == 1 -> Success
+    // ZFlag == 0 -> Failure - force control through the slow-path
+  }
+}
+
+// obj: object to unlock
+// box: box address (displaced header location), killed.  Must be EAX.
+// tmp: killed, cannot be obj nor box.
+//
+// Some commentary on balanced locking:
+//
+// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
+// Methods that don't have provably balanced locking are forced to run in the
+// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
+// The interpreter provides two properties:
+// I1:  At return-time the interpreter automatically and quietly unlocks any
+//      objects acquired by the current activation (frame).  Recall that the
+//      interpreter maintains an on-stack list of locks currently held by
+//      a frame.
+// I2:  If a method attempts to unlock an object that is not held by
+//      the frame, the interpreter throws IMSX.
+//
+// Let's say A(), which has provably balanced locking, acquires O and then calls B().
+// B() doesn't have provably balanced locking so it runs in the interpreter.
+// Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
+// is still locked by A().
+//
+// The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
+// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
+// should not be unlocked by "normal" java-level locking and vice-versa.  The specification
+// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
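+//
+// A schematic Java-level illustration of the balanced property (exposition
+// only; the names are made up):
+//
+//   void A() { synchronized (O) { B(); } }   // provably balanced -> compiled
+//   void B() { ... unbalanced locking ... }  // forced into the interpreter
+//
+// When B() returns, I1 and I2 above guarantee that O is still locked by A()'s
+// frame, so A()'s Fast_Unlock of O is sound.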
+
+void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
+  guarantee (objReg != boxReg, "");
+  guarantee (objReg != tmpReg, "");
+  guarantee (boxReg != tmpReg, "");
+  guarantee (boxReg == rax, "");
+
+  if (EmitSync & 4) {
+    // Disable - inhibit all inlining.  Force control through the slow-path
+    cmpptr (rsp, 0);
+  } else
+  if (EmitSync & 8) {
+    Label DONE_LABEL;
+    if (UseBiasedLocking) {
+       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
+    }
+    // Classic stack-locking code ...
+    // Check whether the displaced header is 0
+    //(=> recursive unlock)
+    movptr(tmpReg, Address(boxReg, 0));
+    testptr(tmpReg, tmpReg);
+    jccb(Assembler::zero, DONE_LABEL);
+    // If not recursive lock, reset the header to displaced header
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(tmpReg, Address(objReg, 0));   // Uses RAX which is box
+    bind(DONE_LABEL);
+  } else {
+    Label DONE_LABEL, Stacked, CheckSucc;
+
+    // Critically, the biased locking test must have precedence over
+    // and appear before the (box->dhw == 0) recursive stack-lock test.
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
+       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
+    }
+
+    cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
+    movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
+    jccb  (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
+
+    testptr(tmpReg, 0x02);                          // Inflated?
+    jccb  (Assembler::zero, Stacked);
+
+    // It's inflated.
+    // Despite our balanced locking property we still check that m->_owner == Self
+    // as java routines or native JNI code called by this thread might
+    // have released the lock.
+    // Refer to the comments in synchronizer.cpp for how we might encode extra
+    // state in _succ so we can avoid fetching EntryList|cxq.
+    //
+    // I'd like to add more cases in fast_lock() and fast_unlock() --
+    // such as recursive enter and exit -- but we have to be wary of
+    // I$ bloat, T$ effects and BP$ effects.
+    //
+    // If there's no contention try a 1-0 exit.  That is, exit without
+    // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
+    // we detect and recover from the race that the 1-0 exit admits.
+    //
+    // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
+    // before it STs null into _owner, releasing the lock.  Updates
+    // to data protected by the critical section must be visible before
+    // we drop the lock (and thus before any other thread could acquire
+    // the lock and observe the fields protected by the lock).
+    // IA32's memory-model is SPO, so STs are ordered with respect to
+    // each other and there's no need for an explicit barrier (fence).
+    // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
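+    //
+    // (The 1-0 exit is the plain, unfenced store of NULL_WORD into _owner
+    //  below, taken once recursions, EntryList and cxq are observed to be
+    //  zero.)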
+#ifndef _LP64
+    get_thread (boxReg);
+    if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+      // prefetchw [ebx + Offset(_owner)-2]
+      prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    }
+
+    // Note that we could employ various encoding schemes to reduce
+    // the number of loads below (currently 4) to just 2 or 3.
+    // Refer to the comments in synchronizer.cpp.
+    // In practice the chain of fetches doesn't seem to impact performance, however.
+    if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
+       // Attempt to reduce branch density - AMD's branch predictor.
+       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, DONE_LABEL);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       jmpb  (DONE_LABEL);
+    } else {
+       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, DONE_LABEL);
+       movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, CheckSucc);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       jmpb  (DONE_LABEL);
+    }
+
+    // The following code fragment (EmitSync & 65536) improves the performance of
+    // contended applications and contended synchronization microbenchmarks.
+    // Unfortunately the emission of the code - even though not executed - causes regressions
+    // in scimark and jetstream, evidently because of $ effects.  Replacing the code
+    // with an equal number of never-executed NOPs results in the same regression.
+    // We leave it off by default.
+
+    if ((EmitSync & 65536) != 0) {
+       Label LSuccess, LGoSlowPath ;
+
+       bind  (CheckSucc);
+
+       // Optional pre-test ... it's safe to elide this
+       if ((EmitSync & 16) == 0) {
+          cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+          jccb  (Assembler::zero, LGoSlowPath);
+       }
+
+       // We have a classic Dekker-style idiom:
+       //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
+       // There are a number of ways to implement the barrier:
+       // (1) lock:andl &m->_owner, 0
+       //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
+       //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
+       //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
+       // (2) If supported, an explicit MFENCE is appealing.
+       //     In older IA32 processors MFENCE is slower than lock:add or xchg
+       //     particularly if the write-buffer is full, as might be the case
+       //     if stores closely precede the fence or fence-equivalent instruction.
+       //     In more modern implementations MFENCE appears faster, however.
+       // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
+       //     The $lines underlying the top-of-stack should be in M-state.
+       //     The locked add instruction is serializing, of course.
+       // (4) Use xchg, which is serializing
+       //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
+       // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
+       //     The integer condition codes will tell us if succ was 0.
+       //     Since _succ and _owner should reside in the same $line and
+       //     we just stored into _owner, it's likely that the $line
+       //     remains in M-state for the lock:orl.
+       //
+       // We currently use (3), although it's likely that switching to (2)
+       // is correct for the future.
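+       //
+       // Concretely, just below: mfence() is option (2) (taken when SSE2 is
+       // available and FenceInstruction == 1), otherwise the
+       // lock; addptr([rsp], 0) idiom is option (3).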
+
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       if (os::is_MP()) {
+          if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
+            mfence();
+          } else {
+            lock (); addptr(Address(rsp, 0), 0);
+          }
+       }
+       // Ratify _succ remains non-null
+       cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
+       jccb  (Assembler::notZero, LSuccess);
+
+       xorptr(boxReg, boxReg);                  // box is really EAX
+       if (os::is_MP()) { lock(); }
+       cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       jccb  (Assembler::notEqual, LSuccess);
+       // Since we're low on registers we installed rsp as a placeholder in _owner.
+       // Now install Self over rsp.  This is safe as we're transitioning from
+       // non-null to non-null.
+       get_thread (boxReg);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
+       // Intentional fall-through into LGoSlowPath ...
+
+       bind  (LGoSlowPath);
+       orptr(boxReg, 1);                      // set ICC.ZF=0 to indicate failure
+       jmpb  (DONE_LABEL);
+
+       bind  (LSuccess);
+       xorptr(boxReg, boxReg);                 // set ICC.ZF=1 to indicate success
+       jmpb  (DONE_LABEL);
+    }
+
+    bind (Stacked);
+    // It's not inflated and it's not recursively stack-locked and it's not biased.
+    // It must be stack-locked.
+    // Try to reset the header to displaced header.
+    // The "box" value on the stack is stable, so we can reload
+    // and be assured we observe the same value as above.
+    movptr(tmpReg, Address(boxReg, 0));
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+    // Intentional fall-through into DONE_LABEL
+
+    // DONE_LABEL is a hot target - we'd really like to place it at the
+    // start of cache line by padding with NOPs.
+    // See the AMD and Intel software optimization manuals for the
+    // most efficient "long" NOP encodings.
+    // Unfortunately none of our alignment mechanisms suffice.
+    if ((EmitSync & 65536) == 0) {
+       bind (CheckSucc);
+    }
+#else // _LP64
+    // It's inflated
+    movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    xorptr(boxReg, r15_thread);
+    orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+    jccb  (Assembler::notZero, DONE_LABEL);
+    movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+    orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+    jccb  (Assembler::notZero, CheckSucc);
+    movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+    jmpb  (DONE_LABEL);
+
+    if ((EmitSync & 65536) == 0) {
+      Label LSuccess, LGoSlowPath ;
+      bind  (CheckSucc);
+      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      jccb  (Assembler::zero, LGoSlowPath);
+
+      // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
+      // explicit ST;MEMBAR combination, but masm doesn't currently support
+      // "ANDQ M,IMM".  Don't use MFENCE here; lock:add to TOS, xchg, etc.
+      // are all faster when the write buffer is populated.
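+      // (Hence the plain store of NULL_WORD followed by lock; addl [rsp], 0
+      //  just below, standing in for the ST;MEMBAR.)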
+      movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      if (os::is_MP()) {
+         lock (); addl (Address(rsp, 0), 0);
+      }
+      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      jccb  (Assembler::notZero, LSuccess);
+
+      movptr (boxReg, (int32_t)NULL_WORD);                   // box is really EAX
+      if (os::is_MP()) { lock(); }
+      cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      jccb  (Assembler::notEqual, LSuccess);
+      // Intentional fall-through into slow-path
+
+      bind  (LGoSlowPath);
+      orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
+      jmpb  (DONE_LABEL);
+
+      bind  (LSuccess);
+      testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
+      jmpb  (DONE_LABEL);
+    }
+
+    bind  (Stacked);
+    movptr(tmpReg, Address (boxReg, 0));      // re-fetch
+    if (os::is_MP()) { lock(); }
+    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+
+    if (EmitSync & 65536) {
+       bind (CheckSucc);
+    }
+#endif
+    bind(DONE_LABEL);
+    // Avoid branch to branch on AMD processors
+    if (EmitSync & 32768) {
+       nop();
+    }
+  }
+}
+#endif // COMPILER2
+
 void MacroAssembler::c2bool(Register x) {
   // implements x == 0 ? 0 : 1
   // note: must only look at least-significant byte of x
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -651,7 +651,12 @@
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
+#ifdef COMPILER2
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  // See full description in macroAssembler_x86.cpp.
+  void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
+  void fast_unlock(Register obj, Register box, Register tmp);
+#endif
 
   Condition negate_condition(Condition cond);
 
--- a/src/cpu/x86/vm/register_definitions_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/register_definitions_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -977,7 +977,9 @@
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
+                                         VMRegPair *regs2,
                                          int total_args_passed) {
+  assert(regs2 == NULL, "not needed on x86");
 // We return the amount of VMRegImpl stack slots we need to reserve for all
 // the arguments NOT counting out_preserve_stack_slots.
 
@@ -1624,7 +1626,7 @@
   // Now figure out where the args must be stored and how much stack space
   // they require.
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Compute framesize for the wrapper.  We need to handlize all oops in
   // registers a max of 2 on x86.
@@ -2495,7 +2497,7 @@
   // they require (neglecting out_preserve_stack_slots).
 
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Calculate the total number of stack slots we will need.
 
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -903,7 +903,9 @@
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
+                                         VMRegPair *regs2,
                                          int total_args_passed) {
+  assert(regs2 == NULL, "not needed on x86");
 // We return the amount of VMRegImpl stack slots we need to reserve for all
 // the arguments NOT counting out_preserve_stack_slots.
 
@@ -1895,7 +1897,7 @@
   // Now figure out where the args must be stored and how much stack space
   // they require.
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Compute framesize for the wrapper.  We need to handlize all oops in
   // incoming registers
@@ -2799,7 +2801,7 @@
   // the 1st six register arguments). It's weird see int_stk_helper.
 
   int out_arg_slots;
-  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
+  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
 
   // Calculate the total number of stack slots we will need.
 
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2403,6 +2403,9 @@
   //   c_rarg3   - r vector byte array address
   //   c_rarg4   - input length
   //
+  // Output:
+  //   rax       - input length
+  //
   address generate_cipherBlockChaining_encryptAESCrypt() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
     __ align(CodeEntryAlignment);
@@ -2483,7 +2486,7 @@
     __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
 
     handleSOERegisters(false /*restoring*/);
-    __ movl(rax, 0);                             // return 0 (why?)
+    __ movptr(rax, len_param); // return length
     __ leave();                                  // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
@@ -2557,6 +2560,9 @@
   //   c_rarg3   - r vector byte array address
   //   c_rarg4   - input length
   //
+  // Output:
+  //   rax       - input length
+  //
 
   address generate_cipherBlockChaining_decryptAESCrypt() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
@@ -2650,7 +2656,7 @@
     __ movptr(rvec , rvec_param);                                     // restore this since used in loop
     __ movdqu(Address(rvec, 0), xmm_temp);                            // final value of r stored in rvec of CipherBlockChaining object
     handleSOERegisters(false /*restoring*/);
-    __ movl(rax, 0);                                                  // return 0 (why?)
+    __ movptr(rax, len_param); // return length
     __ leave();                                                       // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -3217,6 +3217,9 @@
   //   c_rarg3   - r vector byte array address
   //   c_rarg4   - input length
   //
+  // Output:
+  //   rax       - input length
+  //
   address generate_cipherBlockChaining_encryptAESCrypt() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
     __ align(CodeEntryAlignment);
@@ -3232,7 +3235,7 @@
 #ifndef _WIN64
     const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
 #else
-    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
     const Register len_reg     = r10;      // pick the first volatile windows register
 #endif
     const Register pos         = rax;
@@ -3259,6 +3262,8 @@
     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
       __ movdqu(xmm_save(i), as_XMMRegister(i));
     }
+#else
+    __ push(len_reg); // Save
 #endif
 
     const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
@@ -3301,8 +3306,10 @@
     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
       __ movdqu(as_XMMRegister(i), xmm_save(i));
     }
+    __ movl(rax, len_mem);
+#else
+    __ pop(rax); // return length
 #endif
-    __ movl(rax, 0); // return 0 (why?)
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
@@ -3409,6 +3416,9 @@
   //   c_rarg3   - r vector byte array address
   //   c_rarg4   - input length
   //
+  // Output:
+  //   rax       - input length
+  //
 
   address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
     assert(UseAES, "need AES instructions and misaligned SSE support");
@@ -3427,7 +3437,7 @@
 #ifndef _WIN64
     const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
 #else
-    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
+    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
     const Register len_reg     = r10;      // pick the first volatile windows register
 #endif
     const Register pos         = rax;
@@ -3448,7 +3458,10 @@
     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
       __ movdqu(xmm_save(i), as_XMMRegister(i));
     }
+#else
+    __ push(len_reg); // Save
 #endif
+
     // the java expanded key ordering is rotated one position from what we want
     // so we start from 0x10 here and hit 0x00 last
     const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
@@ -3554,8 +3567,10 @@
     for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
       __ movdqu(as_XMMRegister(i), xmm_save(i));
     }
+    __ movl(rax, len_mem);
+#else
+    __ pop(rax); // return length
 #endif
-    __ movl(rax, 0); // return 0 (why?)
     __ leave(); // required for proper stackwalking of RuntimeStub frame
     __ ret(0);
 
--- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -107,10 +107,6 @@
   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 }
 
-static inline Address at_tos_p3() {
-  return Address(rsp,  Interpreter::expr_offset_in_bytes(3));
-}
-
 // Condition conversion
 static Assembler::Condition j_not(TemplateTable::Condition cc) {
   switch (cc) {
--- a/src/cpu/x86/vm/vmStructs_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/vmStructs_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/cpu/x86/vm/x86.ad	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/x86.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -581,6 +581,12 @@
   return !AlignVector; // can be changed by flag
 }
 
+// x86 AES instructions are compatible with SunJCE expanded
+// keys, hence we do not need to pass the original key to stubs
+const bool Matcher::pass_original_key_for_aes() {
+  return false;
+}
+
 // Helper methods for MachSpillCopyNode::implementation().
 static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
                           int src_hi, int dst_hi, uint ireg, outputStream* st) {
--- a/src/cpu/x86/vm/x86_32.ad	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/x86_32.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -487,6 +487,11 @@
   return 0;  // absolute addressing, no offset
 }
 
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  ShouldNotReachHere();
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   // Empty encoding
 }
@@ -1389,6 +1394,9 @@
 // No CMOVF/CMOVD with SSE/SSE2
 const int Matcher::float_cmove_cost() { return (UseSSE>=1) ? ConditionalMoveLimit : 0; }
 
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
 // Should the Matcher clone shifts on addressing modes, expecting them to
 // be subsumed into complex addressing expressions or compute them into
 // registers?  True for Intel but false for most RISCs
@@ -1534,19 +1542,6 @@
   return EBP_REG_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return EAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  ShouldNotReachHere();
-  return RegMask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
 // Returns true if the high 32 bits of the value is known to be zero.
 bool is_operand_hi32_zero(Node* n) {
   int opc = n->Opcode();
@@ -2910,542 +2905,6 @@
     emit_d8    (cbuf,0 );
   %}
 
-
-  // Because the transitions from emitted code to the runtime
-  // monitorenter/exit helper stubs are so slow it's critical that
-  // we inline both the stack-locking fast-path and the inflated fast path.
-  //
-  // See also: cmpFastLock and cmpFastUnlock.
-  //
-  // What follows is a specialized inline transliteration of the code
-  // in slow_enter() and slow_exit().  If we're concerned about I$ bloat
-  // another option would be to emit TrySlowEnter and TrySlowExit methods
-  // at startup-time.  These methods would accept arguments as
-  // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
-  // indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
-  // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
-  // In practice, however, the # of lock sites is bounded and is usually small.
-  // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
-  // if the processor uses simple bimodal branch predictors keyed by EIP
-  // Since the helper routines would be called from multiple synchronization
-  // sites.
-  //
-  // An even better approach would be write "MonitorEnter()" and "MonitorExit()"
-  // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
-  // to those specialized methods.  That'd give us a mostly platform-independent
-  // implementation that the JITs could optimize and inline at their pleasure.
-  // Done correctly, the only time we'd need to cross to native could would be
-  // to park() or unpark() threads.  We'd also need a few more unsafe operators
-  // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
-  // (b) explicit barriers or fence operations.
-  //
-  // TODO:
-  //
-  // *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
-  //    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
-  //    Given TLAB allocation, Self is usually manifested in a register, so passing it into
-  //    the lock operators would typically be faster than reifying Self.
-  //
-  // *  Ideally I'd define the primitives as:
-  //       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
-  //       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
-  //    Unfortunately ADLC bugs prevent us from expressing the ideal form.
-  //    Instead, we're stuck with a rather awkward and brittle register assignments below.
-  //    Furthermore the register assignments are overconstrained, possibly resulting in
-  //    sub-optimal code near the synchronization site.
-  //
-  // *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
-  //    Alternately, use a better sp-proximity test.
-  //
-  // *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
-  //    Either one is sufficient to uniquely identify a thread.
-  //    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
-  //
-  // *  Intrinsify notify() and notifyAll() for the common cases where the
-  //    object is locked by the calling thread but the waitlist is empty.
-  //    avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
-  //
-  // *  use jccb and jmpb instead of jcc and jmp to improve code density.
-  //    But beware of excessive branch density on AMD Opterons.
-  //
-  // *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
-  //    or failure of the fast-path.  If the fast-path fails then we pass
-  //    control to the slow-path, typically in C.  In Fast_Lock and
-  //    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
-  //    will emit a conditional branch immediately after the node.
-  //    So we have branches to branches and lots of ICC.ZF games.
-  //    Instead, it might be better to have C2 pass a "FailureLabel"
-  //    into Fast_Lock and Fast_Unlock.  In the case of success, control
-  //    will drop through the node.  ICC.ZF is undefined at exit.
-  //    In the case of failure, the node will branch directly to the
-  //    FailureLabel
-
-
-  // obj: object to lock
-  // box: on-stack box address (displaced header location) - KILLED
-  // rax,: tmp -- KILLED
-  // scr: tmp -- KILLED
-  enc_class Fast_Lock( eRegP obj, eRegP box, eAXRegI tmp, eRegP scr ) %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    Register scrReg = as_Register($scr$$reg);
-
-    // Ensure the register assignents are disjoint
-    guarantee (objReg != boxReg, "") ;
-    guarantee (objReg != tmpReg, "") ;
-    guarantee (objReg != scrReg, "") ;
-    guarantee (boxReg != tmpReg, "") ;
-    guarantee (boxReg != scrReg, "") ;
-    guarantee (tmpReg == as_Register(EAX_enc), "") ;
-
-    MacroAssembler masm(&cbuf);
-
-    if (_counters != NULL) {
-      masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
-    }
-    if (EmitSync & 1) {
-        // set box->dhw = unused_mark (3)
-        // Force all sync thru slow-path: slow_enter() and slow_exit() 
-        masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;             
-        masm.cmpptr (rsp, (int32_t)0) ;                        
-    } else 
-    if (EmitSync & 2) { 
-        Label DONE_LABEL ;           
-        if (UseBiasedLocking) {
-           // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-           masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-        }
-
-        masm.movptr(tmpReg, Address(objReg, 0)) ;          // fetch markword 
-        masm.orptr (tmpReg, 0x1);
-        masm.movptr(Address(boxReg, 0), tmpReg);           // Anticipate successful CAS 
-        if (os::is_MP()) { masm.lock();  }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0));          // Updates tmpReg
-        masm.jcc(Assembler::equal, DONE_LABEL);
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        masm.bind(DONE_LABEL) ; 
-    } else {  
-      // Possible cases that we'll encounter in fast_lock 
-      // ------------------------------------------------
-      // * Inflated
-      //    -- unlocked
-      //    -- Locked
-      //       = by self
-      //       = by other
-      // * biased
-      //    -- by Self
-      //    -- by other
-      // * neutral
-      // * stack-locked
-      //    -- by self
-      //       = sp-proximity test hits
-      //       = sp-proximity test generates false-negative
-      //    -- by other
-      //
-
-      Label IsInflated, DONE_LABEL, PopDone ;
-
-      // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
-      // order to reduce the number of conditional branches in the most common cases.
-      // Beware -- there's a subtle invariant that fetch of the markword
-      // at [FETCH], below, will never observe a biased encoding (*101b).
-      // If this invariant is not held we risk exclusion (safety) failure.
-      if (UseBiasedLocking && !UseOptoBiasInlining) {
-        masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-      }
-
-      masm.movptr(tmpReg, Address(objReg, 0)) ;         // [FETCH]
-      masm.testptr(tmpReg, 0x02) ;                      // Inflated v (Stack-locked or neutral)
-      masm.jccb  (Assembler::notZero, IsInflated) ;
-
-      // Attempt stack-locking ...
-      masm.orptr (tmpReg, 0x1);
-      masm.movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-      if (os::is_MP()) { masm.lock();  }
-      masm.cmpxchgptr(boxReg, Address(objReg, 0));           // Updates tmpReg
-      if (_counters != NULL) {
-        masm.cond_inc32(Assembler::equal,
-                        ExternalAddress((address)_counters->fast_path_entry_count_addr()));
-      }
-      masm.jccb (Assembler::equal, DONE_LABEL);
-
-      // Recursive locking
-      masm.subptr(tmpReg, rsp);
-      masm.andptr(tmpReg, 0xFFFFF003 );
-      masm.movptr(Address(boxReg, 0), tmpReg);
-      if (_counters != NULL) {
-        masm.cond_inc32(Assembler::equal,
-                        ExternalAddress((address)_counters->fast_path_entry_count_addr()));
-      }
-      masm.jmp  (DONE_LABEL) ;
-
-      masm.bind (IsInflated) ;
-
-      // The object is inflated.
-      //
-      // TODO-FIXME: eliminate the ugly use of manifest constants:
-      //   Use markOopDesc::monitor_value instead of "2".
-      //   use markOop::unused_mark() instead of "3".
-      // The tmpReg value is an objectMonitor reference ORed with
-      // markOopDesc::monitor_value (2).   We can either convert tmpReg to an
-      // objectmonitor pointer by masking off the "2" bit or we can just
-      // use tmpReg as an objectmonitor pointer but bias the objectmonitor
-      // field offsets with "-2" to compensate for and annul the low-order tag bit.
-      //
-      // I use the latter as it avoids AGI stalls.
-      // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
-      // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
-      //
-      #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
-
-      // boxReg refers to the on-stack BasicLock in the current frame.
-      // We'd like to write:
-      //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
-      // This is convenient but results a ST-before-CAS penalty.  The following CAS suffers
-      // additional latency as we have another ST in the store buffer that must drain.
-
-      if (EmitSync & 8192) { 
-         masm.movptr(Address(boxReg, 0), 3) ;            // results in ST-before-CAS penalty
-         masm.get_thread (scrReg) ; 
-         masm.movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2] 
-         masm.movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
-         if (os::is_MP()) { masm.lock(); } 
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
-      } else 
-      if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
-         masm.movptr(scrReg, boxReg) ; 
-         masm.movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2] 
-
-         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
-         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-            // prefetchw [eax + Offset(_owner)-2]
-            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
-         }
-
-         if ((EmitSync & 64) == 0) {
-           // Optimistic form: consider XORL tmpReg,tmpReg
-           masm.movptr(tmpReg, NULL_WORD) ; 
-         } else { 
-           // Can suffer RTS->RTO upgrades on shared or cold $ lines
-           // Test-And-CAS instead of CAS
-           masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;   // rax, = m->_owner
-           masm.testptr(tmpReg, tmpReg) ;                   // Locked ? 
-           masm.jccb  (Assembler::notZero, DONE_LABEL) ;                   
-         }
-
-         // Appears unlocked - try to swing _owner from null to non-null.
-         // Ideally, I'd manifest "Self" with get_thread and then attempt
-         // to CAS the register containing Self into m->Owner.
-         // But we don't have enough registers, so instead we can either try to CAS
-         // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
-         // we later store "Self" into m->Owner.  Transiently storing a stack address
-         // (rsp or the address of the box) into  m->owner is harmless.
-         // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-         if (os::is_MP()) { masm.lock();  }
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
-         masm.movptr(Address(scrReg, 0), 3) ;          // box->_displaced_header = 3
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.get_thread (scrReg) ;                    // beware: clobbers ICCs
-         masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ; 
-         masm.xorptr(boxReg, boxReg) ;                 // set icc.ZFlag = 1 to indicate success
-                       
-         // If the CAS fails we can either retry or pass control to the slow-path.  
-         // We use the latter tactic.  
-         // Pass the CAS result in the icc.ZFlag into DONE_LABEL
-         // If the CAS was successful ...
-         //   Self has acquired the lock
-         //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
-         // Intentional fall-through into DONE_LABEL ...
-      } else {
-         masm.movptr(Address(boxReg, 0), 3) ;       // results in ST-before-CAS penalty
-         masm.movptr(boxReg, tmpReg) ; 
-
-         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
-         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-            // prefetchw [eax + Offset(_owner)-2]
-            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
-         }
-
-         if ((EmitSync & 64) == 0) {
-           // Optimistic form
-           masm.xorptr  (tmpReg, tmpReg) ; 
-         } else { 
-           // Can suffer RTS->RTO upgrades on shared or cold $ lines
-           masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;   // rax, = m->_owner
-           masm.testptr(tmpReg, tmpReg) ;                   // Locked ? 
-           masm.jccb  (Assembler::notZero, DONE_LABEL) ;                   
-         }
-
-         // Appears unlocked - try to swing _owner from null to non-null.
-         // Use either "Self" (in scr) or rsp as thread identity in _owner.
-         // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-         masm.get_thread (scrReg) ;
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-
-         // If the CAS fails we can either retry or pass control to the slow-path.
-         // We use the latter tactic.
-         // Pass the CAS result in the icc.ZFlag into DONE_LABEL
-         // If the CAS was successful ...
-         //   Self has acquired the lock
-         //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
-         // Intentional fall-through into DONE_LABEL ...
-      }
-
-      // DONE_LABEL is a hot target - we'd really like to place it at the
-      // start of cache line by padding with NOPs.
-      // See the AMD and Intel software optimization manuals for the
-      // most efficient "long" NOP encodings.
-      // Unfortunately none of our alignment mechanisms suffice.
-      masm.bind(DONE_LABEL);
-
-      // Avoid branch-to-branch on AMD processors
-      // This appears to be superstition.
-      if (EmitSync & 32) masm.nop() ;
-
-
-      // At DONE_LABEL the icc ZFlag is set as follows ...
-      // Fast_Unlock uses the same protocol.
-      // ZFlag == 1 -> Success
-      // ZFlag == 0 -> Failure - force control through the slow-path
-    }
-  %}
-
-  // obj: object to unlock
-  // box: box address (displaced header location), killed.  Must be EAX.
-  // rbx,: killed tmp; cannot be obj nor box.
-  //
-  // Some commentary on balanced locking:
-  //
-  // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
-  // Methods that don't have provably balanced locking are forced to run in the
-  // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
-  // The interpreter provides two properties:
-  // I1:  At return-time the interpreter automatically and quietly unlocks any
-  //      objects acquired the current activation (frame).  Recall that the
-  //      interpreter maintains an on-stack list of locks currently held by
-  //      a frame.
-  // I2:  If a method attempts to unlock an object that is not held by the
-  //      the frame the interpreter throws IMSX.
-  //
-  // Lets say A(), which has provably balanced locking, acquires O and then calls B().
-  // B() doesn't have provably balanced locking so it runs in the interpreter.
-  // Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
-  // is still locked by A().
-  //
-  // The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
-  // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
-  // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
-  // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
-
-  enc_class Fast_Unlock( nabxRegP obj, eAXRegP box, eRegP tmp) %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-
-    guarantee (objReg != boxReg, "") ;
-    guarantee (objReg != tmpReg, "") ;
-    guarantee (boxReg != tmpReg, "") ;
-    guarantee (boxReg == as_Register(EAX_enc), "") ;
-    MacroAssembler masm(&cbuf);
-
-    if (EmitSync & 4) {
-      // Disable - inhibit all inlining.  Force control through the slow-path
-      masm.cmpptr (rsp, 0) ; 
-    } else 
-    if (EmitSync & 8) {
-      Label DONE_LABEL ;
-      if (UseBiasedLocking) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-      }
-      // classic stack-locking code ...
-      masm.movptr(tmpReg, Address(boxReg, 0)) ;
-      masm.testptr(tmpReg, tmpReg) ;
-      masm.jcc   (Assembler::zero, DONE_LABEL) ;
-      if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchgptr(tmpReg, Address(objReg, 0));          // Uses EAX which is box
-      masm.bind(DONE_LABEL);
-    } else {
-      Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
-
-      // Critically, the biased locking test must have precedence over
-      // and appear before the (box->dhw == 0) recursive stack-lock test.
-      if (UseBiasedLocking && !UseOptoBiasInlining) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-      }
-      
-      masm.cmpptr(Address(boxReg, 0), 0) ;            // Examine the displaced header
-      masm.movptr(tmpReg, Address(objReg, 0)) ;       // Examine the object's markword
-      masm.jccb  (Assembler::zero, DONE_LABEL) ;      // 0 indicates recursive stack-lock
-
-      masm.testptr(tmpReg, 0x02) ;                     // Inflated? 
-      masm.jccb  (Assembler::zero, Stacked) ;
-
-      masm.bind  (Inflated) ;
-      // It's inflated.
-      // Despite our balanced locking property we still check that m->_owner == Self
-      // as java routines or native JNI code called by this thread might
-      // have released the lock.
-      // Refer to the comments in synchronizer.cpp for how we might encode extra
-      // state in _succ so we can avoid fetching EntryList|cxq.
-      //
-      // I'd like to add more cases in fast_lock() and fast_unlock() --
-      // such as recursive enter and exit -- but we have to be wary of
-      // I$ bloat, T$ effects and BP$ effects.
-      //
-      // If there's no contention try a 1-0 exit.  That is, exit without
-      // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
-      // we detect and recover from the race that the 1-0 exit admits.
-      //
-      // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
-      // before it STs null into _owner, releasing the lock.  Updates
-      // to data protected by the critical section must be visible before
-      // we drop the lock (and thus before any other thread could acquire
-      // the lock and observe the fields protected by the lock).
-      // IA32's memory-model is SPO, so STs are ordered with respect to
-      // each other and there's no need for an explicit barrier (fence).
-      // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
-
-      masm.get_thread (boxReg) ;
-      if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-        // prefetchw [ebx + Offset(_owner)-2]
-        masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
-      }
-
-      // Note that we could employ various encoding schemes to reduce
-      // the number of loads below (currently 4) to just 2 or 3.
-      // Refer to the comments in synchronizer.cpp.
-      // In practice the chain of fetches doesn't seem to impact performance, however.
-      if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
-         // Attempt to reduce branch density to help AMD's branch predictor.
-         masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;  
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         masm.jmpb  (DONE_LABEL) ; 
-      } else { 
-         masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;  
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
-         masm.jccb  (Assembler::notZero, CheckSucc) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         masm.jmpb  (DONE_LABEL) ; 
-      }
-
-      // The following code fragment (EmitSync & 65536) improves the performance of
-      // contended applications and contended synchronization microbenchmarks.
-      // Unfortunately the emission of the code - even though not executed - causes regressions
-      // in scimark and jetstream, evidently because of $ effects.  Replacing the code
-      // with an equal number of never-executed NOPs results in the same regression.
-      // We leave it off by default.
-
-      if ((EmitSync & 65536) != 0) {
-         Label LSuccess, LGoSlowPath ;
-
-         masm.bind  (CheckSucc) ;
-
-         // Optional pre-test ... it's safe to elide this
-         if ((EmitSync & 16) == 0) { 
-            masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; 
-            masm.jccb  (Assembler::zero, LGoSlowPath) ; 
-         }
-
-         // We have a classic Dekker-style idiom:
-         //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
-         // There are a number of ways to implement the barrier:
-         // (1) lock:andl &m->_owner, 0
-         //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
-         //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
-         //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
-         // (2) If supported, an explicit MFENCE is appealing.
-         //     In older IA32 processors MFENCE is slower than lock:add or xchg,
-         //     particularly if the write-buffer is full, as might be the case
-         //     if stores closely precede the fence or fence-equivalent instruction.
-         //     In more modern implementations MFENCE appears faster, however.
-         // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
-         //     The $lines underlying the top-of-stack should be in M-state.
-         //     The locked add instruction is serializing, of course.
-         // (4) Use xchg, which is serializing
-         //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
-         // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
-         //     The integer condition codes will tell us if succ was 0.
-         //     Since _succ and _owner should reside in the same $line and
-         //     we just stored into _owner, it's likely that the $line
-         //     remains in M-state for the lock:orl.
-         //
-         // We currently use (3), although it's likely that switching to (2)
-         // is correct for the future.
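
In plain C++ terms (an illustrative analogue, not HotSpot code), the 1-0 exit
plus successor re-check reads as follows; the seq_cst fence stands in for
whichever of options (1)-(5) is actually emitted:

    #include <atomic>

    struct Monitor { std::atomic<void*> owner, succ; };

    // Returns true if the 1-0 exit succeeded (a successor will re-acquire).
    bool try_1_0_exit(Monitor* m) {
      m->owner.store(nullptr, std::memory_order_release);  // ST m->_owner = 0
      std::atomic_thread_fence(std::memory_order_seq_cst); // MEMBAR
      // Ratify that _succ is still non-null after dropping the lock;
      // on false the caller must take the slow path to wake a thread.
      return m->succ.load(std::memory_order_relaxed) != nullptr;
    }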
-            
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         if (os::is_MP()) { 
-            if (VM_Version::supports_sse2() && 1 == FenceInstruction) { 
-              masm.mfence();
-            } else { 
-              masm.lock () ; masm.addptr(Address(rsp, 0), 0) ; 
-            }
-         }
-         // Ratify _succ remains non-null
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; 
-         masm.jccb  (Assembler::notZero, LSuccess) ; 
-
-         masm.xorptr(boxReg, boxReg) ;                  // box is really EAX
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         masm.jccb  (Assembler::notEqual, LSuccess) ;
-         // Since we're low on registers we installed rsp as a placeholder in _owner.
-         // Now install Self over rsp.  This is safe as we're transitioning from
-         // non-null to non-null
-         masm.get_thread (boxReg) ;
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
-         // Intentional fall-through into LGoSlowPath ...
-
-         masm.bind  (LGoSlowPath) ; 
-         masm.orptr(boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
-         masm.jmpb  (DONE_LABEL) ; 
-
-         masm.bind  (LSuccess) ; 
-         masm.xorptr(boxReg, boxReg) ;                 // set ICC.ZF=1 to indicate success
-         masm.jmpb  (DONE_LABEL) ; 
-      }
-
-      masm.bind (Stacked) ;
-      // It's not inflated and it's not recursively stack-locked and it's not biased.
-      // It must be stack-locked.
-      // Try to reset the header to displaced header.
-      // The "box" value on the stack is stable, so we can reload
-      // and be assured we observe the same value as above.
-      masm.movptr(tmpReg, Address(boxReg, 0)) ;
-      if (os::is_MP()) {   masm.lock();    }
-      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
-      // Intentional fall-through into DONE_LABEL
-
-
-      // DONE_LABEL is a hot target - we'd really like to place it at the
-      // start of a cache line by padding with NOPs.
-      // See the AMD and Intel software optimization manuals for the
-      // most efficient "long" NOP encodings.
-      // Unfortunately none of our alignment mechanisms suffice.
-      if ((EmitSync & 65536) == 0) {
-         masm.bind (CheckSucc) ;
-      }
-      masm.bind(DONE_LABEL);
-
-      // Avoid branch to branch on AMD processors
-      if (EmitSync & 32768) { masm.nop() ; }
-    }
-  %}
-
-
   enc_class enc_pop_rdx() %{
     emit_opcode(cbuf,0x5A);
   %}
@@ -3768,7 +3227,7 @@
   // automatically biased by the preserve_stack_slots field above.
   c_calling_convention %{
     // This is obviously always outgoing
-    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
   %}
 
   // Location of C & interpreter return values
@@ -3889,6 +3348,17 @@
   interface(CONST_INTER);
 %}
 
+// Int Immediate non-negative
+operand immU31()
+%{
+  predicate(n->get_int() >= 0);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
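
To see why the mask must leave bit 31 clear, look at the loadI2L rewrite
below: movl/andl zero-extend, while ConvI2L requires sign extension, and the
two agree only when the AND result cannot be negative. A stand-alone check
(plain C++, illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t v   = -1;
      int32_t ok  = 0x7FFFFFFF;   // immU31: bit 31 clear
      int32_t bad = INT32_MIN;    // 0x80000000: bit 31 set, excluded
      int32_t a = v & ok, b = v & bad;
      assert((int64_t)a == (int64_t)(uint32_t)a);  // sign == zero extension
      assert((int64_t)b != (int64_t)(uint32_t)b);  // they diverge
      return 0;
    }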
 // Constant for long shifts
 operand immI_32() %{
   predicate( n->get_int() == 32 );
@@ -6119,12 +5589,12 @@
   ins_pipe(ialu_reg_mem);
 %}
 
-// Load Integer with 32-bit mask into Long Register
-instruct loadI2L_immI(eRegL dst, memory mem, immI mask, eFlagsReg cr) %{
+// Load Integer with 31-bit mask into Long Register
+instruct loadI2L_immU31(eRegL dst, memory mem, immU31 mask, eFlagsReg cr) %{
   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
   effect(KILL cr);
 
-  format %{ "MOV    $dst.lo,$mem\t# int & 32-bit mask -> long\n\t"
+  format %{ "MOV    $dst.lo,$mem\t# int & 31-bit mask -> long\n\t"
             "XOR    $dst.hi,$dst.hi\n\t"
             "AND    $dst.lo,$mask" %}
   ins_encode %{
@@ -7088,6 +6558,7 @@
 
 instruct membar_acquire() %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(400);
 
   size(0);
@@ -7108,6 +6579,7 @@
 
 instruct membar_release() %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(400);
 
   size(0);
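
The empty encoding is deliberate: on IA32's strong memory model, acquire and
release fences need no instruction at all, which is why MemBarAcquire,
MemBarRelease, and the new LoadFence/StoreFence matches can all share a
size(0) instruct. The same fact in plain C++ (illustrative):

    #include <atomic>

    void fences() {
      std::atomic_thread_fence(std::memory_order_acquire); // no code on x86
      std::atomic_thread_fence(std::memory_order_release); // no code on x86
      std::atomic_thread_fence(std::memory_order_seq_cst); // mfence or lock add
    }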
@@ -7524,44 +6996,6 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
-instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "ADD    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "ADD    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(AddExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "ADD    $dst,$src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe( ialu_reg_mem );
-%}
-
-
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
@@ -7871,43 +7305,6 @@
 
 //----------Subtraction Instructions-------------------------------------------
 
-instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "SUB    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "SUB    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(SubExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "SUB    $dst,$src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe( ialu_reg_mem );
-%}
-
 // Integer Subtraction Instructions
 instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (SubI dst src));
@@ -7976,17 +7373,6 @@
   ins_pipe( ialu_reg );
 %}
 
-instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{
-  match(NegExactI dst);
-  effect(DEF cr);
-
-  format %{ "NEG    $dst\t# negExact int"%}
-  ins_encode %{
-    __ negl($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
 // Multiply Register
@@ -8198,46 +7584,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(MulExactI dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "IMUL   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr)
-%{
-  match(MulExactI src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "IMUL   $dst, $src, $imm\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(MulExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "IMUL   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
-
 // Integer DIV with Register
 instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
   match(Set rax (DivI rax div));
@@ -9103,6 +8449,91 @@
 instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
   match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
 */
+//----------Overflow Math Instructions-----------------------------------------
+
+instruct overflowAddI_eReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "ADD    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddI_rReg_imm(eFlagsReg cr, eAXRegI op1, immI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "ADD    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg(eFlagsReg cr, rRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "CMP    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "CMP    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegI_rReg(eFlagsReg cr, immI0 zero, eAXRegI op2)
+%{
+  match(Set cr (OverflowSubI zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "NEG    $op2\t# overflow check int" %}
+  ins_encode %{
+    __ negl($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowMulI_rReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "IMUL    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "IMUL    $tmp, $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
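
Taken together with the removed addExact/subExact/mulExact/negExact instructs
above, the intent is visible: overflow-checked math is now an ordinary
flag-setting ALU op whose overflow flag feeds a branch, rather than a
value-producing node pinned to EAX. A stand-alone C++ equivalent of the check
(__builtin_add_overflow is a GCC/Clang builtin, used purely for illustration):

    #include <cstdint>
    #include <stdexcept>

    int32_t add_exact(int32_t a, int32_t b) {
      int32_t r;
      if (__builtin_add_overflow(a, b, &r))   // ADD sets OF; branch on it
        throw std::overflow_error("integer overflow");
      return r;
    }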
 
 //----------Long Instructions------------------------------------------------
 // Add Long Register with Register
@@ -13136,23 +12567,26 @@
 
 // inlined locking and unlocking
 
-
-instruct cmpFastLock( eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
-  match( Set cr (FastLock object box) );
-  effect( TEMP tmp, TEMP scr, USE_KILL box );
+instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
-  ins_encode( Fast_Lock(object,box,tmp,scr) );
-  ins_pipe( pipe_slow );
-%}
-
-instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
-  match( Set cr (FastUnlock object box) );
-  effect( TEMP tmp, USE_KILL box );
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
+  match(Set cr (FastUnlock object box));
+  effect(TEMP tmp, USE_KILL box);
   ins_cost(300);
   format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
-  ins_encode( Fast_Unlock(object,box,tmp) );
-  ins_pipe( pipe_slow );
+  ins_encode %{
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+  %}
+  ins_pipe(pipe_slow);
 %}
 
 
--- a/src/cpu/x86/vm/x86_64.ad	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/x86/vm/x86_64.ad	Wed Mar 12 13:30:08 2014 +0100
@@ -688,6 +688,11 @@
   return 0;  // absolute addressing, no offset
 }
 
+bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
+void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
+  ShouldNotReachHere();
+}
+
 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
   // Empty encoding
 }
@@ -1542,6 +1547,9 @@
 // No CMOVF/CMOVD with SSE2
 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
 
+// Does the CPU require late expand (see block.cpp for description of late expand)?
+const bool Matcher::require_postalloc_expand = false;
+
 // Should the Matcher clone shifts on addressing modes, expecting them
 // to be subsumed into complex addressing expressions or compute them
 // into registers?  True for Intel but false for most RISCs
@@ -1649,18 +1657,6 @@
   return PTR_RBP_REG_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return INT_RAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  return LONG_RAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -2591,231 +2587,6 @@
   %}
 
 
-  // obj: object to lock
-  // box: box address (header location) -- killed
-  // tmp: rax -- killed
-  // scr: rbx -- killed
-  //
-  // What follows is a direct transliteration of fast_lock() and fast_unlock()
-  // from i486.ad.  See that file for comments.
-  // TODO: where possible switch from movq (r, 0) to movl(r,0) and
-  // use the shorter encoding.  (Movl clears the high-order 32-bits).
-
-
-  enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
-  %{
-    Register objReg = as_Register((int)$obj$$reg);
-    Register boxReg = as_Register((int)$box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    Register scrReg = as_Register($scr$$reg);
-    MacroAssembler masm(&cbuf);
-
-    // Verify uniqueness of register assignments -- necessary but not sufficient
-    assert (objReg != boxReg && objReg != tmpReg &&
-            objReg != scrReg && tmpReg != scrReg, "invariant") ;
-
-    if (_counters != NULL) {
-      masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
-    }
-    if (EmitSync & 1) {
-        // Without cast to int32_t a movptr will destroy r10 which is typically obj
-        masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
-        masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
-    } else
-    if (EmitSync & 2) {
-        Label DONE_LABEL;
-        if (UseBiasedLocking) {
-           // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-        }
-        // QQQ was movl...
-        masm.movptr(tmpReg, 0x1);
-        masm.orptr(tmpReg, Address(objReg, 0));
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        if (os::is_MP()) {
-          masm.lock();
-        }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
-        masm.jcc(Assembler::equal, DONE_LABEL);
-
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, 7 - os::vm_page_size());
-        masm.movptr(Address(boxReg, 0), tmpReg);
-
-        masm.bind(DONE_LABEL);
-        masm.nop(); // avoid branch to branch
-    } else {
-        Label DONE_LABEL, IsInflated, Egress;
-
-        masm.movptr(tmpReg, Address(objReg, 0)) ;
-        masm.testl (tmpReg, 0x02) ;         // inflated vs stack-locked|neutral|biased
-        masm.jcc   (Assembler::notZero, IsInflated) ;
-
-        // it's stack-locked, biased or neutral
-        // TODO: optimize markword triage order to reduce the number of
-        // conditional branches in the most common cases.
-        // Beware -- there's a subtle invariant that fetch of the markword
-        // at [FETCH], below, will never observe a biased encoding (*101b).
-        // If this invariant does not hold we'll suffer an exclusion (safety) failure.
-
-        if (UseBiasedLocking && !UseOptoBiasInlining) {
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
-          masm.movptr(tmpReg, Address(objReg, 0)) ;        // [FETCH]
-        }
-
-        // was q will it destroy high?
-        masm.orl   (tmpReg, 1) ;
-        masm.movptr(Address(boxReg, 0), tmpReg) ;
-        if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
-        if (_counters != NULL) {
-           masm.cond_inc32(Assembler::equal,
-                           ExternalAddress((address) _counters->fast_path_entry_count_addr()));
-        }
-        masm.jcc   (Assembler::equal, DONE_LABEL);
-
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, 7 - os::vm_page_size());
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        if (_counters != NULL) {
-           masm.cond_inc32(Assembler::equal,
-                           ExternalAddress((address) _counters->fast_path_entry_count_addr()));
-        }
-        masm.jmp   (DONE_LABEL) ;
-
-        masm.bind  (IsInflated) ;
-        // It's inflated
-
-        // TODO: someday avoid the ST-before-CAS penalty by
-        // relocating (deferring) the following ST.
-        // We should also think about trying a CAS without having
-        // fetched _owner.  If the CAS is successful we may
-        // avoid an RTO->RTS upgrade on the $line.
-        // Without cast to int32_t a movptr will destroy r10 which is typically obj
-        masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
-
-        masm.mov    (boxReg, tmpReg) ;
-        masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        masm.testptr(tmpReg, tmpReg) ;
-        masm.jcc    (Assembler::notZero, DONE_LABEL) ;
-
-        // It's inflated and appears unlocked
-        if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        // Intentional fall-through into DONE_LABEL ...
-
-        masm.bind  (DONE_LABEL) ;
-        masm.nop   () ;                 // avoid jmp to jmp
-    }
-  %}
-
-  // obj: object to unlock
-  // box: box address (displaced header location), killed
-  // RBX: killed tmp; cannot be obj nor box
-  enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
-  %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    MacroAssembler masm(&cbuf);
-
-    if (EmitSync & 4) {
-       masm.cmpptr(rsp, 0) ;
-    } else
-    if (EmitSync & 8) {
-       Label DONE_LABEL;
-       if (UseBiasedLocking) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-       }
-
-       // Check whether the displaced header is 0
-       // (=> recursive unlock)
-       masm.movptr(tmpReg, Address(boxReg, 0));
-       masm.testptr(tmpReg, tmpReg);
-       masm.jcc(Assembler::zero, DONE_LABEL);
-
-       // If not recursive lock, reset the header to displaced header
-       if (os::is_MP()) {
-         masm.lock();
-       }
-       masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
-       masm.bind(DONE_LABEL);
-       masm.nop(); // avoid branch to branch
-    } else {
-       Label DONE_LABEL, Stacked, CheckSucc ;
-
-       if (UseBiasedLocking && !UseOptoBiasInlining) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-       }
-
-       masm.movptr(tmpReg, Address(objReg, 0)) ;
-       masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
-       masm.jcc   (Assembler::zero, DONE_LABEL) ;
-       masm.testl (tmpReg, 0x02) ;
-       masm.jcc   (Assembler::zero, Stacked) ;
-
-       // It's inflated
-       masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-       masm.xorptr(boxReg, r15_thread) ;
-       masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-       masm.jcc   (Assembler::notZero, DONE_LABEL) ;
-       masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
-       masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
-       masm.jcc   (Assembler::notZero, CheckSucc) ;
-       masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-       masm.jmp   (DONE_LABEL) ;
-
-       if ((EmitSync & 65536) == 0) {
-         Label LSuccess, LGoSlowPath ;
-         masm.bind  (CheckSucc) ;
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         masm.jcc   (Assembler::zero, LGoSlowPath) ;
-
-         // I'd much rather use lock:andl m->_owner, 0 as it's faster than
-         // the explicit ST;MEMBAR combination, but masm doesn't currently support
-         // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc.
-         // are all faster when the write buffer is populated.
-         masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         if (os::is_MP()) {
-            masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
-         }
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         masm.jcc   (Assembler::notZero, LSuccess) ;
-
-         masm.movptr (boxReg, (int32_t)NULL_WORD) ;                   // box is really EAX
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         masm.jcc   (Assembler::notEqual, LSuccess) ;
-         // Intentional fall-through into slow-path
-
-         masm.bind  (LGoSlowPath) ;
-         masm.orl   (boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
-         masm.jmp   (DONE_LABEL) ;
-
-         masm.bind  (LSuccess) ;
-         masm.testl (boxReg, 0) ;                      // set ICC.ZF=1 to indicate success
-         masm.jmp   (DONE_LABEL) ;
-       }
-
-       masm.bind  (Stacked) ;
-       masm.movptr(tmpReg, Address (boxReg, 0)) ;      // re-fetch
-       if (os::is_MP()) { masm.lock(); }
-       masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
-
-       if (EmitSync & 65536) {
-          masm.bind (CheckSucc) ;
-       }
-       masm.bind(DONE_LABEL);
-       if (EmitSync & 32768) {
-          masm.nop();                      // avoid branch to branch
-       }
-    }
-  %}
-
-
   enc_class enc_rethrow()
   %{
     cbuf.set_insts_mark();
@@ -2953,7 +2724,7 @@
   c_calling_convention
   %{
     // This is obviously always outgoing
-    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
+    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
   %}
 
   // Location of compiled Java return values.  Same as C for now.
@@ -3086,6 +2857,17 @@
   interface(CONST_INTER);
 %}
 
+// Int Immediate non-negative
+operand immU31()
+%{
+  predicate(n->get_int() >= 0);
+  match(ConI);
+
+  op_cost(0);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Constant for long shifts
 operand immI_32()
 %{
@@ -5042,12 +4824,12 @@
   ins_pipe(ialu_reg_mem);
 %}
 
-// Load Integer with a 32-bit mask into Long Register
-instruct loadI2L_immI(rRegL dst, memory mem, immI mask, rFlagsReg cr) %{
+// Load Integer with a 31-bit mask into Long Register
+instruct loadI2L_immU31(rRegL dst, memory mem, immU31 mask, rFlagsReg cr) %{
   match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
   effect(KILL cr);
 
-  format %{ "movl    $dst, $mem\t# int & 32-bit mask -> long\n\t"
+  format %{ "movl    $dst, $mem\t# int & 31-bit mask -> long\n\t"
             "andl    $dst, $mask" %}
   ins_encode %{
     Register Rdst = $dst$$Register;
@@ -6337,6 +6119,7 @@
 instruct membar_acquire()
 %{
   match(MemBarAcquire);
+  match(LoadFence);
   ins_cost(0);
 
   size(0);
@@ -6359,6 +6142,7 @@
 instruct membar_release()
 %{
   match(MemBarRelease);
+  match(StoreFence);
   ins_cost(0);
 
   size(0);
@@ -6942,82 +6726,6 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
-instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(AddExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125); // XXX
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Address);
-  %}
-
-  ins_pipe(ialu_reg_mem);
-%}
-
-instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(AddExactL dst src);
-  effect(DEF cr);
-
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
-%{
-  match(AddExactL dst src);
-  effect(DEF cr);
-
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
-%{
-  match(AddExactL dst (LoadL src));
-  effect(DEF cr);
-
-  ins_cost(125); // XXX
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$Address);
-  %}
-
-  ins_pipe(ialu_reg_mem);
-%}
-
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
@@ -7630,80 +7338,6 @@
   ins_pipe(ialu_mem_imm);
 %}
 
-instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(SubExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem);
-%}
-
-instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(SubExactL dst src);
-  effect(DEF cr);
-
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
-%{
-  match(SubExactL dst (LoadL src));
-  effect(DEF cr);
-
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactL_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem);
-%}
-
 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
 %{
   match(Set dst (SubL dst src));
@@ -7820,31 +7454,6 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
-%{
-  match(NegExactI dst);
-  effect(KILL cr);
-
-  format %{ "negl    $dst\t# negExact int" %}
-  ins_encode %{
-    __ negl($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
-instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
-%{
-  match(NegExactL dst);
-  effect(KILL cr);
-
-  format %{ "negq    $dst\t# negExact long" %}
-  ins_encode %{
-    __ negq($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
-
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
 // Multiply Register
@@ -7961,86 +7570,6 @@
   ins_pipe(ialu_reg_reg_alu0);
 %}
 
-
-instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(MulExactI dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imull   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-
-instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr)
-%{
-  match(MulExactI src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imull   $dst, $src, $imm\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(MulExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "imull   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
-instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(MulExactL dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imulq   $dst, $src\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr)
-%{
-  match(MulExactL src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imulq   $dst, $src, $imm\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
-%{
-  match(MulExactL dst (LoadL src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "imulq   $dst, $src\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
                    rFlagsReg cr)
 %{
@@ -10649,6 +10178,174 @@
   ins_pipe( pipe_slow );
 %}
 
+//----------Overflow Math Instructions-----------------------------------------
+
+instruct overflowAddI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addl    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddI_rReg_imm(rFlagsReg cr, rax_RegI op1, immI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addl    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowAddL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ addq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddL_rReg_imm(rFlagsReg cr, rax_RegL op1, immL32 op2)
+%{
+  match(Set cr (OverflowAddL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ addq($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "cmpl    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "cmpl    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowSubL op1 op2));
+
+  format %{ "cmpq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ cmpq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
+%{
+  match(Set cr (OverflowSubL op1 op2));
+
+  format %{ "cmpq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ cmpq($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegI_rReg(rFlagsReg cr, immI0 zero, rax_RegI op2)
+%{
+  match(Set cr (OverflowSubI zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "negl    $op2\t# overflow check int" %}
+  ins_encode %{
+    __ negl($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegL_rReg(rFlagsReg cr, immL0 zero, rax_RegL op2)
+%{
+  match(Set cr (OverflowSubL zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "negq    $op2\t# overflow check long" %}
+  ins_encode %{
+    __ negq($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowMulI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "imull    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "imull    $tmp, $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowMulL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "imulq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ imulq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2, rRegL tmp)
+%{
+  match(Set cr (OverflowMulL op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "imulq    $tmp, $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ imulq($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
 
 //----------Control Flow Instructions------------------------------------------
 // Signed compare Instructions
@@ -11432,27 +11129,25 @@
 // ============================================================================
 // inlined locking and unlocking
 
-instruct cmpFastLock(rFlagsReg cr,
-                     rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr)
-%{
+instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
-
   ins_cost(300);
   format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
-  ins_encode(Fast_Lock(object, box, tmp, scr));
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+  %}
   ins_pipe(pipe_slow);
 %}
 
-instruct cmpFastUnlock(rFlagsReg cr,
-                       rRegP object, rax_RegP box, rRegP tmp)
-%{
+instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
   match(Set cr (FastUnlock object box));
   effect(TEMP tmp, USE_KILL box);
-
   ins_cost(300);
   format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
-  ins_encode(Fast_Unlock(object, box, tmp));
+  ins_encode %{
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+  %}
   ins_pipe(pipe_slow);
 %}
 
--- a/src/cpu/zero/vm/assembler_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/assembler_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -132,7 +132,7 @@
 #define LOCALS_ADDR(offset)    ((address)locals[-(offset)])
 #define LOCALS_INT(offset)     (*((jint*)&locals[-(offset)]))
 #define LOCALS_FLOAT(offset)   (*((jfloat*)&locals[-(offset)]))
-#define LOCALS_OBJECT(offset)  ((oop)locals[-(offset)])
+#define LOCALS_OBJECT(offset)  (cast_to_oop(locals[-(offset)]))
 #define LOCALS_DOUBLE(offset)  (((VMJavaVal64*)&locals[-((offset) + 1)])->d)
 #define LOCALS_LONG(offset)    (((VMJavaVal64*)&locals[-((offset) + 1)])->l)
 #define LOCALS_LONG_AT(offset) (((address)&locals[-((offset) + 1)]))
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -220,7 +220,7 @@
     }
     InvocationCounter *counter = mcs->invocation_counter();
     counter->increment();
-    if (counter->reached_InvocationLimit()) {
+    if (counter->reached_InvocationLimit(mcs->backedge_counter())) {
       CALL_VM_NOCHECK(
         InterpreterRuntime::frequency_counter_overflow(thread, NULL));
       if (HAS_PENDING_EXCEPTION)
--- a/src/cpu/zero/vm/entryFrame_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/entryFrame_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/frame_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/frame_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/globalDefinitions_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/globalDefinitions_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -28,4 +28,10 @@
 
 #include <ffi.h>
 
+// Indicates whether the C calling conventions require that
+// 32-bit integer argument values are properly extended to 64 bits.
+// If set, SharedRuntime::c_calling_convention() must adapt
+// signatures accordingly.
+const bool CCallingConventionRequiresIntsAsLongs = false;
+
 #endif // CPU_ZERO_VM_GLOBALDEFINITIONS_ZERO_HPP
--- a/src/cpu/zero/vm/globals_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/globals_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,11 +38,13 @@
 define_pd_global(bool,  NeedsDeoptSuspend,    false);
 
 define_pd_global(bool,  ImplicitNullChecks,   true);
+define_pd_global(bool,  TrapBasedNullChecks,  false);
 define_pd_global(bool,  UncommonNullCast,     true);
 
 define_pd_global(intx,  CodeEntryAlignment,   32);
 define_pd_global(intx,  OptoLoopAlignment,    16);
 define_pd_global(intx,  InlineFrequencyCount, 100);
+define_pd_global(intx,  InlineSmallCode,      1000 );
 define_pd_global(intx,  PreInflateSpin,       10);
 
 define_pd_global(intx,  StackYellowPages,     2);
--- a/src/cpu/zero/vm/icBuffer_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/icBuffer_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/interp_masm_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/interp_masm_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/interpreter_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/interpreter_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/jni_zero.h	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/jni_zero.h	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/nativeInst_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/nativeInst_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/register_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/register_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/relocInfo_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/relocInfo_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -135,6 +135,7 @@
 
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
+                                         VMRegPair *regs2,
                                          int total_args_passed) {
   ShouldNotCallThis();
   return 0;
--- a/src/cpu/zero/vm/sharkFrame_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/sharkFrame_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/shark_globals_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/shark_globals_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -50,7 +50,6 @@
 
 define_pd_global(intx,     OnStackReplacePercentage,     933  );
 define_pd_global(intx,     FreqInlineSize,               325  );
-define_pd_global(intx,     InlineSmallCode,              1000 );
 define_pd_global(uintx,    NewRatio,                     12   );
 define_pd_global(intx,     NewSizeThreadIncrease,        4*K  );
 define_pd_global(intx,     InitialCodeCacheSize,         160*K);
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/vmStructs_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/vmStructs_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/cpu/zero/vm/vtableStubs_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/cpu/zero/vm/vtableStubs_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/attachListener_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/os.hpp"
+#include "services/attachListener.hpp"
+#include "services/dtraceAttacher.hpp"
+
+#include <unistd.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+
+#ifndef UNIX_PATH_MAX
+#define UNIX_PATH_MAX   sizeof(((struct sockaddr_un *)0)->sun_path)
+#endif
+
+// The attach mechanism on AIX uses a UNIX domain socket. An attach listener
+// thread is created at startup or is created on-demand via a signal from
+// the client tool. The attach listener creates a socket and binds it to a file
+// in the filesystem. The attach listener then acts as a simple (single-
+// threaded) server - it waits for a client to connect, reads the request,
+// executes it, and returns the response to the client via the socket
+// connection.
+//
+// Because the socket is a UNIX domain socket, only clients on the
+// local machine can connect. In addition there are two other aspects to
+// the security:
+// 1. The well-known file that the socket is bound to has permission 400.
+// 2. When a client connects, the SO_PEERID socket option is used to
+//    obtain the credentials of the client. We check that the effective uid
+//    of the client matches that of this process.
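
For concreteness, a client is simply a local process (with a matching
effective uid) connecting to that well-known file. A minimal sketch, assuming
the %s/.java_pid%d path used by init() below and a NUL-separated request
layout (version, command, then arguments, as ArgumentIterator expects); the
command name is illustrative:

    #include <cstdio>
    #include <cstring>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    int attach_and_send(int pid, const char* tmpdir) {
      int s = ::socket(PF_UNIX, SOCK_STREAM, 0);
      if (s == -1) return -1;
      struct sockaddr_un addr;
      memset(&addr, 0, sizeof(addr));
      addr.sun_family = AF_UNIX;
      snprintf(addr.sun_path, sizeof(addr.sun_path), "%s/.java_pid%d", tmpdir, pid);
      if (::connect(s, (struct sockaddr*)&addr, SUN_LEN(&addr)) == -1) {
        ::close(s);
        return -1;
      }
      // "<ver>\0<cmd>\0<arg>\0<arg>\0<arg>\0"; the embedded NULs matter.
      const char request[] = "1\0properties\0\0\0\0";
      ::write(s, request, sizeof(request) - 1);
      // ... read the response, then close ...
      ::close(s);
      return 0;
    }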
+
+// forward reference
+class AixAttachOperation;
+
+class AixAttachListener: AllStatic {
+ private:
+  // the path to which we bind the UNIX domain socket
+  static char _path[UNIX_PATH_MAX];
+  static bool _has_path;
+  // Shutdown marker to prevent accept blocking during clean-up.
+  static bool _shutdown;
+
+  // the file descriptor for the listening socket
+  static int _listener;
+
+  static void set_path(char* path) {
+    if (path == NULL) {
+      _has_path = false;
+    } else {
+      strncpy(_path, path, UNIX_PATH_MAX);
+      _path[UNIX_PATH_MAX-1] = '\0';
+      _has_path = true;
+    }
+  }
+
+  static void set_listener(int s)               { _listener = s; }
+
+  // reads a request from the given connected socket
+  static AixAttachOperation* read_request(int s);
+
+ public:
+  enum {
+    ATTACH_PROTOCOL_VER = 1                     // protocol version
+  };
+  enum {
+    ATTACH_ERROR_BADVERSION     = 101           // error codes
+  };
+
+  // initialize the listener, returns 0 if okay
+  static int init();
+
+  static char* path()                   { return _path; }
+  static bool has_path()                { return _has_path; }
+  static int listener()                 { return _listener; }
+  // Shutdown marker to prevent accept blocking during clean-up
+  static void set_shutdown(bool shutdown) { _shutdown = shutdown; }
+  static bool is_shutdown()     { return _shutdown; }
+
+  // write the given buffer to a socket
+  static int write_fully(int s, char* buf, int len);
+
+  static AixAttachOperation* dequeue();
+};
+
+class AixAttachOperation: public AttachOperation {
+ private:
+  // the connection to the client
+  int _socket;
+
+ public:
+  void complete(jint res, bufferedStream* st);
+
+  void set_socket(int s)                                { _socket = s; }
+  int socket() const                                    { return _socket; }
+
+  AixAttachOperation(char* name) : AttachOperation(name) {
+    set_socket(-1);
+  }
+};
+
+// statics
+char AixAttachListener::_path[UNIX_PATH_MAX];
+bool AixAttachListener::_has_path;
+int AixAttachListener::_listener = -1;
+// Shutdown marker to prevent accept blocking during clean-up
+bool AixAttachListener::_shutdown = false;
+
+// Supporting class to help split a buffer into individual components
+class ArgumentIterator : public StackObj {
+ private:
+  char* _pos;
+  char* _end;
+ public:
+  ArgumentIterator(char* arg_buffer, size_t arg_size) {
+    _pos = arg_buffer;
+    _end = _pos + arg_size - 1;
+  }
+  char* next() {
+    if (*_pos == '\0') {
+      return NULL;
+    }
+    char* res = _pos;
+    char* next_pos = strchr(_pos, '\0');
+    if (next_pos < _end)  {
+      next_pos++;
+    }
+    _pos = next_pos;
+    return res;
+  }
+};
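+
+// For illustration (hypothetical input, not from these sources): given the
+// buffer "1\0load\0a\0b\0\0", successive calls to next() yield "1", "load",
+// "a", "b" and finally NULL once the terminating NUL is reached.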
+
+// On AIX, sockets can block until all data has been transmitted
+// successfully; in some communication domains a socket "close" may
+// therefore never complete. We have to take care that after the socket
+// shutdown the listener never enters the accept state.
+
+// atexit hook to stop the listener and unlink the file that it is
+// bound to.
+
+// Some modifications to the listener logic to prevent deadlocks on exit:
+// 1. We shut down the socket here instead. AixAttachOperation::complete() is not the right place
+//    since more than one agent in a sequence in JPLIS live tests wouldn't work (the listener thread
+//    would be dead after the first operation completed).
+// 2. close(s) may never return if the listener thread is in socket accept(). Unlinking the file
+//    should be sufficient for cleanup.
+extern "C" {
+  static void listener_cleanup() {
+    static int cleanup_done;
+    if (!cleanup_done) {
+      cleanup_done = 1;
+      AixAttachListener::set_shutdown(true);
+      int s = AixAttachListener::listener();
+      if (s != -1) {
+        ::shutdown(s, 2);
+      }
+      if (AixAttachListener::has_path()) {
+        ::unlink(AixAttachListener::path());
+      }
+    }
+  }
+}
+
+// Initialization - create a listener socket and bind it to a file
+
+int AixAttachListener::init() {
+  char path[UNIX_PATH_MAX];          // socket file
+  char initial_path[UNIX_PATH_MAX];  // socket file during setup
+  int listener;                      // listener socket (file descriptor)
+
+  // register function to cleanup
+  ::atexit(listener_cleanup);
+
+  int n = snprintf(path, UNIX_PATH_MAX, "%s/.java_pid%d",
+                   os::get_temp_directory(), os::current_process_id());
+  if (n < (int)UNIX_PATH_MAX) {
+    n = snprintf(initial_path, UNIX_PATH_MAX, "%s.tmp", path);
+  }
+  if (n >= (int)UNIX_PATH_MAX) {
+    return -1;
+  }
+
+  // create the listener socket
+  listener = ::socket(PF_UNIX, SOCK_STREAM, 0);
+  if (listener == -1) {
+    return -1;
+  }
+
+  // bind socket
+  struct sockaddr_un addr;
+  addr.sun_family = AF_UNIX;
+  strcpy(addr.sun_path, initial_path);
+  ::unlink(initial_path);
+  // We must call bind with the actual sockaddr length. This is obligatory for AS400.
+  int res = ::bind(listener, (struct sockaddr*)&addr, SUN_LEN(&addr));
+  if (res == -1) {
+    RESTARTABLE(::close(listener), res);
+    return -1;
+  }
+
+  // put in listen mode, set permissions, and rename into place
+  res = ::listen(listener, 5);
+  if (res == 0) {
+      RESTARTABLE(::chmod(initial_path, (S_IREAD|S_IWRITE) & ~(S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)), res);
+      if (res == 0) {
+          res = ::rename(initial_path, path);
+      }
+  }
+  if (res == -1) {
+    RESTARTABLE(::close(listener), res);
+    ::unlink(initial_path);
+    return -1;
+  }
+  set_path(path);
+  set_listener(listener);
+  set_shutdown(false);
+
+  return 0;
+}
+
+// Given a socket that is connected to a peer we read the request and
+// create an AttachOperation. As the socket is blocking there is potential
+// for a denial-of-service if the peer does not respond. However this happens
+// after the peer credentials have been checked and in the worst case it just
+// means that the attach listener thread is blocked.
+//
+AixAttachOperation* AixAttachListener::read_request(int s) {
+  char ver_str[8];
+  sprintf(ver_str, "%d", ATTACH_PROTOCOL_VER);
+
+  // The request is a sequence of strings so we first figure out the
+  // expected count and the maximum possible length of the request.
+  // The request is:
+  //   <ver>0<cmd>0<arg>0<arg>0<arg>0
+  // where <ver> is the protocol version (1), <cmd> is the command
+  // name ("load", "datadump", ...), and <arg> is an argument
+  int expected_str_count = 2 + AttachOperation::arg_count_max;
+  const int max_len = (sizeof(ver_str) + 1) + (AttachOperation::name_length_max + 1) +
+    AttachOperation::arg_count_max*(AttachOperation::arg_length_max + 1);
+
+  char buf[max_len];
+  int str_count = 0;
+
+  // Read until all (expected) strings have been read, the buffer is
+  // full, or EOF.
+
+  int off = 0;
+  int left = max_len;
+
+  do {
+    int n;
+    // Don't block on interrupts because this will
+    // hang in the clean-up when shutting down.
+    n = read(s, buf+off, left);
+    if (n == -1) {
+      return NULL;      // reset by peer or other error
+    }
+    if (n == 0) {       // end of file reached
+      break;
+    }
+    for (int i=0; i<n; i++) {
+      if (buf[off+i] == 0) {
+        // EOS found
+        str_count++;
+
+        // The first string is <ver> so check it now to
+        // check for protocol mis-match
+        if (str_count == 1) {
+          if ((strlen(buf) != strlen(ver_str)) ||
+              (atoi(buf) != ATTACH_PROTOCOL_VER)) {
+            char msg[32];
+            sprintf(msg, "%d\n", ATTACH_ERROR_BADVERSION);
+            write_fully(s, msg, strlen(msg));
+            return NULL;
+          }
+        }
+      }
+    }
+    off += n;
+    left -= n;
+  } while (left > 0 && str_count < expected_str_count);
+
+  if (str_count != expected_str_count) {
+    return NULL;        // incomplete request
+  }
+
+  // parse request
+
+  ArgumentIterator args(buf, max_len - left);
+
+  // version already checked
+  char* v = args.next();
+
+  char* name = args.next();
+  if (name == NULL || strlen(name) > AttachOperation::name_length_max) {
+    return NULL;
+  }
+
+  AixAttachOperation* op = new AixAttachOperation(name);
+
+  for (int i=0; i<AttachOperation::arg_count_max; i++) {
+    char* arg = args.next();
+    if (arg == NULL) {
+      op->set_arg(i, NULL);
+    } else {
+      if (strlen(arg) > AttachOperation::arg_length_max) {
+        delete op;
+        return NULL;
+      }
+      op->set_arg(i, arg);
+    }
+  }
+
+  op->set_socket(s);
+  return op;
+}
+
+
+// Dequeue an operation
+//
+// As in the Linux implementation there is only a single operation and clients
+// cannot queue commands (except at the socket level).
+//
+AixAttachOperation* AixAttachListener::dequeue() {
+  for (;;) {
+    int s;
+
+    // wait for client to connect
+    struct sockaddr addr;
+    socklen_t len = sizeof(addr);
+    memset(&addr, 0, len);
+    // We must prevent accept blocking on the socket if it has been shut down.
+    // Therefore we allow interrupts and check whether we have been shut down already.
+    if (AixAttachListener::is_shutdown()) {
+      return NULL;
+    }
+    s = ::accept(listener(), &addr, &len);
+    if (s == -1) {
+      return NULL;      // log a warning?
+    }
+
+    // Added timeouts for read and write.  If we get no request within the
+    // next AttachListenerTimeout milliseconds we just finish the connection.
+    struct timeval tv;
+    tv.tv_sec = AttachListenerTimeout / 1000;
+    tv.tv_usec = (AttachListenerTimeout % 1000) * 1000;
+    ::setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, (char*)&tv, sizeof(tv));
+    ::setsockopt(s, SOL_SOCKET, SO_SNDTIMEO, (char*)&tv, sizeof(tv));
+
+    // Get the credentials of the peer and check the effective uid/gid.
+    struct peercred_struct cred_info;
+    socklen_t optlen = sizeof(cred_info);
+    if (::getsockopt(s, SOL_SOCKET, SO_PEERID, (void*)&cred_info, &optlen) == -1) {
+      int res;
+      RESTARTABLE(::close(s), res);
+      continue;
+    }
+    uid_t euid = geteuid();
+    gid_t egid = getegid();
+
+    if (cred_info.euid != euid || cred_info.egid != egid) {
+      int res;
+      RESTARTABLE(::close(s), res);
+      continue;
+    }
+
+    // peer credential look okay so we read the request
+    AixAttachOperation* op = read_request(s);
+    if (op == NULL) {
+      int res;
+      RESTARTABLE(::close(s), res);
+      continue;
+    } else {
+      return op;
+    }
+  }
+}
+
+// write the given buffer to the socket
+int AixAttachListener::write_fully(int s, char* buf, int len) {
+  do {
+    int n = ::write(s, buf, len);
+    if (n == -1) {
+      if (errno != EINTR) return -1;
+    } else {
+      buf += n;
+      len -= n;
+    }
+  }
+  while (len > 0);
+  return 0;
+}
+
+// Complete an operation by sending the operation result and any result
+// output to the client. At this time the socket is in blocking mode so
+// potentially we can block if there is a lot of data and the client is
+// non-responsive. For most operations this is a non-issue because the
+// default send buffer is sufficient to buffer everything. In the future,
+// if there are operations that involve a very big reply, the
+// socket could be made non-blocking and a timeout could be used.
+
+void AixAttachOperation::complete(jint result, bufferedStream* st) {
+  JavaThread* thread = JavaThread::current();
+  ThreadBlockInVM tbivm(thread);
+
+  thread->set_suspend_equivalent();
+  // cleared by handle_special_suspend_equivalent_condition() or
+  // java_suspend_self() via check_and_wait_while_suspended()
+
+  // write operation result
+  char msg[32];
+  sprintf(msg, "%d\n", result);
+  int rc = AixAttachListener::write_fully(this->socket(), msg, strlen(msg));
+
+  // write any result data
+  if (rc == 0) {
+    // Shutdown the socket in the cleanup function to enable more than
+    // one agent attach in a sequence (see comments to listener_cleanup()).
+    AixAttachListener::write_fully(this->socket(), (char*) st->base(), st->size());
+  }
+
+  // done
+  RESTARTABLE(::close(this->socket()), rc);
+
+  // were we externally suspended while we were waiting?
+  thread->check_and_wait_while_suspended();
+
+  delete this;
+}
+
+
+// AttachListener functions
+
+AttachOperation* AttachListener::dequeue() {
+  JavaThread* thread = JavaThread::current();
+  ThreadBlockInVM tbivm(thread);
+
+  thread->set_suspend_equivalent();
+  // cleared by handle_special_suspend_equivalent_condition() or
+  // java_suspend_self() via check_and_wait_while_suspended()
+
+  AttachOperation* op = AixAttachListener::dequeue();
+
+  // were we externally suspended while we were waiting?
+  thread->check_and_wait_while_suspended();
+
+  return op;
+}
+
+// Performs initialization at vm startup
+// For AIX we remove any stale .java_pid file which could cause
+// an attaching process to think we are ready to receive on the
+// domain socket before we are properly initialized.
+
+void AttachListener::vm_start() {
+  char fn[UNIX_PATH_MAX];
+  struct stat64 st;
+  int ret;
+
+  int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
+           os::get_temp_directory(), os::current_process_id());
+  assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
+
+  RESTARTABLE(::stat64(fn, &st), ret);
+  if (ret == 0) {
+    ret = ::unlink(fn);
+    if (ret == -1) {
+      debug_only(warning("failed to remove stale attach pid file at %s", fn));
+    }
+  }
+}
+
+int AttachListener::pd_init() {
+  JavaThread* thread = JavaThread::current();
+  ThreadBlockInVM tbivm(thread);
+
+  thread->set_suspend_equivalent();
+  // cleared by handle_special_suspend_equivalent_condition() or
+  // java_suspend_self() via check_and_wait_while_suspended()
+
+  int ret_code = AixAttachListener::init();
+
+  // were we externally suspended while we were waiting?
+  thread->check_and_wait_while_suspended();
+
+  return ret_code;
+}
+
+// Attach Listener is started lazily except in the case when
+// +ReduceSignalUsage is used
+bool AttachListener::init_at_startup() {
+  if (ReduceSignalUsage) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// If the file .attach_pid<pid> exists in the working directory
+// or /tmp then this is the trigger to start the attach mechanism
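+// (For context, a sketch of the client side: the attach client creates the
+// .attach_pid<pid> file and then sends SIGQUIT to the target VM, whose
+// signal handler eventually calls is_init_trigger().)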
+bool AttachListener::is_init_trigger() {
+  if (init_at_startup() || is_initialized()) {
+    return false;               // initialized at startup or already initialized
+  }
+  char fn[PATH_MAX+1];
+  sprintf(fn, ".attach_pid%d", os::current_process_id());
+  int ret;
+  struct stat64 st;
+  RESTARTABLE(::stat64(fn, &st), ret);
+  if (ret == -1) {
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
+    RESTARTABLE(::stat64(fn, &st), ret);
+  }
+  if (ret == 0) {
+    // simple check to avoid starting the attach mechanism when
+    // a bogus user creates the file
+    if (st.st_uid == geteuid()) {
+      init();
+      return true;
+    }
+  }
+  return false;
+}
+
+// if VM aborts then remove listener
+void AttachListener::abort() {
+  listener_cleanup();
+}
+
+void AttachListener::pd_data_dump() {
+  os::signal_notify(SIGQUIT);
+}
+
+AttachOperationFunctionInfo* AttachListener::pd_find_operation(const char* n) {
+  return NULL;
+}
+
+jint AttachListener::pd_set_flag(AttachOperation* op, outputStream* out) {
+  out->print_cr("flag '%s' cannot be changed", op->arg(0));
+  return JNI_ERR;
+}
+
+void AttachListener::pd_detachall() {
+  // Cleanup server socket to detach clients.
+  listener_cleanup();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/c2_globals_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_C2_GLOBALS_AIX_HPP
+#define OS_AIX_VM_C2_GLOBALS_AIX_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+//
+// Sets the default values for operating system dependent flags used by the
+// server compiler. (see c2_globals.hpp)
+//
+
+#endif // OS_AIX_VM_C2_GLOBALS_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/decoder_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "utilities/decoder.hpp"
+#include "porting_aix.hpp"
+
+// Provide simple AIXDecoder which enables decoding of C frames in VM.
+class AIXDecoder: public AbstractDecoder {
+ public:
+  AIXDecoder() {
+    _decoder_status = no_error;
+  }
+  ~AIXDecoder() {}
+
+  virtual bool can_decode_C_frame_in_vm() const { return true; }
+
+  virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // demangled by getFuncName
+
+  virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
+    return (::getFuncName((codeptr_t)addr, buf, buflen, offset, 0, 0, 0) == 0);
+  }
+  virtual bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
+    ShouldNotReachHere();
+    return false;
+  }
+};
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/globals_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_GLOBALS_AIX_HPP
+#define OS_AIX_VM_GLOBALS_AIX_HPP
+
+//
+// Defines Aix specific flags. They are not available on other platforms.
+//
+#define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
+                                                                                    \
+  /* If UseLargePages == true allow or deny usage of 16M pages. 16M pages are  */   \
+  /* a scarce resource and there may be situations where we do not want the VM */   \
+  /* to run with 16M pages. (Will fall back to 64K pages).                     */   \
+  product_pd(bool, Use16MPages,                                                     \
+          "Use 16M pages if available.")                                            \
+                                                                                    \
+  /*  use optimized addresses for the polling page, */                              \
+  /* e.g. map it to a special 32-bit address.       */                              \
+  product_pd(bool, OptimizePollingPageLocation,                                     \
+          "Optimize the location of the polling page used for Safepoints")          \
+                                                                                    \
+  product_pd(intx, AttachListenerTimeout,                                           \
+          "Timeout in ms the attach listener waits for a request")                  \
+                                                                                    \
+
+// By default, do not allow 16M pages; they have to be switched on explicitly.
+define_pd_global(bool, Use16MPages, false);
+define_pd_global(bool, OptimizePollingPageLocation, true);
+define_pd_global(intx, AttachListenerTimeout, 1000);
+
+//
+// Defines Aix-specific default values. The flags are available on all
+// platforms, but they may have different default values on other platforms.
+//
+define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePagesIndividualAllocation, false);
+define_pd_global(bool, UseOSErrorReporting, false);
+define_pd_global(bool, UseThreadPriorities, true);
+
+#endif // OS_AIX_VM_GLOBALS_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/interfaceSupport_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
+#define OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
+
+// Contains inlined functions for class InterfaceSupport
+
+static inline void serialize_memory(JavaThread *thread) {
+  os::write_memory_serialize_page(thread);
+}
+
+#endif // OS_AIX_VM_INTERFACESUPPORT_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/jsig.c	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/* CopyrightVersion 1.2 */
+
+/* This is a special library that should be loaded before libc &
+ * libthread to interpose the signal handler installation functions:
+ * sigaction(), signal(), sigset().
+ * Used for signal-chaining. See RFE 4381843.
+ */
+
+#include <signal.h>
+#include <dlfcn.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define bool int
+#define true 1
+#define false 0
+
+#include <stdint.h>
+
+// Highest signal number so far on AIX 5.2 is SIGSAK (63). Signal numbers
+// start at 1, so size the handler table for 64 entries and use a 64-bit
+// mask; an unsigned int cannot hold mask bits for signals above 31.
+#define MAXSIGNUM 64
+#define MASK(sig) ((uint64_t)1 << (sig))
+
+static struct sigaction sact[MAXSIGNUM]; /* saved signal handlers */
+static uint64_t jvmsigs = 0; /* signals used by jvm */
+
+/* used to synchronize the installation of signal handlers */
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+static pthread_t tid = 0;
+
+typedef void (*sa_handler_t)(int);
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+// signal_t is already defined on AIX
+typedef sa_handler_t (*signal_like_function_t)(int, sa_handler_t);
+typedef int (*sigaction_t)(int, const struct sigaction *, struct sigaction *);
+
+static signal_like_function_t os_signal = 0; /* os's version of signal()/sigset() */
+static sigaction_t os_sigaction = 0; /* os's version of sigaction() */
+
+static bool jvm_signal_installing = false;
+static bool jvm_signal_installed = false;
+
+static void signal_lock() {
+  pthread_mutex_lock(&mutex);
+  /* When the jvm is installing its set of signal handlers, threads
+   * other than the jvm thread should wait */
+  if (jvm_signal_installing) {
+    if (tid != pthread_self()) {
+      pthread_cond_wait(&cond, &mutex);
+    }
+  }
+}
+
+static void signal_unlock() {
+  pthread_mutex_unlock(&mutex);
+}
+
+static sa_handler_t call_os_signal(int sig, sa_handler_t disp,
+                                   bool is_sigset) {
+  if (os_signal == NULL) {
+    if (!is_sigset) {
+      // Aix: call functions directly instead of dlsym'ing them
+      os_signal = signal;
+    } else {
+      // Aix: call functions directly instead of dlsym'ing them
+      os_signal = sigset;
+    }
+    if (os_signal == NULL) {
+      printf("%s\n", dlerror());
+      exit(0);
+    }
+  }
+  return (*os_signal)(sig, disp);
+}
+
+static void save_signal_handler(int sig, sa_handler_t disp) {
+  sigset_t set;
+  sact[sig].sa_handler = disp;
+  sigemptyset(&set);
+  sact[sig].sa_mask = set;
+  sact[sig].sa_flags = 0;
+}
+
+static sa_handler_t set_signal(int sig, sa_handler_t disp, bool is_sigset) {
+  sa_handler_t oldhandler;
+  bool sigused;
+
+  signal_lock();
+
+  sigused = (MASK(sig) & jvmsigs) != 0;
+  if (jvm_signal_installed && sigused) {
+    /* jvm has installed its signal handler for this signal. */
+    /* Save the handler. Don't really install it. */
+    oldhandler = sact[sig].sa_handler;
+    save_signal_handler(sig, disp);
+
+    signal_unlock();
+    return oldhandler;
+  } else if (jvm_signal_installing) {
+    /* jvm is installing its signal handlers. Install the new
+     * handlers and save the old ones. jvm uses sigaction().
+     * Leave the piece here just in case. */
+    oldhandler = call_os_signal(sig, disp, is_sigset);
+    save_signal_handler(sig, oldhandler);
+
+    /* Record the signals used by jvm */
+    jvmsigs |= MASK(sig);
+
+    signal_unlock();
+    return oldhandler;
+  } else {
+    /* jvm has no relation with this signal (yet). Install the
+     * handler. */
+    oldhandler = call_os_signal(sig, disp, is_sigset);
+
+    signal_unlock();
+    return oldhandler;
+  }
+}
+
+sa_handler_t signal(int sig, sa_handler_t disp) {
+  return set_signal(sig, disp, false);
+}
+
+sa_handler_t sigset(int sig, sa_handler_t disp) {
+  return set_signal(sig, disp, true);
+}
+
+static int call_os_sigaction(int sig, const struct sigaction  *act,
+                             struct sigaction *oact) {
+  if (os_sigaction == NULL) {
+    // Aix: call functions directly instead of dlsym'ing them
+    os_sigaction = sigaction;
+    if (os_sigaction == NULL) {
+      printf("%s\n", dlerror());
+      exit(0);
+    }
+  }
+  return (*os_sigaction)(sig, act, oact);
+}
+
+int sigaction(int sig, const struct sigaction *act, struct sigaction *oact) {
+  int res;
+  bool sigused;
+  struct sigaction oldAct;
+
+  signal_lock();
+
+  sigused = (MASK(sig) & jvmsigs) != 0;
+  if (jvm_signal_installed && sigused) {
+    /* jvm has installed its signal handler for this signal. */
+    /* Save the handler. Don't really install it. */
+    if (oact != NULL) {
+      *oact = sact[sig];
+    }
+    if (act != NULL) {
+      sact[sig] = *act;
+    }
+
+    signal_unlock();
+    return 0;
+  } else if (jvm_signal_installing) {
+    /* jvm is installing its signal handlers. Install the new
+     * handlers and save the old ones. */
+    res = call_os_sigaction(sig, act, &oldAct);
+    sact[sig] = oldAct;
+    if (oact != NULL) {
+      *oact = oldAct;
+    }
+
+    /* Record the signals used by jvm */
+    jvmsigs |= MASK(sig);
+
+    signal_unlock();
+    return res;
+  } else {
+    /* jvm has no relation with this signal (yet). Install the
+     * handler. */
+    res = call_os_sigaction(sig, act, oact);
+
+    signal_unlock();
+    return res;
+  }
+}
+
+/* The three functions for the jvm to call into */
+void JVM_begin_signal_setting() {
+  signal_lock();
+  jvm_signal_installing = true;
+  tid = pthread_self();
+  signal_unlock();
+}
+
+void JVM_end_signal_setting() {
+  signal_lock();
+  jvm_signal_installed = true;
+  jvm_signal_installing = false;
+  pthread_cond_broadcast(&cond);
+  signal_unlock();
+}
+
+struct sigaction *JVM_get_signal_action(int sig) {
+  /* Note: this read is unsynchronized; jvmsigs only ever accumulates bits,
+   * so a racy read is benign here. */
+  if ((MASK(sig) & jvmsigs) != 0) {
+    return &sact[sig];
+  }
+  return NULL;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/jvm_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "prims/jvm.h"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/osThread.hpp"
+
+#include <signal.h>
+
+
+// sun.misc.Signal ///////////////////////////////////////////////////////////
+// Signal code is mostly copied from classic vm, signals_md.c   1.4 98/08/23
+/*
+ * This function is included primarily as a debugging aid. If Java is
+ * running in a console window, then pressing <CTRL-\> will cause
+ * the current state of all active threads and monitors to be written
+ * to the console window.
+ */
+
+JVM_ENTRY_NO_ENV(void*, JVM_RegisterSignal(jint sig, void* handler))
+  // Copied from classic vm
+  // signals_md.c       1.4 98/08/23
+  void* newHandler = handler == (void *)2
+                   ? os::user_handler()
+                   : handler;
+  switch (sig) {
+    /* The following are already used by the VM. */
+    case INTERRUPT_SIGNAL:
+    case SIGFPE:
+    case SIGILL:
+    case SIGSEGV:
+
+    /* The following signal is used by the VM to dump thread stacks unless
+       ReduceSignalUsage is set, in which case the user is allowed to set
+       his own _native_ handler for this signal; thus, in either case,
+       we do not allow JVM_RegisterSignal to change the handler. */
+    case BREAK_SIGNAL:
+      return (void *)-1;
+
+    /* The following signals are used for Shutdown Hooks support. However, if
+       ReduceSignalUsage (-Xrs) is set, Shutdown Hooks must be invoked via
+       System.exit(), Java is not allowed to use these signals, and the
+       user is allowed to set his own _native_ handler for these signals and
+       invoke System.exit() as needed. Terminator.setup() is avoiding
+       registration of these signals when -Xrs is present.
+       - If the HUP signal is ignored (from the nohup command), then Java
+         is not allowed to use this signal.
+     */
+
+    case SHUTDOWN1_SIGNAL:
+    case SHUTDOWN2_SIGNAL:
+    case SHUTDOWN3_SIGNAL:
+      if (ReduceSignalUsage) return (void*)-1;
+      if (os::Aix::is_sig_ignored(sig)) return (void*)1;
+  }
+
+  void* oldHandler = os::signal(sig, newHandler);
+  if (oldHandler == os::user_handler()) {
+      return (void *)2;
+  } else {
+      return oldHandler;
+  }
+JVM_END
+
+
+JVM_ENTRY_NO_ENV(jboolean, JVM_RaiseSignal(jint sig))
+  if (ReduceSignalUsage) {
+    // do not allow SHUTDOWN1_SIGNAL,SHUTDOWN2_SIGNAL,SHUTDOWN3_SIGNAL,
+    // BREAK_SIGNAL to be raised when ReduceSignalUsage is set, since
+    // no handler for them is actually registered in JVM or via
+    // JVM_RegisterSignal.
+    if (sig == SHUTDOWN1_SIGNAL || sig == SHUTDOWN2_SIGNAL ||
+        sig == SHUTDOWN3_SIGNAL || sig == BREAK_SIGNAL) {
+      return JNI_FALSE;
+    }
+  }
+  else if ((sig == SHUTDOWN1_SIGNAL || sig == SHUTDOWN2_SIGNAL ||
+            sig == SHUTDOWN3_SIGNAL) && os::Aix::is_sig_ignored(sig)) {
+    // do not allow SHUTDOWN1_SIGNAL to be raised when SHUTDOWN1_SIGNAL
+    // is ignored, since no handler for them is actually registered in JVM
+    // or via JVM_RegisterSignal.
+    // This also applies for SHUTDOWN2_SIGNAL and SHUTDOWN3_SIGNAL
+    return JNI_FALSE;
+  }
+
+  os::signal_raise(sig);
+  return JNI_TRUE;
+JVM_END
+
+/*
+  All the defined signal names for AIX.
+
+  NOTE that not all of these names are accepted by our Java implementation
+
+  Via an existing claim by the VM, sigaction restrictions, or
+  the "rules of Unix" some of these names will be rejected at runtime.
+  For example the VM sets up to handle USR1, sigaction returns EINVAL for
+  STOP, and the OS simply doesn't allow catching of KILL.
+
+  Here are the names currently accepted by a user of sun.misc.Signal with
+  1.4.1 (ignoring potential interaction with use of chaining, etc):
+
+    HUP, INT, TRAP, ABRT, IOT, BUS, USR2, PIPE, ALRM, TERM, STKFLT,
+    CLD, CHLD, CONT, TSTP, TTIN, TTOU, URG, XCPU, XFSZ, VTALRM, PROF,
+    WINCH, POLL, IO, PWR, SYS
+
+*/
+
+struct siglabel {
+  const char *name;
+  int   number;
+};
+
+struct siglabel siglabels[] = {
+  /* derived from /usr/include/bits/signum.h on RH7.2 */
+   "HUP",       SIGHUP,         /* Hangup (POSIX).  */
+  "INT",        SIGINT,         /* Interrupt (ANSI).  */
+  "QUIT",       SIGQUIT,        /* Quit (POSIX).  */
+  "ILL",        SIGILL,         /* Illegal instruction (ANSI).  */
+  "TRAP",       SIGTRAP,        /* Trace trap (POSIX).  */
+  "ABRT",       SIGABRT,        /* Abort (ANSI).  */
+  "IOT",        SIGIOT,         /* IOT trap (4.2 BSD).  */
+  "BUS",        SIGBUS,         /* BUS error (4.2 BSD).  */
+  "FPE",        SIGFPE,         /* Floating-point exception (ANSI).  */
+  "KILL",       SIGKILL,        /* Kill, unblockable (POSIX).  */
+  "USR1",       SIGUSR1,        /* User-defined signal 1 (POSIX).  */
+  "SEGV",       SIGSEGV,        /* Segmentation violation (ANSI).  */
+  "USR2",       SIGUSR2,        /* User-defined signal 2 (POSIX).  */
+  "PIPE",       SIGPIPE,        /* Broken pipe (POSIX).  */
+  "ALRM",       SIGALRM,        /* Alarm clock (POSIX).  */
+  "TERM",       SIGTERM,        /* Termination (ANSI).  */
+#ifdef SIGSTKFLT
+  "STKFLT",     SIGSTKFLT,      /* Stack fault.  */
+#endif
+  "CLD",        SIGCLD,         /* Same as SIGCHLD (System V).  */
+  "CHLD",       SIGCHLD,        /* Child status has changed (POSIX).  */
+  "CONT",       SIGCONT,        /* Continue (POSIX).  */
+  "STOP",       SIGSTOP,        /* Stop, unblockable (POSIX).  */
+  "TSTP",       SIGTSTP,        /* Keyboard stop (POSIX).  */
+  "TTIN",       SIGTTIN,        /* Background read from tty (POSIX).  */
+  "TTOU",       SIGTTOU,        /* Background write to tty (POSIX).  */
+  "URG",        SIGURG,         /* Urgent condition on socket (4.2 BSD).  */
+  "XCPU",       SIGXCPU,        /* CPU limit exceeded (4.2 BSD).  */
+  "XFSZ",       SIGXFSZ,        /* File size limit exceeded (4.2 BSD).  */
+  "DANGER",     SIGDANGER,      /* System crash imminent; free up some page space (AIX). */
+  "VTALRM",     SIGVTALRM,      /* Virtual alarm clock (4.2 BSD).  */
+  "PROF",       SIGPROF,        /* Profiling alarm clock (4.2 BSD).  */
+  "WINCH",      SIGWINCH,       /* Window size change (4.3 BSD, Sun).  */
+  "POLL",       SIGPOLL,        /* Pollable event occurred (System V).  */
+  "IO",         SIGIO,          /* I/O now possible (4.2 BSD).  */
+  "PWR",        SIGPWR,         /* Power failure restart (System V).  */
+#ifdef SIGSYS
+  "SYS",        SIGSYS          /* Bad system call. Only on some Linuxen! */
+#endif
+  };
+
+JVM_ENTRY_NO_ENV(jint, JVM_FindSignal(const char *name))
+
+  /* find and return the named signal's number */
+
+  for (uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
+    if (strcmp(name, siglabels[i].name) == 0) {
+      return siglabels[i].number;
+    }
+  }
+
+  return -1;
+
+JVM_END
+
+// used by os::exception_name()
+extern bool signal_name(int signo, char* buf, size_t len) {
+  for (uint i = 0; i < ARRAY_SIZE(siglabels); i++) {
+    if (signo == siglabels[i].number) {
+      jio_snprintf(buf, len, "SIG%s", siglabels[i].name);
+      return true;
+    }
+  }
+  return false;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/jvm_aix.h	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_JVM_AIX_H
+#define OS_AIX_VM_JVM_AIX_H
+
+// HotSpot integration note:
+//
+// This is derived from the JDK classic file:
+// "$JDK/src/solaris/javavm/export/jvm_md.h":15 (ver. 1.10 98/04/22)
+// All local includes have been commented out.
+
+#ifndef JVM_MD_H
+#define JVM_MD_H
+
+/*
+ * This file is currently collecting system-specific dregs for the
+ * JNI conversion, which should be sorted out later.
+ */
+
+// Since we are compiling with c++, we need the following to make c macros
+// visible.
+#if !defined(__STDC_LIMIT_MACROS)
+#  define __STDC_LIMIT_MACROS           1
+#endif
+#if !defined(__STDC_CONSTANT_MACROS)
+#  define __STDC_CONSTANT_MACROS        1
+#endif
+#if !defined(__STDC_FORMAT_MACROS)
+#  define __STDC_FORMAT_MACROS          1
+#endif
+
+#include <dirent.h>             /* For DIR */
+
+// Must redefine NULL because the macro gets redefined to int 0
+// by dirent.h. This redefinition is included later than the standard definition in
+// globalDefinitions_<compiler>.hpp and leads to assertions in the VM initialization.
+// We definitely need NULL to have the same length as an address pointer.
+#ifdef _LP64
+#undef NULL
+#define NULL 0L
+#else
+#ifndef NULL
+#define NULL 0
+#endif
+#endif
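+
+// (Rationale sketch, not from the original sources: with _LP64 a plain int 0
+// passed through varargs occupies only 32 bits, e.g. in
+// execl("/bin/sh", "sh", NULL), whereas 0L matches the 64-bit pointer width
+// the callee expects.)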
+
+#include <sys/param.h>          /* For MAXPATHLEN */
+#include <sys/socket.h>         /* For socklen_t */
+#include <unistd.h>             /* For F_OK, R_OK, W_OK */
+
+#define JNI_ONLOAD_SYMBOLS      {"JNI_OnLoad"}
+#define JNI_ONUNLOAD_SYMBOLS    {"JNI_OnUnload"}
+#define JVM_ONLOAD_SYMBOLS      {"JVM_OnLoad"}
+#define AGENT_ONLOAD_SYMBOLS    {"Agent_OnLoad"}
+#define AGENT_ONUNLOAD_SYMBOLS  {"Agent_OnUnload"}
+#define AGENT_ONATTACH_SYMBOLS  {"Agent_OnAttach"}
+
+#define JNI_LIB_PREFIX "lib"
+#define JNI_LIB_SUFFIX ".so"
+
+// Hack: MAXPATHLEN is 4095 on some Linux and 4096 on others. This may
+//       cause problems if JVM and the rest of JDK are built on different
+//       Linux releases. Here we define JVM_MAXPATHLEN to be MAXPATHLEN + 1,
+//       so buffers declared in VM are always >= 4096.
+#define JVM_MAXPATHLEN (MAXPATHLEN + 1)
+
+#define JVM_R_OK    R_OK
+#define JVM_W_OK    W_OK
+#define JVM_X_OK    X_OK
+#define JVM_F_OK    F_OK
+
+/*
+ * File I/O
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+
+/* O Flags */
+
+#define JVM_O_RDONLY     O_RDONLY
+#define JVM_O_WRONLY     O_WRONLY
+#define JVM_O_RDWR       O_RDWR
+#define JVM_O_O_APPEND   O_APPEND
+#define JVM_O_EXCL       O_EXCL
+#define JVM_O_CREAT      O_CREAT
+
+/* Signal definitions */
+
+#define BREAK_SIGNAL     SIGQUIT           /* Thread dumping support.    */
+#define INTERRUPT_SIGNAL SIGUSR1           /* Interruptible I/O support. */
+#define SHUTDOWN1_SIGNAL SIGHUP            /* Shutdown Hooks support.    */
+#define SHUTDOWN2_SIGNAL SIGINT
+#define SHUTDOWN3_SIGNAL SIGTERM
+
+#endif /* JVM_MD_H */
+
+#endif // OS_AIX_VM_JVM_AIX_H
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libperfstat_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "runtime/arguments.hpp"
+#include "libperfstat_aix.hpp"
+
+// For dlopen and friends
+#include <fcntl.h>
+
+// handle to the libperfstat
+static void* g_libhandle = NULL;
+
+// whether initialization worked
+static bool g_initialized = false;
+
+
+typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+                                         int sizeof_userbuff, int desired_number);
+
+typedef int (*fun_perfstat_memory_total_t) (perfstat_id_t *name, perfstat_memory_total_t* userbuff,
+                                            int sizeof_userbuff, int desired_number);
+
+typedef void (*fun_perfstat_reset_t) ();
+
+static fun_perfstat_cpu_total_t     g_fun_perfstat_cpu_total = NULL;
+static fun_perfstat_memory_total_t  g_fun_perfstat_memory_total = NULL;
+static fun_perfstat_reset_t         g_fun_perfstat_reset = NULL;
+
+bool libperfstat::init() {
+
+  if (g_initialized) {
+    return true;
+  }
+
+  g_initialized = false;
+
+  // Dynamically load the libperfstat library.
+  g_libhandle = dlopen("/usr/lib/libperfstat.a(shr_64.o)", RTLD_MEMBER | RTLD_NOW);
+  if (!g_libhandle) {
+    if (Verbose) {
+      fprintf(stderr, "Cannot load libperfstat.a (dlerror: %s)", dlerror());
+    }
+    return false;
+  }
+
+  // resolve function pointers
+
+#define RESOLVE_FUN_NO_ERROR(name) \
+  g_fun_##name = (fun_##name##_t) dlsym(g_libhandle, #name);
+
+#define RESOLVE_FUN(name) \
+  RESOLVE_FUN_NO_ERROR(name) \
+  if (!g_fun_##name) { \
+    if (Verbose) { \
+      fprintf(stderr, "Cannot resolve " #name "() from libperfstat.a\n" \
+                      "   (dlerror: %s)", dlerror()); \
+      } \
+    return false; \
+  }
+
+  RESOLVE_FUN(perfstat_cpu_total);
+  RESOLVE_FUN(perfstat_memory_total);
+  RESOLVE_FUN(perfstat_reset);
+
+  g_initialized = true;
+
+  return true;
+}
+
+void libperfstat::cleanup() {
+
+  g_initialized = false;
+
+  if (g_libhandle) {
+    dlclose(g_libhandle);
+    g_libhandle = NULL;
+  }
+
+  g_fun_perfstat_cpu_total = NULL;
+  g_fun_perfstat_memory_total = NULL;
+  g_fun_perfstat_reset = NULL;
+}
+
+int libperfstat::perfstat_memory_total(perfstat_id_t *name,
+                                       perfstat_memory_total_t* userbuff,
+                                       int sizeof_userbuff, int desired_number) {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_memory_total, "");
+  return g_fun_perfstat_memory_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+int libperfstat::perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+                                    int sizeof_userbuff, int desired_number) {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_cpu_total, "");
+  return g_fun_perfstat_cpu_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+void libperfstat::perfstat_reset() {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_reset, "");
+  g_fun_perfstat_reset();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libperfstat_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// encapsulates the libperfstat library.
+//
+// The purpose of this code is to dynamically load the libperfstat library
+// instead of statically linking against it. The libperfstat library is an
+// AIX-specific library which only exists on AIX, not on PASE. To share
+// binaries between AIX and PASE, we cannot link directly against libperfstat.so.
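+//
+// Illustrative use (a sketch, error handling elided; see libperfstat.h for
+// the structure contents):
+//
+//   if (libperfstat::init()) {
+//     perfstat_cpu_total_t t;
+//     if (libperfstat::perfstat_cpu_total(NULL, &t, sizeof(t), 1) == 1) {
+//       // t now holds system-wide cpu statistics
+//     }
+//   }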
+
+#ifndef OS_AIX_VM_LIBPERFSTAT_AIX_HPP
+#define OS_AIX_VM_LIBPERFSTAT_AIX_HPP
+
+#include <libperfstat.h>
+
+class libperfstat {
+
+public:
+
+  // Load the libperfstat library (must be in LIBPATH).
+  // Returns true if succeeded, false if error.
+  static bool init();
+
+  // Unload the libperfstat library and reset all cached function pointers.
+  static void cleanup();
+
+  // Direct wrappers for the libperfstat functionality. All they do is
+  // call the functions with the same name via function pointers.
+  static int perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+                                int sizeof_userbuff, int desired_number);
+
+  static int perfstat_memory_total(perfstat_id_t *name, perfstat_memory_total_t* userbuff,
+                                   int sizeof_userbuff, int desired_number);
+
+  static void perfstat_reset();
+};
+
+#endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/loadlib_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+// Implementation of LoadedLibraries and friends
+
+// Ultimately this just uses loadquery()
+// See:
+// http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp
+//      ?topic=/com.ibm.aix.basetechref/doc/basetrf1/loadquery.htm
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+// 'allocation.inline.hpp' triggers the inclusion of 'inttypes.h' which defines macros
+// required by the definitions in 'globalDefinitions.hpp'. But these macros in 'inttypes.h'
+// are only defined if '__STDC_FORMAT_MACROS' is defined!
+#include "memory/allocation.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/threadCritical.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/ostream.hpp"
+#include "loadlib_aix.hpp"
+#include "porting_aix.hpp"
+
+// For loadquery()
+#include <sys/ldr.h>
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation for LoadedLibraryModule
+
+// output debug info
+void LoadedLibraryModule::print(outputStream* os) const {
+  os->print("%15.15s: text: " INTPTR_FORMAT " - " INTPTR_FORMAT
+               ", data: " INTPTR_FORMAT " - " INTPTR_FORMAT " ",
+      shortname, text_from, text_to, data_from, data_to);
+  os->print(" %s", fullpath);
+  if (strlen(membername) > 0) {
+    os->print("(%s)", membername);
+  }
+  os->cr();
+}
+
+
+///////////////////////////////////////////////////////////////////////////////
+// Implementation for LoadedLibraries
+
+// class variables
+LoadedLibraryModule LoadedLibraries::tab[MAX_MODULES];
+int LoadedLibraries::num_loaded = 0;
+
+// Checks whether the address p points to any of the loaded code segments.
+// If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
+// static
+const LoadedLibraryModule* LoadedLibraries::find_for_text_address(const unsigned char* p) {
+
+  if (num_loaded == 0) {
+    reload();
+  }
+  for (int i = 0; i < num_loaded; i++) {
+    if (tab[i].is_in_text(p)) {
+      return &tab[i];
+    }
+  }
+  return NULL;
+}
+
+// Checks whether the address p points to any of the loaded data segments.
+// If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
+// static
+const LoadedLibraryModule* LoadedLibraries::find_for_data_address(const unsigned char* p) {
+  if (num_loaded == 0) {
+    reload();
+  }
+  for (int i = 0; i < num_loaded; i++) {
+    if (tab[i].is_in_data(p)) {
+      return &tab[i];
+    }
+  }
+  return NULL;
+}
+
+// Rebuild the internal table of LoadedLibraryModule objects
+// static
+void LoadedLibraries::reload() {
+
+  ThreadCritical cs;
+
+  // discard old content
+  num_loaded = 0;
+
+  // Call loadquery(L_GETINFO..) to get a list of all loaded Dlls from AIX.
+  size_t buf_size = 4096;
+  char* loadquery_buf = AllocateHeap(buf_size, mtInternal);
+
+  while (loadquery(L_GETINFO, loadquery_buf, buf_size) == -1) {
+    if (errno == ENOMEM) {
+      buf_size *= 2;
+      loadquery_buf = ReallocateHeap(loadquery_buf, buf_size, mtInternal);
+    } else {
+      FreeHeap(loadquery_buf);
+      // EFAULT would mean we passed an invalid buffer pointer.
+      assert(errno != EFAULT, "loadquery: Invalid uintptr_t in info buffer.");
+      fprintf(stderr, "loadquery failed (%d %s)", errno, strerror(errno));
+      return;
+    }
+  }
+
+  // Iterate over the loadquery result. For details see sys/ldr.h on AIX.
+  const struct ld_info* p = (struct ld_info*) loadquery_buf;
+
+  // Ensure we have all loaded libs.
+  bool all_loaded = false;
+  while (num_loaded < MAX_MODULES) {
+    LoadedLibraryModule& mod = tab[num_loaded];
+    mod.text_from = (const unsigned char*) p->ldinfo_textorg;
+    mod.text_to   = (const unsigned char*) (((char*)p->ldinfo_textorg) + p->ldinfo_textsize);
+    mod.data_from = (const unsigned char*) p->ldinfo_dataorg;
+    mod.data_to   = (const unsigned char*) (((char*)p->ldinfo_dataorg) + p->ldinfo_datasize);
+    snprintf(mod.fullpath, sizeof(mod.fullpath), "%s", p->ldinfo_filename);
+    // do we have a member name as well (see ldr.h)?
+    const char* p_mbr_name = p->ldinfo_filename + strlen(p->ldinfo_filename) + 1;
+    if (*p_mbr_name) {
+      snprintf(mod.membername, sizeof(mod.membername), "%s", p_mbr_name);
+    } else {
+      mod.membername[0] = '\0';
+    }
+
+    // fill in the short name
+    const char* p_slash = strrchr(mod.fullpath, '/');
+    if (p_slash) {
+      snprintf(mod.shortname, sizeof(mod.shortname), "%s", p_slash + 1);
+    } else {
+      snprintf(mod.shortname, sizeof(mod.shortname), "%s", mod.fullpath);
+    }
+    num_loaded ++;
+
+    // next entry...
+    if (p->ldinfo_next) {
+      p = (struct ld_info*)(((char*)p) + p->ldinfo_next);
+    } else {
+      all_loaded = true;
+      break;
+    }
+  }
+
+  FreeHeap(loadquery_buf);
+
+  // Ensure we have all loaded libs
+  assert(all_loaded, "loadquery returned more entries than expected. Please increase MAX_MODULES");
+
+} // end LoadedLibraries::reload()
+
+
+// output loaded libraries table
+//static
+void LoadedLibraries::print(outputStream* os) {
+
+  for (int i = 0; i < num_loaded; i++) {
+    tab[i].print(os);
+  }
+
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/loadlib_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+// Loadlib_aix.cpp contains support code for analysing the memory
+// layout of loaded binaries in one's own process space.
+//
+// It is needed, among other things, to provide a dladdr() emulation, because
+// that one is not provided by AIX.
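+//
+// Illustrative use (a sketch): find the module containing a code address.
+//
+//   const LoadedLibraryModule* m =
+//       LoadedLibraries::find_for_text_address((const unsigned char*)pc);
+//   if (m != NULL) {
+//     // m->get_shortname() / m->get_membername() identify the library
+//   }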
+
+#ifndef OS_AIX_VM_LOADLIB_AIX_HPP
+#define OS_AIX_VM_LOADLIB_AIX_HPP
+
+class outputStream;
+
+// This class holds information about a single loaded library module.
+// Note that on AIX, a single library can be spread over multiple
+// modules, one per archive member, e.g.
+// libC.a(shr3_64.o) or libC.a(shrcore_64.o).
+class LoadedLibraryModule {
+
+    friend class LoadedLibraries;
+
+    char fullpath[512];  // eg /usr/lib/libC.a
+    char shortname[30];  // eg libC.a
+    char membername[30]; // eg shrcore_64.o
+    const unsigned char* text_from;
+    const unsigned char* text_to;
+    const unsigned char* data_from;
+    const unsigned char* data_to;
+
+  public:
+
+    const char* get_fullpath() const {
+      return fullpath;
+    }
+    const char* get_shortname() const {
+      return shortname;
+    }
+    const char* get_membername() const {
+      return membername;
+    }
+
+    // text_from, text_to: returns the range of the text (code)
+    // segment for that module
+    const unsigned char* get_text_from() const {
+      return text_from;
+    }
+    const unsigned char* get_text_to() const {
+      return text_to;
+    }
+
+    // data_from/data_to: returns the range of the data
+    // segment for that module
+    const unsigned char* get_data_from() const {
+      return data_from;
+    }
+    const unsigned char* get_data_to() const {
+      return data_to;
+    }
+
+    // returns true if the given address falls within the module's text segment.
+    bool is_in_text(const unsigned char* p) const {
+      return p >= text_from && p < text_to;
+    }
+
+    // returns true if the given address falls within the module's data segment.
+    bool is_in_data(const unsigned char* p) const {
+      return p >= data_from && p < data_to;
+    }
+
+    // output debug info
+    void print(outputStream* os) const;
+
+}; // end LoadedLibraryModule
+
+// This class is a singleton holding a map of all loaded binaries
+// in the AIX process space.
+class LoadedLibraries
+// : AllStatic (including allocation.hpp just for AllStatic is overkill.)
+{
+
+  private:
+
+    enum {MAX_MODULES = 100};
+    static LoadedLibraryModule tab[MAX_MODULES];
+    static int num_loaded;
+
+  public:
+
+    // rebuild the internal table of LoadedLibraryModule objects
+    static void reload();
+
+    // checks whether the address p points to any of the loaded code segments.
+    // If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
+    static const LoadedLibraryModule* find_for_text_address(const unsigned char* p);
+
+    // checks whether the address p points to any of the loaded data segments.
+    // If it does, returns the LoadedLibraryModule entry. If not, returns NULL.
+    static const LoadedLibraryModule* find_for_data_address(const unsigned char* p);
+
+    // output debug info
+    static void print(outputStream* os);
+
+}; // end LoadedLibraries
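+
+// Illustrative usage sketch (an assumption for clarity, not part of this
+// change): after the table has been (re)built, a pc can be attributed to a
+// module like this:
+//
+//   LoadedLibraries::reload();
+//   const LoadedLibraryModule* m =
+//     LoadedLibraries::find_for_text_address((const unsigned char*) pc);
+//   if (m != NULL) {
+//     // pc lies within m->get_fullpath(), e.g. /usr/lib/libC.a(shr3_64.o)
+//   }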
+
+
+#endif // OS_AIX_VM_LOADLIB_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/mutex_aix.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_MUTEX_AIX_INLINE_HPP
+#define OS_AIX_VM_MUTEX_AIX_INLINE_HPP
+
+#include "os_aix.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "thread_aix.inline.hpp"
+
+#endif // OS_AIX_VM_MUTEX_AIX_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/osThread_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// no precompiled headers
+#include "runtime/atomic.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/vmThread.hpp"
+#ifdef TARGET_ARCH_ppc
+# include "assembler_ppc.inline.hpp"
+#endif
+
+
+void OSThread::pd_initialize() {
+  assert(this != NULL, "check");
+  _thread_id        = 0;
+  _pthread_id       = 0;
+  _siginfo = NULL;
+  _ucontext = NULL;
+  _expanding_stack = 0;
+  _alt_sig_stack = NULL;
+
+  _last_cpu_times.sys = _last_cpu_times.user = 0L;
+
+  sigemptyset(&_caller_sigmask);
+
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
+  assert(_startThread_lock != NULL, "check");
+}
+
+void OSThread::pd_destroy() {
+  delete _startThread_lock;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/osThread_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_OSTHREAD_AIX_HPP
+#define OS_AIX_VM_OSTHREAD_AIX_HPP
+
+ public:
+  typedef pid_t thread_id_t;
+
+ private:
+  int _thread_type;
+
+ public:
+
+  int thread_type() const {
+    return _thread_type;
+  }
+  void set_thread_type(int type) {
+    _thread_type = type;
+  }
+
+ private:
+
+  // _pthread_id is the pthread id, which is used by library calls
+  // (e.g. pthread_kill).
+  pthread_t _pthread_id;
+
+  sigset_t _caller_sigmask; // Caller's signal mask
+
+ public:
+
+  // Methods to save/restore caller's signal mask
+  sigset_t  caller_sigmask() const       { return _caller_sigmask; }
+  void    set_caller_sigmask(sigset_t sigmask)  { _caller_sigmask = sigmask; }
+
+#ifndef PRODUCT
+  // Used for debugging, return a unique integer for each thread.
+  int thread_identifier() const   { return _thread_id; }
+#endif
+#ifdef ASSERT
+  // We expect no reposition failures so kill vm if we get one.
+  //
+  bool valid_reposition_failure() {
+    return false;
+  }
+#endif // ASSERT
+  pthread_t pthread_id() const {
+    return _pthread_id;
+  }
+  void set_pthread_id(pthread_t tid) {
+    _pthread_id = tid;
+  }
+
+  // ***************************************************************
+  // suspension support.
+  // ***************************************************************
+
+ public:
+  // flags that support signal based suspend/resume on AIX are in a
+  // separate class to avoid confusion with many flags in OSThread that
+  // are used by VM level suspend/resume.
+  os::SuspendResume sr;
+
+  // _ucontext and _siginfo are used by SR_handler() to save thread context,
+  // and they will later be used to walk the stack or reposition thread PC.
+  // If the thread is not suspended in SR_handler() (e.g. self suspend),
+  // the value in _ucontext is meaningless, so we must use the last Java
+  // frame information as the frame. This will mean that for threads
+  // that are parked on a mutex the profiler (and safepoint mechanism)
+  // will see the thread as if it were still in the Java frame. This is
+  // not a problem for the profiler, since the Java frame is a close
+  // enough approximation. For the safepoint mechanism, when we give it
+  // the Java frame we are not at a point where the safepoint needs the
+  // frame to be that accurate (as for a compiled safepoint), since we
+  // should be in native code and will block ourselves if we transition.
+ private:
+  void* _siginfo;
+  ucontext_t* _ucontext;
+  int _expanding_stack;                 // non zero if manually expanding stack
+  address _alt_sig_stack;               // address of base of alternate signal stack
+
+ public:
+  void* siginfo() const                   { return _siginfo;  }
+  void set_siginfo(void* ptr)             { _siginfo = ptr;   }
+  ucontext_t* ucontext() const            { return _ucontext; }
+  void set_ucontext(ucontext_t* ptr)      { _ucontext = ptr;  }
+  void set_expanding_stack(void)          { _expanding_stack = 1;  }
+  void clear_expanding_stack(void)        { _expanding_stack = 0;  }
+  int  expanding_stack(void)              { return _expanding_stack;  }
+
+  void set_alt_sig_stack(address val)     { _alt_sig_stack = val; }
+  address alt_sig_stack(void)             { return _alt_sig_stack; }
+
+ private:
+  Monitor* _startThread_lock;     // sync parent and child in thread creation
+
+ public:
+
+  Monitor* startThread_lock() const {
+    return _startThread_lock;
+  }
+
+  // ***************************************************************
+  // Platform dependent initialization and cleanup
+  // ***************************************************************
+
+ private:
+
+  void pd_initialize();
+  void pd_destroy();
+
+ public:
+
+  // The last measured values of cpu timing to prevent the "stale
+  // value return" bug in thread_cpu_time.
+  volatile struct {
+    jlong sys;
+    jlong user;
+  } _last_cpu_times;
+
+#endif // OS_AIX_VM_OSTHREAD_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/os_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,5278 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// According to the AIX OS doc #pragma alloca must be used
+// with C++ compiler before referencing the function alloca()
+#pragma alloca
+
+// no precompiled headers
+#include "classfile/classLoader.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "compiler/compileBroker.hpp"
+#include "interpreter/interpreter.hpp"
+#include "jvm_aix.h"
+#include "libperfstat_aix.hpp"
+#include "loadlib_aix.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
+#include "mutex_aix.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "os_share_aix.hpp"
+#include "porting_aix.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm.h"
+#include "prims/jvm_misc.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/extendedPC.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/perfMemory.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/statSampler.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/threadCritical.hpp"
+#include "runtime/timer.hpp"
+#include "services/attachListener.hpp"
+#include "services/runtimeService.hpp"
+#include "thread_aix.inline.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/events.hpp"
+#include "utilities/growableArray.hpp"
+#include "utilities/vmError.hpp"
+#ifdef TARGET_ARCH_ppc
+# include "assembler_ppc.inline.hpp"
+# include "nativeInst_ppc.hpp"
+#endif
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// put OS-includes here (sorted alphabetically)
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <poll.h>
+#include <procinfo.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/systemcfg.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/vminfo.h>
+#include <sys/wait.h>
+
+// Add missing declarations (these should be in procinfo.h, but aren't until AIX 6.1).
+#if !defined(_AIXVERSION_610)
+extern "C" {
+  int getthrds64(pid_t ProcessIdentifier,
+                 struct thrdentry64* ThreadBuffer,
+                 int ThreadSize,
+                 tid64_t* IndexPointer,
+                 int Count);
+}
+#endif
+
+// Excerpts from systemcfg.h definitions newer than AIX 5.3
+#ifndef PV_7
+# define PV_7 0x200000          // Power PC 7
+# define PV_7_Compat 0x208000   // Power PC 7
+#endif
+
+#define MAX_PATH (2 * K)
+
+// for timer info max values which include all bits
+#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
+// for multipage initialization error analysis (in 'g_multipage_error')
+#define ERROR_MP_OS_TOO_OLD                          100
+#define ERROR_MP_EXTSHM_ACTIVE                       101
+#define ERROR_MP_VMGETINFO_FAILED                    102
+#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
+
+// The semantics in this file are thus that codeptr_t is a *real code ptr*.
+// This means that any function taking codeptr_t as argument will assume
+// a real code pointer and won't handle function descriptors (e.g. getFuncName),
+// whereas functions taking address as argument will deal with function
+// descriptors (e.g. os::dll_address_to_library_name).
+typedef unsigned int* codeptr_t;
+
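+// Note on AIX function descriptors (added for clarity; the layout described
+// is the standard AIX/PPC convention): a function pointer literal such as
+// &foo does not point at code but at a function descriptor in the data
+// segment -- a triple of { code entry point, TOC pointer, environment
+// pointer }. Reading the first slot yields the real code pointer, e.g.:
+//
+//   address fn_literal = (address) &some_function;  // descriptor in .data
+//   address entry      = *(address*) fn_literal;    // real code pointer
+//
+// resolve_function_descriptor_to_code_pointer() below implements this via
+// the FunctionDescriptor helper.
+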
+// typedefs for stackslots, stack pointers, pointers to op codes
+typedef unsigned long stackslot_t;
+typedef stackslot_t* stackptr_t;
+
+// query dimensions of the stack of the calling thread
+static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
+
+// function to check a given stack pointer against given stack limits
+inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
+  if (((uintptr_t)sp) & 0x7) {
+    return false;
+  }
+  if (sp > stack_base) {
+    return false;
+  }
+  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
+    return false;
+  }
+  return true;
+}
+
+// returns true if function is a valid codepointer
+inline bool is_valid_codepointer(codeptr_t p) {
+  if (!p) {
+    return false;
+  }
+  if (((uintptr_t)p) & 0x3) {
+    return false;
+  }
+  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
+    return false;
+  }
+  return true;
+}
+
+// macro to check a given stack pointer against given stack limits and to die if test fails
+#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
+    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
+}
+
+// macro to check the current stack pointer against given stacklimits
+#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
+  address sp; \
+  sp = os::current_stack_pointer(); \
+  CHECK_STACK_PTR(sp, stack_base, stack_size); \
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// global variables (for a description see os_aix.hpp)
+
+julong    os::Aix::_physical_memory = 0;
+pthread_t os::Aix::_main_thread = ((pthread_t)0);
+int       os::Aix::_page_size = -1;
+int       os::Aix::_on_pase = -1;
+int       os::Aix::_os_version = -1;
+int       os::Aix::_stack_page_size = -1;
+size_t    os::Aix::_shm_default_page_size = -1;
+int       os::Aix::_can_use_64K_pages = -1;
+int       os::Aix::_can_use_16M_pages = -1;
+int       os::Aix::_xpg_sus_mode = -1;
+int       os::Aix::_extshm = -1;
+int       os::Aix::_logical_cpus = -1;
+
+////////////////////////////////////////////////////////////////////////////////
+// local variables
+
+static int      g_multipage_error  = -1;   // error analysis for multipage initialization
+static jlong    initial_time_count = 0;
+static int      clock_tics_per_sec = 100;
+static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
+static bool     check_signals      = true;
+static pid_t    _initial_pid       = 0;
+static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
+static sigset_t SR_sigset;
+static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls.
+
+julong os::available_memory() {
+  return Aix::available_memory();
+}
+
+julong os::Aix::available_memory() {
+  os::Aix::meminfo_t mi;
+  if (os::Aix::get_meminfo(&mi)) {
+    return mi.real_free;
+  } else {
+    return 0xFFFFFFFFFFFFFFFFLL;
+  }
+}
+
+julong os::physical_memory() {
+  return Aix::physical_memory();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// environment support
+
+bool os::getenv(const char* name, char* buf, int len) {
+  const char* val = ::getenv(name);
+  if (val != NULL && strlen(val) < (size_t)len) {
+    strcpy(buf, val);
+    return true;
+  }
+  if (len > 0) buf[0] = 0;  // return an empty string
+  return false;
+}
+
+
+// Return true if the process is running with elevated (setuid/setgid) privileges.
+
+bool os::have_special_privileges() {
+  static bool init = false;
+  static bool privileges = false;
+  if (!init) {
+    privileges = (getuid() != geteuid()) || (getgid() != getegid());
+    init = true;
+  }
+  return privileges;
+}
+
+// Helper function, emulates disclaim64 using multiple 32bit disclaims
+// because we cannot use disclaim64() on AS/400 and old AIX releases.
+static bool my_disclaim64(char* addr, size_t size) {
+
+  if (size == 0) {
+    return true;
+  }
+
+  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
+  const unsigned int maxDisclaimSize = 0x80000000;
+
+  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
+  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
+
+  char* p = addr;
+
+  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
+    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
+      //if (Verbose)
+      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      return false;
+    }
+    p += maxDisclaimSize;
+  }
+
+  if (lastDisclaimSize > 0) {
+    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
+      //if (Verbose)
+      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      return false;
+    }
+  }
+
+  return true;
+}
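+
+// Worked example (illustrative): for size == 5 GB, my_disclaim64 issues two
+// full 2 GB disclaims (maxDisclaimSize) plus a final 1 GB disclaim for the
+// remainder; a size that is an exact multiple of 2 GB skips the remainder.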
+
+// Cpu architecture string
+#if defined(PPC32)
+static char cpu_arch[] = "ppc";
+#elif defined(PPC64)
+static char cpu_arch[] = "ppc64";
+#else
+#error Add appropriate cpu_arch setting
+#endif
+
+
+// Given an address, returns the size of the page backing that address.
+size_t os::Aix::query_pagesize(void* addr) {
+
+  vm_page_info pi;
+  pi.addr = (uint64_t)addr;
+  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
+    return pi.pagesize;
+  } else {
+    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
+    assert(false, "vmgetinfo failed to retrieve page size");
+    return SIZE_4K;
+  }
+
+}
+
+// Returns the kernel thread id of the currently running thread.
+pid_t os::Aix::gettid() {
+  return (pid_t) thread_self();
+}
+
+void os::Aix::initialize_system_info() {
+
+  // get the number of online(logical) cpus instead of configured
+  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
+  assert(_processor_count > 0, "_processor_count must be > 0");
+
+  // retrieve total physical storage
+  os::Aix::meminfo_t mi;
+  if (!os::Aix::get_meminfo(&mi)) {
+    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
+    assert(false, "os::Aix::get_meminfo failed.");
+  }
+  _physical_memory = (julong) mi.real_total;
+}
+
+// Helper function for tracing page sizes.
+static const char* describe_pagesize(size_t pagesize) {
+  switch (pagesize) {
+    case SIZE_4K : return "4K";
+    case SIZE_64K: return "64K";
+    case SIZE_16M: return "16M";
+    case SIZE_16G: return "16G";
+    default:
+      assert(false, "surprise");
+      return "??";
+  }
+}
+
+// Retrieve information about multipage size support. Will initialize
+// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
+// Aix::_can_use_16M_pages.
+// Must be called before calling os::large_page_init().
+void os::Aix::query_multipage_support() {
+
+  guarantee(_page_size == -1 &&
+            _stack_page_size == -1 &&
+            _can_use_64K_pages == -1 &&
+            _can_use_16M_pages == -1 &&
+            g_multipage_error == -1,
+            "do not call twice");
+
+  _page_size = ::sysconf(_SC_PAGESIZE);
+
+  // This really would surprise me.
+  assert(_page_size == SIZE_4K, "surprise!");
+
+
+  // query default data page size (default page size for C-Heap, pthread stacks and .bss).
+  // Default data page size is influenced either by linker options (-bdatapsize)
+  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
+  // default should be 4K.
+  size_t data_page_size = SIZE_4K;
+  {
+    void* p = ::malloc(SIZE_16M);
+    data_page_size = os::Aix::query_pagesize(p);
+    ::free(p);
+  }
+
+  // query default shm page size (LDR_CNTRL SHMPSIZE)
+  {
+    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
+    guarantee(shmid != -1, "shmget failed");
+    void* p = ::shmat(shmid, NULL, 0);
+    ::shmctl(shmid, IPC_RMID, NULL);
+    guarantee(p != (void*) -1, "shmat failed");
+    _shm_default_page_size = os::Aix::query_pagesize(p);
+    ::shmdt(p);
+  }
+
+  // before querying the stack page size, make sure we are not running as primordial
+  // thread (because the primordial thread's stack may have a different page size than
+  // pthread thread stacks). Running a VM on the primordial thread won't work for a
+  // number of reasons, so we may just as well guarantee it here.
+  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");
+
+  // query stack page size
+  {
+    int dummy = 0;
+    _stack_page_size = os::Aix::query_pagesize(&dummy);
+    // everything else would surprise me and should be looked into
+    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
+    // also, just for completeness: pthread stacks are allocated from C heap, so
+    // stack page size should be the same as data page size
+    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
+  }
+
+  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
+  // for system V shm.
+  if (Aix::extshm()) {
+    if (Verbose) {
+      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
+                      "Please make sure EXTSHM is OFF for large page support.\n");
+    }
+    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
+    _can_use_64K_pages = _can_use_16M_pages = 0;
+    goto query_multipage_support_end;
+  }
+
+  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
+  {
+    const int MAX_PAGE_SIZES = 4;
+    psize_t sizes[MAX_PAGE_SIZES];
+    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
+    if (num_psizes == -1) {
+      if (Verbose) {
+        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
+        fprintf(stderr, "disabling multipage support.\n");
+      }
+      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
+      _can_use_64K_pages = _can_use_16M_pages = 0;
+      goto query_multipage_support_end;
+    }
+    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
+    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
+    if (Verbose) {
+      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
+      for (int i = 0; i < num_psizes; i ++) {
+        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
+      }
+      fprintf(stderr, " .\n");
+    }
+
+    // Can we use 64K, 16M pages?
+    _can_use_64K_pages = 0;
+    _can_use_16M_pages = 0;
+    for (int i = 0; i < num_psizes; i ++) {
+      if (sizes[i] == SIZE_64K) {
+        _can_use_64K_pages = 1;
+      } else if (sizes[i] == SIZE_16M) {
+        _can_use_16M_pages = 1;
+      }
+    }
+
+    if (!_can_use_64K_pages) {
+      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
+    }
+
+    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
+    // there must be an actual 16M page pool, and we must run with enough rights.
+    if (_can_use_16M_pages) {
+      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
+      guarantee(shmid != -1, "shmget failed");
+      struct shmid_ds shm_buf = { 0 };
+      shm_buf.shm_pagesize = SIZE_16M;
+      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
+      const int en = errno;
+      ::shmctl(shmid, IPC_RMID, NULL);
+      if (!can_set_pagesize) {
+        if (Verbose) {
+          fprintf(stderr, "Failed to allocate even one misely 16M page. shmctl failed with %d (%s).\n"
+                          "Will deactivate 16M support.\n", en, strerror(en));
+        }
+        _can_use_16M_pages = 0;
+      }
+    }
+
+  } // end: check which pages can be used for shared memory
+
+query_multipage_support_end:
+
+  guarantee(_page_size != -1 &&
+            _stack_page_size != -1 &&
+            _can_use_64K_pages != -1 &&
+            _can_use_16M_pages != -1, "Page sizes not properly initialized");
+
+  if (_can_use_64K_pages) {
+    g_multipage_error = 0;
+  }
+
+  if (Verbose) {
+    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
+    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
+    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
+    fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
+    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
+    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
+  }
+
+} // end os::Aix::query_multipage_support()
+
+
+// The code for this method was initially derived from the version in os_linux.cpp
+void os::init_system_properties_values() {
+  // The next few definitions allow the code to be taken almost verbatim from os_linux.cpp:
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
+#define DEFAULT_LIBPATH "/usr/lib:/lib"
+#define EXTENSIONS_DIR  "/lib/ext"
+#define ENDORSED_DIR    "/lib/endorsed"
+
+  // sysclasspath, java_home, dll_dir
+  char *home_path;
+  char *dll_path;
+  char *pslash;
+  char buf[MAXPATHLEN];
+  os::jvm_path(buf, sizeof(buf));
+
+  // Found the full path to libjvm.so.
+  // Now cut the path to <java_home>/jre if we can.
+  *(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
+  pslash = strrchr(buf, '/');
+  if (pslash != NULL) {
+    *pslash = '\0';            // get rid of /{client|server|hotspot}
+  }
+
+  dll_path = malloc(strlen(buf) + 1);
+  strcpy(dll_path, buf);
+  Arguments::set_dll_dir(dll_path);
+
+  if (pslash != NULL) {
+    pslash = strrchr(buf, '/');
+    if (pslash != NULL) {
+      *pslash = '\0';          // get rid of /<arch>
+      pslash = strrchr(buf, '/');
+      if (pslash != NULL) {
+        *pslash = '\0';        // get rid of /lib
+      }
+    }
+  }
+
+  home_path = malloc(strlen(buf) + 1);
+  strcpy(home_path, buf);
+  Arguments::set_java_home(home_path);
+
+  if (!set_boot_path('/', ':')) return;
+
+  // Where to look for native libraries
+
+  // On Aix we get the user setting of LIBPATH
+  // Eventually, all the library path setting will be done here.
+  char *ld_library_path;
+
+  // Construct the invariant part of ld_library_path.
+  ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
+  sprintf(ld_library_path, DEFAULT_LIBPATH);
+
+  // Get the user setting of LIBPATH and prepend it.
+  char *v = ::getenv("LIBPATH");
+  if (v == NULL) {
+    v = "";
+  }
+
+  char *t = ld_library_path;
+  // That's +1 for the colon and +1 for the trailing '\0'
+  ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
+  sprintf(ld_library_path, "%s:%s", v, t);
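+  // (Illustrative example: with LIBPATH=/opt/mylibs, ld_library_path now
+  // reads "/opt/mylibs:/usr/lib:/lib".)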
+
+  Arguments::set_library_path(ld_library_path);
+
+  // Extensions directories
+  char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
+  sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
+  Arguments::set_ext_dirs(cbuf);
+
+  // Endorsed standards default directory.
+  cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
+  sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
+  Arguments::set_endorsed_dirs(cbuf);
+
+#undef malloc
+#undef DEFAULT_LIBPATH
+#undef EXTENSIONS_DIR
+#undef ENDORSED_DIR
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// breakpoint support
+
+void os::breakpoint() {
+  BREAKPOINT;
+}
+
+extern "C" void breakpoint() {
+  // use debugger to set breakpoint here
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// signal support
+
+debug_only(static bool signal_sets_initialized = false);
+static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
+
+bool os::Aix::is_sig_ignored(int sig) {
+  struct sigaction oact;
+  sigaction(sig, (struct sigaction*)NULL, &oact);
+  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
+    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
+  return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
+}
+
+void os::Aix::signal_sets_init() {
+  // Should also have an assertion stating we are still single-threaded.
+  assert(!signal_sets_initialized, "Already initialized");
+  // Fill in signals that are necessarily unblocked for all threads in
+  // the VM. Currently, we unblock the following signals:
+  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
+  //                         by -Xrs (=ReduceSignalUsage));
+  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
+  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
+  // the dispositions or masks wrt these signals.
+  // Programs embedding the VM that want to use the above signals for their
+  // own purposes must, at this time, use the "-Xrs" option to prevent
+  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
+  // (See bug 4345157, and other related bugs).
+  // In reality, though, unblocking these signals is really a nop, since
+  // these signals are not blocked by default.
+  sigemptyset(&unblocked_sigs);
+  sigemptyset(&allowdebug_blocked_sigs);
+  sigaddset(&unblocked_sigs, SIGILL);
+  sigaddset(&unblocked_sigs, SIGSEGV);
+  sigaddset(&unblocked_sigs, SIGBUS);
+  sigaddset(&unblocked_sigs, SIGFPE);
+  sigaddset(&unblocked_sigs, SIGTRAP);
+  sigaddset(&unblocked_sigs, SIGDANGER);
+  sigaddset(&unblocked_sigs, SR_signum);
+
+  if (!ReduceSignalUsage) {
+   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
+     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
+   }
+   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
+     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
+   }
+   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
+     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
+   }
+  }
+  // Fill in signals that are blocked by all but the VM thread.
+  sigemptyset(&vm_sigs);
+  if (!ReduceSignalUsage)
+    sigaddset(&vm_sigs, BREAK_SIGNAL);
+  debug_only(signal_sets_initialized = true);
+}
+
+// These are signals that are unblocked while a thread is running Java.
+// (For some reason, they get blocked by default.)
+sigset_t* os::Aix::unblocked_signals() {
+  assert(signal_sets_initialized, "Not initialized");
+  return &unblocked_sigs;
+}
+
+// These are the signals that are blocked while a (non-VM) thread is
+// running Java. Only the VM thread handles these signals.
+sigset_t* os::Aix::vm_signals() {
+  assert(signal_sets_initialized, "Not initialized");
+  return &vm_sigs;
+}
+
+// These are signals that are blocked during cond_wait to allow debugger in
+sigset_t* os::Aix::allowdebug_blocked_signals() {
+  assert(signal_sets_initialized, "Not initialized");
+  return &allowdebug_blocked_sigs;
+}
+
+void os::Aix::hotspot_sigmask(Thread* thread) {
+
+  // Save caller's signal mask before setting VM signal mask.
+  sigset_t caller_sigmask;
+  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
+
+  OSThread* osthread = thread->osthread();
+  osthread->set_caller_sigmask(caller_sigmask);
+
+  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
+
+  if (!ReduceSignalUsage) {
+    if (thread->is_VM_thread()) {
+      // Only the VM thread handles BREAK_SIGNAL ...
+      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
+    } else {
+      // ... all other threads block BREAK_SIGNAL
+      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
+    }
+  }
+}
+
+// Retrieve memory information.
+// Returns false if something went wrong;
+// the content of pmi is undefined in this case.
+bool os::Aix::get_meminfo(meminfo_t* pmi) {
+
+  assert(pmi, "get_meminfo: invalid parameter");
+
+  memset(pmi, 0, sizeof(meminfo_t));
+
+  if (os::Aix::on_pase()) {
+
+    Unimplemented();
+    return false;
+
+  } else {
+
+    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
+    // See:
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
+
+    perfstat_memory_total_t psmt;
+    memset (&psmt, '\0', sizeof(psmt));
+    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
+    if (rc == -1) {
+      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
+      assert(0, "perfstat_memory_total() failed");
+      return false;
+    }
+
+    assert(rc == 1, "perfstat_memory_total() - weird return code");
+
+    // excerpt from
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
+    // The fields of perfstat_memory_total_t:
+    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
+    // u_longlong_t real_total         Total real memory (in 4 KB pages).
+    // u_longlong_t real_free          Free real memory (in 4 KB pages).
+    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
+    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
+
+    pmi->virt_total = psmt.virt_total * 4096;
+    pmi->real_total = psmt.real_total * 4096;
+    pmi->real_free = psmt.real_free * 4096;
+    pmi->pgsp_total = psmt.pgsp_total * 4096;
+    pmi->pgsp_free = psmt.pgsp_free * 4096;
+
+    return true;
+
+  }
+} // end os::Aix::get_meminfo
+
+// Retrieve global cpu information.
+// Returns false if something went wrong;
+// the content of pci is undefined in this case.
+bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
+  assert(pci, "get_cpuinfo: invalid parameter");
+  memset(pci, 0, sizeof(cpuinfo_t));
+
+  perfstat_cpu_total_t psct;
+  memset (&psct, '\0', sizeof(psct));
+
+  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
+    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
+    assert(0, "perfstat_cpu_total() failed");
+    return false;
+  }
+
+  // global cpu information
+  strcpy (pci->description, psct.description);
+  pci->processorHZ = psct.processorHZ;
+  pci->ncpus = psct.ncpus;
+  os::Aix::_logical_cpus = psct.ncpus;
+  for (int i = 0; i < 3; i++) {
+    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
+  }
+
+  // get the processor version from _system_configuration
+  switch (_system_configuration.version) {
+  case PV_7:
+    strcpy(pci->version, "Power PC 7");
+    break;
+  case PV_6_1:
+    strcpy(pci->version, "Power PC 6 DD1.x");
+    break;
+  case PV_6:
+    strcpy(pci->version, "Power PC 6");
+    break;
+  case PV_5:
+    strcpy(pci->version, "Power PC 5");
+    break;
+  case PV_5_2:
+    strcpy(pci->version, "Power PC 5_2");
+    break;
+  case PV_5_3:
+    strcpy(pci->version, "Power PC 5_3");
+    break;
+  case PV_5_Compat:
+    strcpy(pci->version, "PV_5_Compat");
+    break;
+  case PV_6_Compat:
+    strcpy(pci->version, "PV_6_Compat");
+    break;
+  case PV_7_Compat:
+    strcpy(pci->version, "PV_7_Compat");
+    break;
+  default:
+    strcpy(pci->version, "unknown");
+  }
+
+  return true;
+
+} //end os::Aix::get_cpuinfo
+
+//////////////////////////////////////////////////////////////////////////////
+// detecting pthread library
+
+void os::Aix::libpthread_init() {
+  return;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// create new thread
+
+// Thread start routine for all newly created threads
+static void *java_start(Thread *thread) {
+
+  // find out my own stack dimensions
+  {
+    // actually, this should do exactly the same as thread->record_stack_base_and_size...
+    address base = 0;
+    size_t size = 0;
+    query_stack_dimensions(&base, &size);
+    thread->set_stack_base(base);
+    thread->set_stack_size(size);
+  }
+
+  // Do some sanity checks.
+  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
+
+  // Try to randomize the cache line index of hot stack frames.
+  // This helps when threads with the same stack traces evict each other's
+  // cache lines. The threads can be either from the same JVM instance, or
+  // from different JVM instances. The benefit is especially true for
+  // processors with hyperthreading technology.
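+  // (The alloca below shifts the stack pointer by one of eight offsets,
+  // ((pid ^ counter) & 7) * 128 bytes, i.e. 0..896 bytes, per new thread.)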
+
+  static int counter = 0;
+  int pid = os::current_process_id();
+  alloca(((pid ^ counter++) & 7) * 128);
+
+  ThreadLocalStorage::set_thread(thread);
+
+  OSThread* osthread = thread->osthread();
+
+  // thread_id is kernel thread id (similar to Solaris LWP id)
+  osthread->set_thread_id(os::Aix::gettid());
+
+  // initialize signal mask for this thread
+  os::Aix::hotspot_sigmask(thread);
+
+  // initialize floating point control register
+  os::Aix::init_thread_fpu_state();
+
+  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
+
+  // call one more level start routine
+  thread->run();
+
+  return 0;
+}
+
+bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
+
+  // We want the whole function to be synchronized.
+  ThreadCritical cs;
+
+  assert(thread->osthread() == NULL, "caller responsible");
+
+  // Allocate the OSThread object
+  OSThread* osthread = new OSThread(NULL, NULL);
+  if (osthread == NULL) {
+    return false;
+  }
+
+  // set the correct thread state
+  osthread->set_thread_type(thr_type);
+
+  // Initial state is ALLOCATED but not INITIALIZED
+  osthread->set_state(ALLOCATED);
+
+  thread->set_osthread(osthread);
+
+  // init thread attributes
+  pthread_attr_t attr;
+  pthread_attr_init(&attr);
+  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
+
+  // Make sure we run in 1:1 kernel-user-thread mode.
+  if (os::Aix::on_aix()) {
+    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
+    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
+  } // end: aix
+
+  // Start in suspended state, and in os::thread_start, wake the thread up.
+  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
+
+  // calculate stack size if it's not specified by caller
+  if (os::Aix::supports_variable_stack_size()) {
+    if (stack_size == 0) {
+      stack_size = os::Aix::default_stack_size(thr_type);
+
+      switch (thr_type) {
+      case os::java_thread:
+        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
+        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
+        stack_size = JavaThread::stack_size_at_create();
+        break;
+      case os::compiler_thread:
+        if (CompilerThreadStackSize > 0) {
+          stack_size = (size_t)(CompilerThreadStackSize * K);
+          break;
+        } // else fall through:
+          // use VMThreadStackSize if CompilerThreadStackSize is not defined
+      case os::vm_thread:
+      case os::pgc_thread:
+      case os::cgc_thread:
+      case os::watcher_thread:
+        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
+        break;
+      }
+    }
+
+    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
+    pthread_attr_setstacksize(&attr, stack_size);
+  } // else let pthread_create() pick the default value (96 K on AIX)
+
+  pthread_t tid;
+  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
+
+  pthread_attr_destroy(&attr);
+
+  if (ret != 0) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      perror("pthread_create()");
+    }
+    // Need to clean up stuff we've allocated so far
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
+  }
+
+  // Store pthread info into the OSThread
+  osthread->set_pthread_id(tid);
+
+  return true;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// attach existing thread
+
+// bootstrap the main thread
+bool os::create_main_thread(JavaThread* thread) {
+  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
+  return create_attached_thread(thread);
+}
+
+bool os::create_attached_thread(JavaThread* thread) {
+#ifdef ASSERT
+    thread->verify_not_published();
+#endif
+
+  // Allocate the OSThread object
+  OSThread* osthread = new OSThread(NULL, NULL);
+
+  if (osthread == NULL) {
+    return false;
+  }
+
+  // Store pthread info into the OSThread
+  osthread->set_thread_id(os::Aix::gettid());
+  osthread->set_pthread_id(::pthread_self());
+
+  // initialize floating point control register
+  os::Aix::init_thread_fpu_state();
+
+  // some sanity checks
+  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
+
+  // Initial thread state is RUNNABLE
+  osthread->set_state(RUNNABLE);
+
+  thread->set_osthread(osthread);
+
+  if (UseNUMA) {
+    int lgrp_id = os::numa_get_group_id();
+    if (lgrp_id != -1) {
+      thread->set_lgrp_id(lgrp_id);
+    }
+  }
+
+  // initialize signal mask for this thread
+  // and save the caller's signal mask
+  os::Aix::hotspot_sigmask(thread);
+
+  return true;
+}
+
+void os::pd_start_thread(Thread* thread) {
+  int status = pthread_continue_np(thread->osthread()->pthread_id());
+  assert(status == 0, "pthread_continue_np failed");
+}
+
+// Free OS resources related to the OSThread
+void os::free_thread(OSThread* osthread) {
+  assert(osthread != NULL, "osthread not set");
+
+  if (Thread::current()->osthread() == osthread) {
+    // Restore caller's signal mask
+    sigset_t sigmask = osthread->caller_sigmask();
+    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
+   }
+
+  delete osthread;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// thread local storage
+
+int os::allocate_thread_local_storage() {
+  pthread_key_t key;
+  int rslt = pthread_key_create(&key, NULL);
+  assert(rslt == 0, "cannot allocate thread local storage");
+  return (int)key;
+}
+
+// Note: This is currently not used by VM, as we don't destroy TLS key
+// on VM exit.
+void os::free_thread_local_storage(int index) {
+  int rslt = pthread_key_delete((pthread_key_t)index);
+  assert(rslt == 0, "invalid index");
+}
+
+void os::thread_local_storage_at_put(int index, void* value) {
+  int rslt = pthread_setspecific((pthread_key_t)index, value);
+  assert(rslt == 0, "pthread_setspecific failed");
+}
+
+extern "C" Thread* get_thread() {
+  return ThreadLocalStorage::thread();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// time support
+
+// Time since start-up in seconds to a fine granularity.
+// Used by VMSelfDestructTimer and the MemProfiler.
+double os::elapsedTime() {
+  return (double)(os::elapsed_counter()) * 0.000001;
+}
+
+jlong os::elapsed_counter() {
+  timeval time;
+  int status = gettimeofday(&time, NULL);
+  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
+}
+
+jlong os::elapsed_frequency() {
+  return (1000 * 1000);
+}
+
+// For now, we say that AIX does not support vtime. I have no idea
+// whether it can actually be made to (DLD, 9/13/05).
+
+bool os::supports_vtime() { return false; }
+bool os::enable_vtime()   { return false; }
+bool os::vtime_enabled()  { return false; }
+double os::elapsedVTime() {
+  // better than nothing, but not much
+  return elapsedTime();
+}
+
+jlong os::javaTimeMillis() {
+  timeval time;
+  int status = gettimeofday(&time, NULL);
+  assert(status != -1, "aix error at gettimeofday()");
+  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
+}
+
+// We need to manually declare mread_real_time,
+// because IBM didn't provide a prototype in time.h.
+// (they probably only ever tested in C, not C++)
+extern "C"
+int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
+
+jlong os::javaTimeNanos() {
+  if (os::Aix::on_pase()) {
+    Unimplemented();
+    return 0;
+  }
+  else {
+    // On AIX use the precision of the processor's real time clock
+    // or time base registers.
+    timebasestruct_t time;
+    int rc;
+
+    // If the CPU has a time register, it will be used and
+    // we have to convert to real time first. After conversion we have the following data:
+    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
+    // time.tb_low  [nanoseconds after the last full second above]
+    // We better use mread_real_time here instead of read_real_time
+    // to ensure that we will get a monotonically increasing time.
+    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
+      rc = time_base_to_time(&time, TIMEBASE_SZ);
+      assert(rc != -1, "aix error at time_base_to_time()");
+    }
+    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
+  }
+}
+
+void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
+  {
+    // gettimeofday is based on time in seconds since the Epoch and thus does not wrap.
+    info_ptr->max_value = ALL_64_BITS;
+
+    // gettimeofday is a real time clock, so it may skip forward or backward.
+    info_ptr->may_skip_backward = true;
+    info_ptr->may_skip_forward = true;
+  }
+
+  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
+}
+
+// Return the real, user, and system times in seconds from an
+// arbitrary fixed point in the past.
+bool os::getTimesSecs(double* process_real_time,
+                      double* process_user_time,
+                      double* process_system_time) {
+  struct tms ticks;
+  clock_t real_ticks = times(&ticks);
+
+  if (real_ticks == (clock_t) (-1)) {
+    return false;
+  } else {
+    double ticks_per_second = (double) clock_tics_per_sec;
+    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
+    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
+    *process_real_time = ((double) real_ticks) / ticks_per_second;
+
+    return true;
+  }
+}
+
+
+char * os::local_time_string(char *buf, size_t buflen) {
+  struct tm t;
+  time_t long_time;
+  time(&long_time);
+  localtime_r(&long_time, &t);
+  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
+               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
+               t.tm_hour, t.tm_min, t.tm_sec);
+  return buf;
+}
+
+struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
+  return localtime_r(clock, res);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// runtime exit support
+
+// Note: os::shutdown() might be called very early during initialization, or
+// called from signal handler. Before adding something to os::shutdown(), make
+// sure it is async-safe and can handle partially initialized VM.
+void os::shutdown() {
+
+  // allow PerfMemory to attempt cleanup of any persistent resources
+  perfMemory_exit();
+
+  // needs to remove object in file system
+  AttachListener::abort();
+
+  // flush buffered output, finish log files
+  ostream_abort();
+
+  // Check for abort hook
+  abort_hook_t abort_hook = Arguments::abort_hook();
+  if (abort_hook != NULL) {
+    abort_hook();
+  }
+
+}
+
+// Note: os::abort() might be called very early during initialization, or
+// called from signal handler. Before adding something to os::abort(), make
+// sure it is async-safe and can handle partially initialized VM.
+void os::abort(bool dump_core) {
+  os::shutdown();
+  if (dump_core) {
+#ifndef PRODUCT
+    fdStream out(defaultStream::output_fd());
+    out.print_raw("Current thread is ");
+    char buf[16];
+    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
+    out.print_raw_cr(buf);
+    out.print_raw_cr("Dumping core ...");
+#endif
+    ::abort(); // dump core
+  }
+
+  ::exit(1);
+}
+
+// Die immediately, no exit hook, no abort hook, no cleanup.
+void os::die() {
+  ::abort();
+}
+
+// Unused on Aix for now.
+void os::set_error_file(const char *logfile) {}
+
+
+// This method is a copy of JDK's sysGetLastErrorString
+// from src/solaris/hpi/src/system_md.c
+
+size_t os::lasterror(char *buf, size_t len) {
+
+  if (errno == 0)  return 0;
+
+  const char *s = ::strerror(errno);
+  size_t n = ::strlen(s);
+  if (n >= len) {
+    n = len - 1;
+  }
+  ::strncpy(buf, s, n);
+  buf[n] = '\0';
+  return n;
+}
+
+intx os::current_thread_id() { return (intx)pthread_self(); }
+int os::current_process_id() {
+
+  // This implementation returns a unique pid, the pid of the
+  // launcher thread that starts the vm 'process'.
+
+  // Under POSIX, getpid() returns the same pid for every thread of a
+  // process, rather than a unique pid per thread.
+  // Use os::Aix::gettid() if you need the kernel id of the calling thread.
+
+  // If you are looking for a unique id for the calling thread, then look
+  // at the OSThread::thread_id() method in the osThread_aix.hpp file.
+
+  return (int)(_initial_pid ? _initial_pid : getpid());
+}
+
+// DLL functions
+
+const char* os::dll_file_extension() { return ".so"; }
+
+// This must be hard coded because it's the system's temporary
+// directory, not the java application's temp directory, a la java.io.tmpdir.
+const char* os::get_temp_directory() { return "/tmp"; }
+
+static bool file_exists(const char* filename) {
+  struct stat statbuf;
+  if (filename == NULL || strlen(filename) == 0) {
+    return false;
+  }
+  return os::stat(filename, &statbuf) == 0;
+}
+
+bool os::dll_build_name(char* buffer, size_t buflen,
+                        const char* pname, const char* fname) {
+  bool retval = false;
+  // Copied from libhpi
+  const size_t pnamelen = pname ? strlen(pname) : 0;
+
+  // Return error on buffer overflow.
+  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
+    *buffer = '\0';
+    return retval;
+  }
+
+  if (pnamelen == 0) {
+    snprintf(buffer, buflen, "lib%s.so", fname);
+    retval = true;
+  } else if (strchr(pname, *os::path_separator()) != NULL) {
+    int n;
+    char** pelements = split_path(pname, &n);
+    for (int i = 0; i < n; i++) {
+      // Really shouldn't be NULL, but check can't hurt
+      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
+        continue; // skip the empty path values
+      }
+      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
+      if (file_exists(buffer)) {
+        retval = true;
+        break;
+      }
+    }
+    // release the storage
+    for (int i = 0; i < n; i++) {
+      if (pelements[i] != NULL) {
+        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
+      }
+    }
+    if (pelements != NULL) {
+      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
+    }
+  } else {
+    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
+    retval = true;
+  }
+  return retval;
+}
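+
+// (Illustrative: dll_build_name(buf, sizeof(buf), "/usr/lib", "jsig")
+// produces "/usr/lib/libjsig.so"; a search path such as "/a:/b" is probed
+// element by element until the library file exists.)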
+
+// Check if addr is inside libjvm.so.
+bool os::address_is_in_vm(address addr) {
+
+  // Input could be a real pc or a function pointer literal. The latter
+  // would be a function descriptor residing in the data segment of a module.
+
+  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
+  if (lib) {
+    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
+      return true;
+    } else {
+      return false;
+    }
+  } else {
+    lib = LoadedLibraries::find_for_data_address(addr);
+    if (lib) {
+      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
+        return true;
+      } else {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+}
+
+// Resolve an AIX function descriptor literal to a code pointer.
+// If the input is a valid code pointer to a text segment of a loaded module,
+//   it is returned unchanged.
+// If the input is a valid AIX function descriptor, it is resolved to the
+//   code entry point.
+// If the input is neither a valid function descriptor nor a valid code pointer,
+//   NULL is returned.
+static address resolve_function_descriptor_to_code_pointer(address p) {
+
+  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
+  if (lib) {
+    // It's a real code pointer.
+    return p;
+  } else {
+    lib = LoadedLibraries::find_for_data_address(p);
+    if (lib) {
+      // pointer to data segment, potential function descriptor
+      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
+      if (LoadedLibraries::find_for_text_address(code_entry)) {
+        // It's a function descriptor
+        return code_entry;
+      }
+    }
+  }
+  return NULL;
+}
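+
+// (Illustrative sketch: on AIX/PPC, a function pointer actually points to a
+// three-word function descriptor in the data segment, roughly
+//
+//   struct FunctionDescriptor { address entry; address toc; address env; };
+//
+// resolve_function_descriptor_to_code_pointer() above returns 'entry' for
+// such descriptors and passes real code pointers through unchanged.)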
+
+bool os::dll_address_to_function_name(address addr, char *buf,
+                                      int buflen, int *offset) {
+  if (offset) {
+    *offset = -1;
+  }
+  if (buf) {
+    buf[0] = '\0';
+  }
+
+  // Resolve function ptr literals first.
+  addr = resolve_function_descriptor_to_code_pointer(addr);
+  if (!addr) {
+    return false;
+  }
+
+  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
+  return Decoder::decode(addr, buf, buflen, offset);
+}
+
+static int getModuleName(codeptr_t pc,                    // [in] program counter
+                         char* p_name, size_t namelen,    // [out] optional: module name
+                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
+                         ) {
+
+  // initialize output parameters
+  if (p_name && namelen > 0) {
+    *p_name = '\0';
+  }
+  if (p_errmsg && errmsglen > 0) {
+    *p_errmsg = '\0';
+  }
+
+  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
+  if (lib) {
+    if (p_name && namelen > 0) {
+      snprintf(p_name, namelen, "%s", lib->get_shortname());
+    }
+    return 0;
+  }
+
+  if (Verbose) {
+    fprintf(stderr, "pc outside any module");
+  }
+
+  return -1;
+
+}
+
+bool os::dll_address_to_library_name(address addr, char* buf,
+                                     int buflen, int* offset) {
+  if (offset) {
+    *offset = -1;
+  }
+  if (buf) {
+      buf[0] = '\0';
+  }
+
+  // Resolve function ptr literals first.
+  addr = resolve_function_descriptor_to_code_pointer(addr);
+  if (!addr) {
+    return false;
+  }
+
+  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
+    return true;
+  }
+  return false;
+}
+
+// Loads .dll/.so and in case of error it checks if .dll/.so was built
+// for the same architecture as Hotspot is running on
+void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+
+  if (ebuf && ebuflen > 0) {
+    ebuf[0] = '\0';
+    ebuf[ebuflen - 1] = '\0';
+  }
+
+  if (!filename || strlen(filename) == 0) {
+    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
+    return NULL;
+  }
+
+  // RTLD_LAZY is currently not implemented. The library is loaded immediately, with all its dependants.
+  void * result= ::dlopen(filename, RTLD_LAZY);
+  if (result != NULL) {
+    // Reload dll cache. Don't do this in signal handling.
+    LoadedLibraries::reload();
+    return result;
+  } else {
+    // error analysis when dlopen fails
+    const char* const error_report = ::dlerror();
+    if (error_report && ebuf && ebuflen > 0) {
+      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
+               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
+    }
+  }
+  return NULL;
+}
+
+// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
+// chances are you might want to run the generated bits against glibc-2.0
+// libdl.so, so always use locking for any version of glibc.
+void* os::dll_lookup(void* handle, const char* name) {
+  pthread_mutex_lock(&dl_mutex);
+  void* res = dlsym(handle, name);
+  pthread_mutex_unlock(&dl_mutex);
+  return res;
+}
+
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
+void os::print_dll_info(outputStream *st) {
+  st->print_cr("Dynamic libraries:");
+  LoadedLibraries::print(st);
+}
+
+void os::print_os_info(outputStream* st) {
+  st->print("OS:");
+
+  st->print("uname:");
+  struct utsname name;
+  uname(&name);
+  st->print("%s ", name.sysname);
+  st->print("%s ", name.nodename);
+  st->print("%s ", name.release);
+  st->print("%s ", name.version);
+  st->print("%s", name.machine);
+  st->cr();
+
+  // rlimit
+  st->print("rlimit:");
+  struct rlimit rlim;
+
+  st->print(" STACK ");
+  getrlimit(RLIMIT_STACK, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print("%uk", rlim.rlim_cur >> 10);
+
+  st->print(", CORE ");
+  getrlimit(RLIMIT_CORE, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print("%uk", rlim.rlim_cur >> 10);
+
+  st->print(", NPROC ");
+  st->print("%d", sysconf(_SC_CHILD_MAX));
+
+  st->print(", NOFILE ");
+  getrlimit(RLIMIT_NOFILE, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print("%d", rlim.rlim_cur);
+
+  st->print(", AS ");
+  getrlimit(RLIMIT_AS, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print("%uk", rlim.rlim_cur >> 10);
+
+  // Print limits on DATA, because it limits the C-heap.
+  st->print(", DATA ");
+  getrlimit(RLIMIT_DATA, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print("%uk", rlim.rlim_cur >> 10);
+  st->cr();
+
+  // load average
+  st->print("load average:");
+  double loadavg[3] = {-1.L, -1.L, -1.L};
+  os::loadavg(loadavg, 3);
+  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
+  st->cr();
+}
+
+void os::print_memory_info(outputStream* st) {
+
+  st->print_cr("Memory:");
+
+  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
+  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
+  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
+  if (g_multipage_error != 0) {
+    st->print_cr("  multipage error: %d", g_multipage_error);
+  }
+
+  // print out LDR_CNTRL because it affects the default page sizes
+  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
+  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
+
+  const char* const extshm = ::getenv("EXTSHM");
+  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
+
+  // Call os::Aix::get_meminfo() to retrieve memory statistics.
+  os::Aix::meminfo_t mi;
+  if (os::Aix::get_meminfo(&mi)) {
+    char buffer[256];
+    if (os::Aix::on_aix()) {
+      jio_snprintf(buffer, sizeof(buffer),
+                   "  physical total : %llu\n"
+                   "  physical free  : %llu\n"
+                   "  swap total     : %llu\n"
+                   "  swap free      : %llu\n",
+                   mi.real_total,
+                   mi.real_free,
+                   mi.pgsp_total,
+                   mi.pgsp_free);
+    } else {
+      Unimplemented();
+    }
+    st->print_raw(buffer);
+  } else {
+    st->print_cr("  (no more information available)");
+  }
+}
+
+void os::pd_print_cpu_info(outputStream* st) {
+  // cpu
+  st->print("CPU:");
+  st->print("total %d", os::processor_count());
+  // It's not safe to query number of active processors after crash
+  // st->print("(active %d)", os::active_processor_count());
+  st->print(" %s", VM_Version::cpu_features());
+  st->cr();
+}
+
+void os::print_siginfo(outputStream* st, void* siginfo) {
+  // Use common posix version.
+  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
+  st->cr();
+}
+
+
+static void print_signal_handler(outputStream* st, int sig,
+                                 char* buf, size_t buflen);
+
+void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
+  st->print_cr("Signal Handlers:");
+  print_signal_handler(st, SIGSEGV, buf, buflen);
+  print_signal_handler(st, SIGBUS , buf, buflen);
+  print_signal_handler(st, SIGFPE , buf, buflen);
+  print_signal_handler(st, SIGPIPE, buf, buflen);
+  print_signal_handler(st, SIGXFSZ, buf, buflen);
+  print_signal_handler(st, SIGILL , buf, buflen);
+  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
+  print_signal_handler(st, SR_signum, buf, buflen);
+  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
+  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
+  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
+  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
+  print_signal_handler(st, SIGTRAP, buf, buflen);
+  print_signal_handler(st, SIGDANGER, buf, buflen);
+}
+
+static char saved_jvm_path[MAXPATHLEN] = {0};
+
+// Find the full path to the current module, libjvm.so or libjvm_g.so
+void os::jvm_path(char *buf, jint buflen) {
+  // Error checking.
+  if (buflen < MAXPATHLEN) {
+    assert(false, "must use a large-enough buffer");
+    buf[0] = '\0';
+    return;
+  }
+  // Lazily resolve the path to the current module.
+  if (saved_jvm_path[0] != 0) {
+    strcpy(buf, saved_jvm_path);
+    return;
+  }
+
+  Dl_info dlinfo;
+  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
+  assert(ret != 0, "cannot locate libjvm");
+  char* rp = realpath((char *)dlinfo.dli_fname, buf);
+  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
+
+  strcpy(saved_jvm_path, buf);
+}
+
+void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
+  // no prefix required, not even "_"
+}
+
+void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
+  // no suffix required
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// sun.misc.Signal support
+
+static volatile jint sigint_count = 0;
+
+static void
+UserHandler(int sig, void *siginfo, void *context) {
+  // 4511530 - sem_post is serialized and handled by the manager thread. When
+  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
+  // don't want to flood the manager thread with sem_post requests.
+  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
+    return;
+
+  // Ctrl-C is pressed during error reporting, likely because the error
+  // handler fails to abort. Let VM die immediately.
+  if (sig == SIGINT && is_error_reported()) {
+    os::die();
+  }
+
+  os::signal_notify(sig);
+}
+
+void* os::user_handler() {
+  return CAST_FROM_FN_PTR(void*, UserHandler);
+}
+
+extern "C" {
+  typedef void (*sa_handler_t)(int);
+  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+}
+
+void* os::signal(int signal_number, void* handler) {
+  struct sigaction sigAct, oldSigAct;
+
+  sigfillset(&(sigAct.sa_mask));
+
+  // Do not block out synchronous signals in the signal handler.
+  // Blocking synchronous signals only makes sense if you can really
+  // be sure that those signals won't happen during signal handling,
+  // when the blocking applies.  Normal signal handlers are lean and
+  // do not cause signals. But our signal handlers tend to be "risky" -
+  // secondary SIGSEGVs, SIGILLs and SIGBUSes may and do happen.
+  // On AIX/PASE there was a case where a SIGSEGV happened, followed
+  // by a SIGILL, which was blocked due to the signal mask. The process
+  // just hung forever. Better to crash from a secondary signal than to hang.
+  sigdelset(&(sigAct.sa_mask), SIGSEGV);
+  sigdelset(&(sigAct.sa_mask), SIGBUS);
+  sigdelset(&(sigAct.sa_mask), SIGILL);
+  sigdelset(&(sigAct.sa_mask), SIGFPE);
+  sigdelset(&(sigAct.sa_mask), SIGTRAP);
+
+  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
+
+  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
+
+  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
+    // -1 means registration failed
+    return (void *)-1;
+  }
+
+  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
+}
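+
+// Usage sketch (illustrative only): install the VM's user handler for a
+// signal and remember the previous disposition:
+//
+//   void* old_handler = os::signal(SIGHUP, os::user_handler());
+//   if (old_handler == (void*)-1) {
+//     // registration failed
+//   }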
+
+void os::signal_raise(int signal_number) {
+  ::raise(signal_number);
+}
+
+//
+// The following code is moved from os.cpp for making this
+// code platform specific, which it is by its very nature.
+//
+
+// Will be modified when max signal is changed to be dynamic
+int os::sigexitnum_pd() {
+  return NSIG;
+}
+
+// a counter for each possible signal value
+static volatile jint pending_signals[NSIG+1] = { 0 };
+
+// POSIX handshaking semaphore.
+static sem_t sig_sem;
+
+void os::signal_init_pd() {
+  // Initialize signal structures
+  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
+
+  // Initialize signal semaphore
+  int rc = ::sem_init(&sig_sem, 0, 0);
+  guarantee(rc != -1, "sem_init failed");
+}
+
+void os::signal_notify(int sig) {
+  Atomic::inc(&pending_signals[sig]);
+  ::sem_post(&sig_sem);
+}
+
+static int check_pending_signals(bool wait) {
+  Atomic::store(0, &sigint_count);
+  for (;;) {
+    for (int i = 0; i < NSIG + 1; i++) {
+      jint n = pending_signals[i];
+      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+        return i;
+      }
+    }
+    if (!wait) {
+      return -1;
+    }
+    JavaThread *thread = JavaThread::current();
+    ThreadBlockInVM tbivm(thread);
+
+    bool threadIsSuspended;
+    do {
+      thread->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
+
+      ::sem_wait(&sig_sem);
+
+      // were we externally suspended while we were waiting?
+      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
+      if (threadIsSuspended) {
+        //
+        // The semaphore has been incremented, but while we were waiting
+        // another thread suspended us. We don't want to continue running
+        // while suspended because that would surprise the thread that
+        // suspended us.
+        //
+        ::sem_post(&sig_sem);
+
+        thread->java_suspend_self();
+      }
+    } while (threadIsSuspended);
+  }
+}
+
+int os::signal_lookup() {
+  return check_pending_signals(false);
+}
+
+int os::signal_wait() {
+  return check_pending_signals(true);
+}
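+
+// Usage sketch (illustrative): the Java signal dispatcher thread blocks in
+// signal_wait() and forwards the signal number to the Java-level handler:
+//
+//   for (;;) {
+//     const int sig = os::signal_wait();      // blocks on sig_sem
+//     if (sig == os::sigexitnum_pd()) break;  // VM shutdown
+//     // ... dispatch sig via sun.misc.Signal ...
+//   }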
+
+////////////////////////////////////////////////////////////////////////////////
+// Virtual Memory
+
+// AddrRange describes an immutable address range
+//
+// This is a helper class for the 'shared memory bookkeeping' below.
+class AddrRange {
+  friend class ShmBkBlock;
+
+  char* _start;
+  size_t _size;
+
+public:
+
+  AddrRange(char* start, size_t size)
+    : _start(start), _size(size)
+  {}
+
+  AddrRange(const AddrRange& r)
+    : _start(r.start()), _size(r.size())
+  {}
+
+  char* start() const { return _start; }
+  size_t size() const { return _size; }
+  char* end() const { return _start + _size; }
+  bool is_empty() const { return _size == 0; }
+
+  static AddrRange empty_range() { return AddrRange(NULL, 0); }
+
+  bool contains(const char* p) const {
+    return start() <= p && end() > p;
+  }
+
+  bool contains(const AddrRange& range) const {
+    return start() <= range.start() && end() >= range.end();
+  }
+
+  bool intersects(const AddrRange& range) const {
+    return (range.start() <= start() && range.end() > start()) ||
+           (range.start() < end() && range.end() >= end()) ||
+           contains(range);
+  }
+
+  bool is_same_range(const AddrRange& range) const {
+    return start() == range.start() && size() == range.size();
+  }
+
+  // return the closest inside range consisting of whole pages
+  AddrRange find_closest_aligned_range(size_t pagesize) const {
+    if (pagesize == 0 || is_empty()) {
+      return empty_range();
+    }
+    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
+    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
+    if (from > to) {
+      return empty_range();
+    }
+    return AddrRange(from, to - from);
+  }
+};
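+
+// Worked example (illustrative) for AddrRange::find_closest_aligned_range():
+// with pagesize 0x1000, the range [0x1001 .. 0x5FFF) shrinks inward to
+// [0x2000 .. 0x5000) - three whole pages; a range smaller than one page
+// yields empty_range().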
+
+////////////////////////////////////////////////////////////////////////////
+// shared memory bookkeeping
+//
+// The os::reserve_memory() API and friends hand out different kinds of memory, depending
+// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
+//
+// But these memory types have to be treated differently. For example, to uncommit
+// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
+// disclaim64() is needed.
+//
+// Therefore we need to keep track of the allocated memory segments and their
+// properties.
+
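+// Overview (illustrative): the bookkeeping below is a singly linked list of
+// ShmBkBlock nodes, guarded by the LOCK_SHMBK/UNLOCK_SHMBK macros:
+//
+//   g_shmbk_list -> [ShmBkMappedBlock] -> [ShmBkShmatedBlock] -> ... -> NULL
+//
+// Each node remembers its address range, page size and pinned state, and
+// knows how to disclaim (uncommit) and release its own memory.
+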
+// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
+class ShmBkBlock {
+
+  ShmBkBlock* _next;
+
+protected:
+
+  AddrRange _range;
+  const size_t _pagesize;
+  const bool _pinned;
+
+public:
+
+  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
+    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
+
+    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
+    assert(!_range.is_empty(), "invalid range");
+  }
+
+  virtual void print(outputStream* st) const {
+    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
+              _range.start(), _range.end(), _range.size(),
+              _range.size() / _pagesize, describe_pagesize(_pagesize),
+              _pinned ? "pinned" : "");
+  }
+
+  enum Type { MMAP, SHMAT };
+  virtual Type getType() = 0;
+
+  char* base() const { return _range.start(); }
+  size_t size() const { return _range.size(); }
+
+  void setAddrRange(AddrRange range) {
+    _range = range;
+  }
+
+  bool containsAddress(const char* p) const {
+    return _range.contains(p);
+  }
+
+  bool containsRange(const char* p, size_t size) const {
+    return _range.contains(AddrRange((char*)p, size));
+  }
+
+  bool isSameRange(const char* p, size_t size) const {
+    return _range.is_same_range(AddrRange((char*)p, size));
+  }
+
+  virtual bool disclaim(char* p, size_t size) = 0;
+  virtual bool release() = 0;
+
+  // blocks live in a list.
+  ShmBkBlock* next() const { return _next; }
+  void set_next(ShmBkBlock* blk) { _next = blk; }
+
+}; // end: ShmBkBlock
+
+
+// ShmBkMappedBlock: describes a block allocated with mmap()
+class ShmBkMappedBlock : public ShmBkBlock {
+public:
+
+  ShmBkMappedBlock(AddrRange range)
+    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
+
+  void print(outputStream* st) const {
+    ShmBkBlock::print(st);
+    st->print_cr(" - mmap'ed");
+  }
+
+  Type getType() {
+    return MMAP;
+  }
+
+  bool disclaim(char* p, size_t size) {
+
+    AddrRange r(p, size);
+
+    guarantee(_range.contains(r), "invalid disclaim");
+
+    // only disclaim whole ranges.
+    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
+    if (r2.is_empty()) {
+      return true;
+    }
+
+    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
+
+    if (rc != 0) {
+      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
+    }
+
+    return rc == 0;
+  }
+
+  bool release() {
+    // mmap'ed blocks are released using munmap
+    if (::munmap(_range.start(), _range.size()) != 0) {
+      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
+      return false;
+    }
+    return true;
+  }
+}; // end: ShmBkMappedBlock
+
+// ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
+class ShmBkShmatedBlock : public ShmBkBlock {
+public:
+
+  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
+    : ShmBkBlock(range, pagesize, pinned) {}
+
+  void print(outputStream* st) const {
+    ShmBkBlock::print(st);
+    st->print_cr(" - shmat'ed");
+  }
+
+  Type getType() {
+    return SHMAT;
+  }
+
+  bool disclaim(char* p, size_t size) {
+
+    AddrRange r(p, size);
+
+    if (_pinned) {
+      return true;
+    }
+
+    // shmat'ed blocks are disclaimed using disclaim64
+    guarantee(_range.contains(r), "invalid disclaim");
+
+    // only disclaim whole ranges.
+    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
+    if (r2.is_empty()) {
+      return true;
+    }
+
+    const bool rc = my_disclaim64(r2.start(), r2.size());
+
+    if (Verbose && !rc) {
+      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
+    }
+
+    return rc;
+  }
+
+  bool release() {
+    bool rc = false;
+    if (::shmdt(_range.start()) != 0) {
+      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
+    } else {
+      rc = true;
+    }
+    return rc;
+  }
+
+}; // end: ShmBkShmatedBlock
+
+static ShmBkBlock* g_shmbk_list = NULL;
+static volatile jint g_shmbk_table_lock = 0;
+
+// keep some usage statistics
+static struct {
+  int nodes;    // number of nodes in list
+  size_t bytes; // reserved - not committed - bytes.
+  int reserves; // how often reserve was called
+  int lookups;  // how often a lookup was made
+} g_shmbk_stats = { 0, 0, 0, 0 };
+
+// add information about a shared memory segment to the bookkeeping
+static void shmbk_register(ShmBkBlock* p_block) {
+  guarantee(p_block, "logic error");
+  p_block->set_next(g_shmbk_list);
+  g_shmbk_list = p_block;
+  g_shmbk_stats.reserves ++;
+  g_shmbk_stats.bytes += p_block->size();
+  g_shmbk_stats.nodes ++;
+}
+
+// remove information about a shared memory segment by its starting address
+static void shmbk_unregister(ShmBkBlock* p_block) {
+  ShmBkBlock* p = g_shmbk_list;
+  ShmBkBlock* prev = NULL;
+  while (p) {
+    if (p == p_block) {
+      if (prev) {
+        prev->set_next(p->next());
+      } else {
+        g_shmbk_list = p->next();
+      }
+      g_shmbk_stats.nodes --;
+      g_shmbk_stats.bytes -= p->size();
+      return;
+    }
+    prev = p;
+    p = p->next();
+  }
+  assert(false, "should not happen");
+}
+
+// given a pointer, return shared memory bookkeeping record for the segment it points into
+// using the returned block info must happen under lock protection
+static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
+  g_shmbk_stats.lookups ++;
+  ShmBkBlock* p = g_shmbk_list;
+  while (p) {
+    if (p->containsAddress(addr)) {
+      return p;
+    }
+    p = p->next();
+  }
+  return NULL;
+}
+
+// dump all information about all memory segments allocated with os::reserve_memory()
+void shmbk_dump_info() {
+  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
+    "total reserves: %d total lookups: %d)",
+    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
+  const ShmBkBlock* p = g_shmbk_list;
+  int i = 0;
+  while (p) {
+    p->print(tty);
+    p = p->next();
+    i ++;
+  }
+}
+
+#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
+#define UNLOCK_SHMBK   }
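+
+// Usage sketch (illustrative): the two macros deliberately open and close a
+// scope, so the ThreadCritical is held exactly for the statements between them:
+//
+//   LOCK_SHMBK
+//     ShmBkBlock* const block = shmbk_find_by_containing_address(p);
+//     // ... use block ...
+//   UNLOCK_SHMBK
+//
+// An early return inside the block leaves the scope, and the ThreadCritical
+// destructor releases the lock.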
+
+// End: shared memory bookkeeping
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+int os::vm_page_size() {
+  // Seems redundant as all get out
+  assert(os::Aix::page_size() != -1, "must call os::init");
+  return os::Aix::page_size();
+}
+
+// Aix allocates memory by pages.
+int os::vm_allocation_granularity() {
+  assert(os::Aix::page_size() != -1, "must call os::init");
+  return os::Aix::page_size();
+}
+
+int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
+
+  // Commit is a noop. There is no explicit commit
+  // needed on AIX. Memory is committed when touched.
+  //
+  // Debug : check address range for validity
+#ifdef ASSERT
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    } else if (!block->containsRange(addr, size)) {
+      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
+      shmbk_dump_info();
+      assert(false, "invalid range");
+      return false;
+    }
+  UNLOCK_SHMBK
+#endif // ASSERT
+
+  return 0;
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
+  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
+                                  const char* mesg) {
+  assert(mesg != NULL, "mesg must be specified");
+  os::Aix::commit_memory_impl(addr, size, exec);
+}
+
+int os::Aix::commit_memory_impl(char* addr, size_t size,
+                                size_t alignment_hint, bool exec) {
+  return os::Aix::commit_memory_impl(addr, size, exec);
+}
+
+bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
+                          bool exec) {
+  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
+}
+
+void os::pd_commit_memory_or_exit(char* addr, size_t size,
+                                  size_t alignment_hint, bool exec,
+                                  const char* mesg) {
+  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
+}
+
+bool os::pd_uncommit_memory(char* addr, size_t size) {
+
+  // Delegate to ShmBkBlock class which knows how to uncommit its memory.
+
+  bool rc = false;
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    } else if (!block->containsRange(addr, size)) {
+      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
+      shmbk_dump_info();
+      assert(false, "invalid range");
+      return false;
+    }
+    rc = block->disclaim(addr, size);
+  UNLOCK_SHMBK
+
+  if (Verbose && !rc) {
+    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
+  }
+  return rc;
+}
+
+bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
+  return os::guard_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::unguard_memory(addr, size);
+}
+
+void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
+}
+
+void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
+}
+
+void os::numa_make_global(char *addr, size_t bytes) {
+}
+
+void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
+}
+
+bool os::numa_topology_changed() {
+  return false;
+}
+
+size_t os::numa_get_groups_num() {
+  return 1;
+}
+
+int os::numa_get_group_id() {
+  return 0;
+}
+
+size_t os::numa_get_leaf_groups(int *ids, size_t size) {
+  if (size > 0) {
+    ids[0] = 0;
+    return 1;
+  }
+  return 0;
+}
+
+bool os::get_page_info(char *start, page_info* info) {
+  return false;
+}
+
+char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
+  return end;
+}
+
+// Flags for reserve_shmatted_memory:
+#define RESSHM_WISHADDR_OR_FAIL                     1
+#define RESSHM_TRY_16M_PAGES                        2
+#define RESSHM_16M_PAGES_OR_FAIL                    4
+
+// Result of reserve_shmatted_memory:
+struct shmatted_memory_info_t {
+  char* addr;
+  size_t pagesize;
+  bool pinned;
+};
+
+// Reserve a section of shmatted memory.
+// params:
+// bytes [in]: size of memory, in bytes
+// requested_addr [in]: wish address.
+//                      NULL = no wish.
+//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
+//                      be obtained, function will fail. Otherwise wish address is treated as hint and
+//                      another pointer is returned.
+// flags [in]:          some flags. Valid flags are:
+//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
+//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
+//                          (requires UseLargePages and Use16MPages)
+//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
+//                          Otherwise any other page size will do.
+// p_info [out] :       holds information about the created shared memory segment.
+static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
+
+  assert(p_info, "parameter error");
+
+  // init output struct.
+  p_info->addr = NULL;
+
+  // We should not be here for EXTSHM=ON.
+  if (os::Aix::extshm()) {
+    ShouldNotReachHere();
+  }
+
+  // extract flags. sanity checks.
+  const bool wishaddr_or_fail =
+    flags & RESSHM_WISHADDR_OR_FAIL;
+  const bool try_16M_pages =
+    flags & RESSHM_TRY_16M_PAGES;
+  const bool f16M_pages_or_fail =
+    flags & RESSHM_16M_PAGES_OR_FAIL;
+
+  // First check: if a wish address is given and it is mandatory, but not aligned to a
+  // 256M segment boundary, shmat will fail anyway, so save some cycles by failing right away.
+  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
+    if (wishaddr_or_fail) {
+      return false;
+    } else {
+      requested_addr = NULL;
+    }
+  }
+
+  char* addr = NULL;
+
+  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
+  // pagesize dynamically.
+  const size_t size = align_size_up(bytes, SIZE_16M);
+
+  // reserve the shared segment
+  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
+  if (shmid == -1) {
+    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
+    return false;
+  }
+
+  // Important note:
+  // We must not leave a shm segment alive upon leaving this function. The segment must be
+  // removed from the system right after it has been attached. System V shm segments are
+  // global and survive the process.
+  // So, from here on: do not assert, do not return - always do a "goto cleanup_shm".
+
+  // try forcing the page size
+  size_t pagesize = -1; // unknown so far
+
+  if (UseLargePages) {
+
+    struct shmid_ds shmbuf;
+    memset(&shmbuf, 0, sizeof(shmbuf));
+
+    // First, try to take from 16M page pool if...
+    if (os::Aix::can_use_16M_pages()  // we can ...
+        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
+        && try_16M_pages) {           // caller wants us to.
+      shmbuf.shm_pagesize = SIZE_16M;
+      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
+        pagesize = SIZE_16M;
+      } else {
+        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
+                size / SIZE_16M, errno);
+        if (f16M_pages_or_fail) {
+          goto cleanup_shm;
+        }
+      }
+    }
+
+    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
+    // because the 64K page pool may also be exhausted.
+    if (pagesize == -1) {
+      shmbuf.shm_pagesize = SIZE_64K;
+      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
+        pagesize = SIZE_64K;
+      } else {
+        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
+                size / SIZE_64K, errno);
+        // here I give up. leave page_size -1 - later, after attaching, we will query the
+        // real page size of the attached memory. (in theory, it may be something different
+        // from 4K if LDR_CNTRL SHM_PSIZE is set)
+      }
+    }
+  }
+
+  // sanity point
+  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
+
+  // Now attach the shared segment.
+  addr = (char*) shmat(shmid, requested_addr, 0);
+  if (addr == (char*)-1) {
+    // How to handle attach failure:
+    // If it failed for a specific wish address, tolerate this: if the wish address
+    // was mandatory, fail; if not, retry anywhere.
+    // If it failed for any other reason, treat it as a fatal error.
+    addr = NULL;
+    if (requested_addr) {
+      if (wishaddr_or_fail) {
+        goto cleanup_shm;
+      } else {
+        addr = (char*) shmat(shmid, NULL, 0);
+        if (addr == (char*)-1) { // fatal
+          addr = NULL;
+          warning("shmat failed (errno: %d)", errno);
+          goto cleanup_shm;
+        }
+      }
+    } else { // fatal
+      addr = NULL;
+      warning("shmat failed (errno: %d)", errno);
+      goto cleanup_shm;
+    }
+  }
+
+  // sanity point
+  assert(addr && addr != (char*) -1, "wrong address");
+
+  // after successful Attach remove the segment - right away.
+  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
+    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+    guarantee(false, "failed to remove shared memory segment!");
+  }
+  shmid = -1;
+
+  // query the real page size. In case setting the page size did not work (see above), the system
+  // may have given us something other than 4K (LDR_CNTRL)
+  {
+    const size_t real_pagesize = os::Aix::query_pagesize(addr);
+    if (pagesize != -1) {
+      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
+    } else {
+      pagesize = real_pagesize;
+    }
+  }
+
+  // Now register the reserved block with internal book keeping.
+  LOCK_SHMBK
+    const bool pinned = (pagesize >= SIZE_16M);
+    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
+    assert(p_block, "");
+    shmbk_register(p_block);
+  UNLOCK_SHMBK
+
+cleanup_shm:
+
+  // if we have not done so yet, remove the shared memory segment. This is very important.
+  if (shmid != -1) {
+    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
+      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+      guarantee(false, "failed to remove shared memory segment!");
+    }
+    shmid = -1;
+  }
+
+  // trace
+  if (Verbose && !addr) {
+    if (requested_addr != NULL) {
+      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
+    } else {
+      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
+    }
+  }
+
+  // hand info to caller
+  if (addr) {
+    p_info->addr = addr;
+    p_info->pagesize = pagesize;
+    p_info->pinned = (pagesize == SIZE_16M);
+  }
+
+  // sanity test:
+  if (requested_addr && addr && wishaddr_or_fail) {
+    guarantee(addr == requested_addr, "shmat error");
+  }
+
+  // just one more test to really make sure we have no dangling shm segments.
+  guarantee(shmid == -1, "dangling shm segments");
+
+  return addr != NULL;
+
+} // end: reserve_shmatted_memory
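+
+// Usage sketch (illustrative only; SIZE_1G and wish_addr are placeholders):
+// reserve memory at a fixed address, trying 16M pages but accepting smaller ones:
+//
+//   shmatted_memory_info_t info;
+//   const int flags = RESSHM_WISHADDR_OR_FAIL | RESSHM_TRY_16M_PAGES;
+//   if (reserve_shmatted_memory(SIZE_1G, wish_addr, flags, &info)) {
+//     // info.addr == wish_addr; info.pagesize and info.pinned describe
+//     // what the OS actually gave us.
+//   }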
+
+// Reserve memory using mmap. Behaves like reserve_shmatted_memory() with respect
+// to the wish address; returns NULL in case of an error.
+static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
+
+  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
+  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
+    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
+    return NULL;
+  }
+
+  const size_t size = align_size_up(bytes, SIZE_4K);
+
+  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
+  // msync(MS_INVALIDATE) (see os::uncommit_memory)
+  int flags = MAP_ANONYMOUS | MAP_SHARED;
+
+  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
+// it means if a wish address is given but MAP_FIXED is not set.
+  //
+  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
+  // clobbers the address range, which is probably not what the caller wants. That's
+  // why I assert here (again) that the SPEC1170 compat mode is off.
+  // If we want to be able to run under SPEC1170, we have to do some porting and
+  // testing.
+  if (requested_addr != NULL) {
+    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
+    flags |= MAP_FIXED;
+  }
+
+  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
+
+  if (addr == MAP_FAILED) {
+    // attach failed: tolerate for specific wish addresses. Not being able to attach
+    // anywhere is a fatal error.
+    if (requested_addr == NULL) {
+      // It's ok to fail here if the machine does not have enough memory.
+      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
+    }
+    addr = NULL;
+    goto cleanup_mmap;
+  }
+
+  // If we did request a specific address and that address was not available, fail.
+  if (addr && requested_addr) {
+    guarantee(addr == requested_addr, "unexpected");
+  }
+
+  // register this mmap'ed segment with book keeping
+  LOCK_SHMBK
+    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
+    assert(p_block, "");
+    shmbk_register(p_block);
+  UNLOCK_SHMBK
+
+cleanup_mmap:
+
+  // trace
+  if (Verbose) {
+    if (addr) {
+      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
+    }
+    else {
+      if (requested_addr != NULL) {
+        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
+      } else {
+        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
+      }
+    }
+  }
+
+  return addr;
+
+} // end: reserve_mmaped_memory
+
+// Reserves and attaches a shared memory segment.
+// Will assert if a wish address is given and could not be obtained.
+char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+  return os::attempt_reserve_memory_at(bytes, requested_addr);
+}
+
+bool os::pd_release_memory(char* addr, size_t size) {
+
+  // Delegate to the ShmBkBlock class, which knows how to release its memory.
+
+  bool rc = false;
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    }
+    else if (!block->isSameRange(addr, size)) {
+      if (block->getType() == ShmBkBlock::MMAP) {
+        // Release either the same range, or a sub-range at the beginning or the end of it.
+        if (block->base() == addr && size < block->size()) {
+          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
+          assert(b, "");
+          shmbk_register(b);
+          block->setAddrRange(AddrRange(addr, size));
+        }
+        else if (addr > block->base() && addr + size == block->base() + block->size()) {
+          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
+          assert(b, "");
+          shmbk_register(b);
+          block->setAddrRange(AddrRange(addr, size));
+        }
+        else {
+          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
+          shmbk_dump_info();
+          assert(false, "invalid mmap range");
+          return false;
+        }
+      }
+      else {
+        // Release only the same range. No partial release allowed.
+        // Soften the requirement a bit, because the caller may think it owns a smaller
+        // size than the block actually has, due to alignment etc.
+        if (block->base() != addr || block->size() < size) {
+          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
+          shmbk_dump_info();
+          assert(false, "invalid shmget range");
+          return false;
+        }
+      }
+    }
+    rc = block->release();
+    assert(rc, "release failed");
+    // remove block from bookkeeping
+    shmbk_unregister(block);
+    delete block;
+  UNLOCK_SHMBK
+
+  if (!rc) {
+    warning("failed to released %lu bytes at 0x%p", size, addr);
+  }
+
+  return rc;
+}
+
+static bool checked_mprotect(char* addr, size_t size, int prot) {
+
+  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
+  // not tell me if protection failed when trying to protect an un-protectable range.
+  //
+  // This means if the memory was allocated using shmget/shmat, protection won't work
+  // but mprotect will still return 0:
+  //
+  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
+
+  bool rc = (::mprotect(addr, size, prot) == 0);
+
+  if (!rc) {
+    const char* const s_errno = strerror(errno);
+    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
+    return false;
+  }
+
+  // mprotect success check
+  //
+  // Mprotect said it changed the protection but can I believe it?
+  //
+  // To be sure I need to check the protection afterwards. Try to
+  // read from protected memory and check whether that causes a segfault.
+  //
+  if (!os::Aix::xpg_sus_mode()) {
+
+    if (StubRoutines::SafeFetch32_stub()) {
+
+      const bool read_protected =
+        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
+         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
+
+      if (prot & PROT_READ) {
+        rc = !read_protected;
+      } else {
+        rc = read_protected;
+      }
+    }
+  }
+  if (!rc) {
+    assert(false, "mprotect failed.");
+  }
+  return rc;
+}
+
+// Set protections specified
+bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
+  unsigned int p = 0;
+  switch (prot) {
+  case MEM_PROT_NONE: p = PROT_NONE; break;
+  case MEM_PROT_READ: p = PROT_READ; break;
+  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
+  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
+  default:
+    ShouldNotReachHere();
+  }
+  // is_committed is unused.
+  return checked_mprotect(addr, size, p);
+}
+
+bool os::guard_memory(char* addr, size_t size) {
+  return checked_mprotect(addr, size, PROT_NONE);
+}
+
+bool os::unguard_memory(char* addr, size_t size) {
+  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
+}
+
+// Large page support
+
+static size_t _large_page_size = 0;
+
+// Enable large page support if OS allows that.
+void os::large_page_init() {
+
+  // Note: os::Aix::query_multipage_support must run first.
+
+  if (!UseLargePages) {
+    return;
+  }
+
+  if (!Aix::can_use_64K_pages()) {
+    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
+    UseLargePages = false;
+    return;
+  }
+
+  if (!Aix::can_use_16M_pages() && Use16MPages) {
+    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
+            " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
+  }
+
+  // Do not report 16M page alignment as part of os::_page_sizes if we are
+  // explicitly forbidden from using 16M pages. Doing so would increase the
+  // alignment the garbage collector calculates with, slightly increasing
+  // heap usage. We should only pay for 16M alignment if we really want to
+  // use 16M pages.
+  if (Use16MPages && Aix::can_use_16M_pages()) {
+    _large_page_size = SIZE_16M;
+    _page_sizes[0] = SIZE_16M;
+    _page_sizes[1] = SIZE_64K;
+    _page_sizes[2] = SIZE_4K;
+    _page_sizes[3] = 0;
+  } else if (Aix::can_use_64K_pages()) {
+    _large_page_size = SIZE_64K;
+    _page_sizes[0] = SIZE_64K;
+    _page_sizes[1] = SIZE_4K;
+    _page_sizes[2] = 0;
+  }
+
+  if (Verbose) {
+    ("Default large page size is 0x%llX.", _large_page_size);
+  }
+} // end: os::large_page_init()
+
+char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
+  // "exec" is passed in but not used. Creating the shared image for
+  // the code cache doesn't have an SHM_X executable permission to check.
+  Unimplemented();
+  return 0;
+}
+
+bool os::release_memory_special(char* base, size_t bytes) {
+  // detaching the SHM segment will also delete it, see reserve_memory_special()
+  Unimplemented();
+  return false;
+}
+
+size_t os::large_page_size() {
+  return _large_page_size;
+}
+
+bool os::can_commit_large_page_memory() {
+  // Well, sadly we cannot commit anything at all (see comment in
+  // os::commit_memory), but we claim to, so we can make use of large pages.
+  return true;
+}
+
+bool os::can_execute_large_page_memory() {
+  // We can do that
+  return true;
+}
+
+// Reserve memory at an arbitrary address, only if that area is
+// available (and not reserved for something else).
+char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
+
+  bool use_mmap = false;
+
+  // mmap: smaller graining, no large page support
+  // shm: large graining (256M), large page support, limited number of shm segments
+  //
+  // Prefer mmap wherever we either do not need large page support or have OS limits
+
+  if (!UseLargePages || bytes < SIZE_16M) {
+    use_mmap = true;
+  }
+
+  char* addr = NULL;
+  if (use_mmap) {
+    addr = reserve_mmaped_memory(bytes, requested_addr);
+  } else {
+    // shmat: wish address is mandatory, and do not try 16M pages here.
+    shmatted_memory_info_t info;
+    const int flags = RESSHM_WISHADDR_OR_FAIL;
+    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
+      addr = info.addr;
+    }
+  }
+
+  return addr;
+}
+
+size_t os::read(int fd, void *buf, unsigned int nBytes) {
+  return ::read(fd, buf, nBytes);
+}
+
+#define NANOSECS_PER_MILLISEC 1000000
+
+int os::sleep(Thread* thread, jlong millis, bool interruptible) {
+  assert(thread == Thread::current(), "thread consistency check");
+
+  // Prevent nasty overflow in deadline calculation
+  // by handling long sleeps similarly to Solaris or Windows.
+  const jlong limit = INT_MAX;
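+  // (Example: Thread.sleep(Long.MAX_VALUE) would otherwise make
+  // 'millis * NANOSECS_PER_MILLISEC' below overflow a jlong.)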
+  int result;
+  while (millis > limit) {
+    if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
+      return result;
+    }
+    millis -= limit;
+  }
+
+  ParkEvent * const slp = thread->_SleepEvent;
+  slp->reset();
+  OrderAccess::fence();
+
+  if (interruptible) {
+    jlong prevtime = javaTimeNanos();
+
+    // Prevent precision loss and too long sleeps
+    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
+
+    for (;;) {
+      if (os::is_interrupted(thread, true)) {
+        return OS_INTRPT;
+      }
+
+      jlong newtime = javaTimeNanos();
+
+      assert(newtime >= prevtime, "time moving backwards");
+      // Doing prevtime and newtime in microseconds doesn't help precision,
+      // and trying to round up to avoid lost milliseconds can result in a
+      // too-short delay.
+      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+
+      if (millis <= 0) {
+        return OS_OK;
+      }
+
+      // Stop sleeping if we passed the deadline
+      if (newtime >= deadline) {
+        return OS_OK;
+      }
+
+      prevtime = newtime;
+
+      {
+        assert(thread->is_Java_thread(), "sanity check");
+        JavaThread *jt = (JavaThread *) thread;
+        ThreadBlockInVM tbivm(jt);
+        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
+
+        jt->set_suspend_equivalent();
+
+        slp->park(millis);
+
+        // were we externally suspended while we were waiting?
+        jt->check_and_wait_while_suspended();
+      }
+    }
+  } else {
+    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+    jlong prevtime = javaTimeNanos();
+
+    // Prevent precision loss and too long sleeps
+    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
+
+    for (;;) {
+      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
+      // the 1st iteration ...
+      jlong newtime = javaTimeNanos();
+
+      if (newtime - prevtime < 0) {
+        // time moving backwards, should only happen if no monotonic clock
+        // not a guarantee() because JVM should not abort on kernel/glibc bugs
+        // - HS14 Commented out as not implemented.
+        // - TODO Maybe we should implement it?
+        //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
+      } else {
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+      }
+
+      if (millis <= 0) break;
+
+      if (newtime >= deadline) {
+        break;
+      }
+
+      prevtime = newtime;
+      slp->park(millis);
+    }
+    return OS_OK;
+  }
+}
+
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Uninterruptible sleep; short-time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  } else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+}
+
+// Sleep forever; naked call to OS-specific sleep; use with CAUTION
+void os::infinite_sleep() {
+  while (true) {    // sleep forever ...
+    ::sleep(100);   // ... 100 seconds at a time
+  }
+}
+
+// Used to convert frequent JVM_Yield() to nops
+bool os::dont_yield() {
+  return DontYieldALot;
+}
+
+void os::yield() {
+  sched_yield();
+}
+
+os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
+
+void os::yield_all(int attempts) {
+  // Yields to all threads, including threads with lower priorities
+  // Threads on Linux all have the same priority, so the Solaris-style
+  // os::yield_all() with nanosleep(1ms) is not necessary.
+  sched_yield();
+}
+
+// Called from the tight loops to possibly influence time-sharing heuristics
+void os::loop_breaker(int attempts) {
+  os::yield_all(attempts);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// thread priority support
+
+// From AIX manpage to pthread_setschedparam
+// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
+//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
+//
+// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
+// range from 40 to 80, where 40 is the least favored priority and 80
+// is the most favored."
+//
+// (Actually, I doubt this even has an impact on AIX, as we do kernel
+// scheduling there; however, this still leaves iSeries.)
+//
+// We use the same values for AIX and PASE.
+int os::java_to_os_priority[CriticalPriority + 1] = {
+  54,             // 0 Entry should never be used
+
+  55,             // 1 MinPriority
+  55,             // 2
+  56,             // 3
+
+  56,             // 4
+  57,             // 5 NormPriority
+  57,             // 6
+
+  58,             // 7
+  58,             // 8
+  59,             // 9 NearMaxPriority
+
+  60,             // 10 MaxPriority
+
+  60              // 11 CriticalPriority
+};
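+
+// (Example: a Java thread at NormPriority (5) is scheduled at AIX priority 57;
+// MaxPriority (10) maps to 60, the most favored value we use.)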
+
+OSReturn os::set_native_priority(Thread* thread, int newpri) {
+  if (!UseThreadPriorities) return OS_OK;
+  pthread_t thr = thread->osthread()->pthread_id();
+  int policy = SCHED_OTHER;
+  struct sched_param param;
+  param.sched_priority = newpri;
+  int ret = pthread_setschedparam(thr, policy, &param);
+
+  if (Verbose) {
+    if (ret == 0) {
+      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
+    } else {
+      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
+              (int)thr, newpri, ret, strerror(ret));
+    }
+  }
+  return (ret == 0) ? OS_OK : OS_ERR;
+}
+
+OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
+  if (!UseThreadPriorities) {
+    *priority_ptr = java_to_os_priority[NormPriority];
+    return OS_OK;
+  }
+  pthread_t thr = thread->osthread()->pthread_id();
+  int policy = SCHED_OTHER;
+  struct sched_param param;
+  int ret = pthread_getschedparam(thr, &policy, &param);
+  *priority_ptr = param.sched_priority;
+
+  return (ret == 0) ? OS_OK : OS_ERR;
+}
+
+// Hint to the underlying OS that a task switch would not be good.
+// Void return because it's a hint and can fail.
+void os::hint_no_preempt() {}
+
+////////////////////////////////////////////////////////////////////////////////
+// suspend/resume support
+
+//  the low-level signal-based suspend/resume support is a remnant from the
+//  old VM-suspension that used to be for java-suspension, safepoints etc,
+//  within hotspot. Now there is a single use-case for this:
+//    - calling get_thread_pc() on the VMThread by the flat-profiler task
+//      that runs in the watcher thread.
+//  The remaining code is greatly simplified from the more general suspension
+//  code that used to be used.
+//
+//  The protocol is quite simple:
+//  - suspend:
+//      - sends a signal to the target thread
+//      - polls the suspend state of the osthread using a yield loop
+//      - target thread signal handler (SR_handler) sets suspend state
+//        and blocks in sigsuspend until continued
+//  - resume:
+//      - sets target osthread state to continue
+//      - sends signal to end the sigsuspend loop in the SR_handler
+//
+//  Note that the SR_lock plays no role in this suspend/resume protocol.
+//
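+
+// State sketch (illustrative), in terms of the os::SuspendResume states:
+//
+//   RUNNING --request_suspend()--> SUSPEND_REQUEST --SR_handler--> SUSPENDED
+//   SUSPENDED --request_wakeup()--> WAKEUP_REQUEST --SR_handler--> RUNNING
+//
+// do_suspend()/do_resume() below drive the left-hand transitions from the
+// requesting thread; SR_handler() performs the right-hand transitions in the
+// context of the target thread.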
+
+static void resume_clear_context(OSThread *osthread) {
+  osthread->set_ucontext(NULL);
+  osthread->set_siginfo(NULL);
+}
+
+static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
+  osthread->set_ucontext(context);
+  osthread->set_siginfo(siginfo);
+}
+
+//
+// Handler function invoked when a thread's execution is suspended or
+// resumed. We have to be careful that only async-safe functions are
+// called here (Note: most pthread functions are not async safe and
+// should be avoided.)
+//
+// Note: sigwait() is a more natural fit than sigsuspend() from an
+// interface point of view, but sigwait() prevents the signal handler
+// from being run. libpthread would get very confused by not having
+// its signal handlers run, which prevents sigwait()'s use with the
+// mutex granting signal.
+//
+// Currently only ever called on the VMThread and JavaThreads (PC sampling).
+//
+static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
+  // Save and restore errno to avoid confusing native code with EINTR
+  // after sigsuspend.
+  int old_errno = errno;
+
+  Thread* thread = Thread::current();
+  OSThread* osthread = thread->osthread();
+  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+  os::SuspendResume::State current = osthread->sr.state();
+  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
+    suspend_save_context(osthread, siginfo, context);
+
+    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
+    os::SuspendResume::State state = osthread->sr.suspended();
+    if (state == os::SuspendResume::SR_SUSPENDED) {
+      sigset_t suspend_set;  // signals for sigsuspend()
+
+      // get current set of blocked signals and unblock resume signal
+      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
+      sigdelset(&suspend_set, SR_signum);
+
+      // wait here until we are resumed
+      while (1) {
+        sigsuspend(&suspend_set);
+
+        os::SuspendResume::State result = osthread->sr.running();
+        if (result == os::SuspendResume::SR_RUNNING) {
+          break;
+        }
+      }
+
+    } else if (state == os::SuspendResume::SR_RUNNING) {
+      // request was cancelled, continue
+    } else {
+      ShouldNotReachHere();
+    }
+
+    resume_clear_context(osthread);
+  } else if (current == os::SuspendResume::SR_RUNNING) {
+    // request was cancelled, continue
+  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // ignore
+  } else {
+    ShouldNotReachHere();
+  }
+
+  errno = old_errno;
+}
+
+
+static int SR_initialize() {
+  struct sigaction act;
+  char *s;
+  // Get signal number to use for suspend/resume
+  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
+    int sig = ::strtol(s, 0, 10);
+    if (sig > 0 && sig < NSIG) {
+      SR_signum = sig;
+    }
+  }
+
+  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
+        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
+
+  sigemptyset(&SR_sigset);
+  sigaddset(&SR_sigset, SR_signum);
+
+  // Set up signal handler for suspend/resume.
+  act.sa_flags = SA_RESTART|SA_SIGINFO;
+  act.sa_handler = (void (*)(int)) SR_handler;
+
+  // SR_signum is blocked by default.
+  // 4528190 - We also need to block pthread restart signal (32 on all
+  // supported Linux platforms). Note that LinuxThreads need to block
+  // this signal for all threads to work properly. So we don't have
+  // to use hard-coded signal number when setting up the mask.
+  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
+
+  if (sigaction(SR_signum, &act, 0) == -1) {
+    return -1;
+  }
+
+  // Save signal flag
+  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
+  return 0;
+}
+
+static int SR_finalize() {
+  return 0;
+}
+
+static int sr_notify(OSThread* osthread) {
+  int status = pthread_kill(osthread->pthread_id(), SR_signum);
+  assert_status(status == 0, status, "pthread_kill");
+  return status;
+}
+
+// "Randomly" selected value for how long we want to spin
+// before bailing out on suspending a thread, also how often
+// we send a signal to a thread we want to resume
+static const int RANDOMLY_LARGE_INTEGER = 1000000;
+static const int RANDOMLY_LARGE_INTEGER2 = 100;
+
+// returns true on success and false on error - really an error is fatal
+// but this seems the normal response to library errors
+static bool do_suspend(OSThread* osthread) {
+  assert(osthread->sr.is_running(), "thread should be running");
+  // mark as suspended and send signal
+
+  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
+    // failed to switch, state wasn't running?
+    ShouldNotReachHere();
+    return false;
+  }
+
+  if (sr_notify(osthread) != 0) {
+    // try to cancel, switch to running
+
+    os::SuspendResume::State result = osthread->sr.cancel_suspend();
+    if (result == os::SuspendResume::SR_RUNNING) {
+      // cancelled
+      return false;
+    } else if (result == os::SuspendResume::SR_SUSPENDED) {
+      // somehow managed to suspend
+      return true;
+    } else {
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+
+  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
+
+  for (int n = 0; !osthread->sr.is_suspended(); n++) {
+    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
+      os::yield_all(i);
+    }
+
+    // timeout, try to cancel the request
+    if (n >= RANDOMLY_LARGE_INTEGER) {
+      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
+      if (cancelled == os::SuspendResume::SR_RUNNING) {
+        return false;
+      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
+        return true;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
+    }
+  }
+
+  guarantee(osthread->sr.is_suspended(), "Must be suspended");
+  return true;
+}
+
+static void do_resume(OSThread* osthread) {
+  //assert(osthread->sr.is_suspended(), "thread should be suspended");
+
+  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // failed to switch to WAKEUP_REQUEST
+    ShouldNotReachHere();
+    return;
+  }
+
+  while (!osthread->sr.is_running()) {
+    if (sr_notify(osthread) == 0) {
+      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
+        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
+          os::yield_all(i);
+        }
+      }
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  guarantee(osthread->sr.is_running(), "Must be running!");
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// interrupt support
+
+void os::interrupt(Thread* thread) {
+  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
+    "possibility of dangling Thread pointer");
+
+  OSThread* osthread = thread->osthread();
+
+  if (!osthread->interrupted()) {
+    osthread->set_interrupted(true);
+    // More than one thread can get here with the same value of osthread,
+    // resulting in multiple notifications.  We do, however, want the store
+    // to interrupted() to be visible to other threads before we execute unpark().
+    OrderAccess::fence();
+    ParkEvent * const slp = thread->_SleepEvent;
+    if (slp != NULL) slp->unpark();
+  }
+
+  // For JSR166. Unpark even if interrupt status already was set
+  if (thread->is_Java_thread())
+    ((JavaThread*)thread)->parker()->unpark();
+
+  ParkEvent * ev = thread->_ParkEvent;
+  if (ev != NULL) ev->unpark();
+
+}
+
+bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
+  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
+    "possibility of dangling Thread pointer");
+
+  OSThread* osthread = thread->osthread();
+
+  bool interrupted = osthread->interrupted();
+
+  if (interrupted && clear_interrupted) {
+    osthread->set_interrupted(false);
+    // consider thread->_SleepEvent->reset() ... optional optimization
+  }
+
+  return interrupted;
+}
+
+///////////////////////////////////////////////////////////////////////////////////
+// signal handling (except suspend/resume)
+
+// This routine may be used by user applications as a "hook" to catch signals.
+// The user-defined signal handler must pass unrecognized signals to this
+// routine, and if it returns true (non-zero), then the signal handler must
+// return immediately. If the flag "abort_if_unrecognized" is true, then this
+// routine will never return false (zero), but instead will execute a VM panic
+// routine that kills the process.
+//
+// If this routine returns false, it is OK to call it again. This allows
+// the user-defined signal handler to perform checks either before or after
+// the VM performs its own checks. Naturally, the user code would be making
+// a serious error if it tried to handle an exception (such as a null check
+// or breakpoint) that the VM was generating for its own correct operation.
+//
+// This routine may recognize any of the following kinds of signals:
+//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
+// It should be consulted by handlers for any of those signals.
+//
+// The caller of this routine must pass in the three arguments supplied
+// to the function referred to in the "sa_sigaction" (not the "sa_handler")
+// field of the structure passed to sigaction(). This routine assumes that
+// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
+//
+// Note that the VM will print warnings if it detects conflicting signal
+// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
+//
+extern "C" JNIEXPORT int
+JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
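+
+// A minimal, illustrative sketch (not part of the VM) of such a user hook;
+// the handler name and the fallback logic are hypothetical:
+//
+//   static void my_sig_handler(int sig, siginfo_t* info, void* uc) {
+//     // Give the VM the first look; 0 = do not abort if unrecognized.
+//     if (JVM_handle_aix_signal(sig, info, uc, 0) != 0) {
+//       return;  // the VM recognized and handled the signal
+//     }
+//     // ... application-specific handling of unrecognized signals ...
+//   }
+//
+// Per the contract above, such a handler must be installed via sigaction()
+// with sa_flags including SA_SIGINFO and SA_RESTART.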
+
+// Set thread signal mask (for some reason on AIX sigthreadmask() seems
+// to be the thing to call; documentation is not terribly clear about whether
+// pthread_sigmask also works, and if it does, whether it does the same).
+bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
+  const int rc = ::pthread_sigmask(how, set, oset);
+  // return value semantics differ slightly for error case:
+  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
+  // (so, pthread_sigmask is more thread-safe for error handling).
+  // But success is always 0.
+  return rc == 0;
+}
+
+// Function to unblock all signals which are, according to POSIX, typical
+// program error signals. If they are raised while blocked, they typically
+// bring down the process immediately.
+bool unblock_program_error_signals() {
+  sigset_t set;
+  ::sigemptyset(&set);
+  ::sigaddset(&set, SIGILL);
+  ::sigaddset(&set, SIGBUS);
+  ::sigaddset(&set, SIGFPE);
+  ::sigaddset(&set, SIGSEGV);
+  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
+}
+
+// Renamed from 'signalHandler' to avoid collision with other shared libs.
+void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
+  assert(info != NULL && uc != NULL, "it must be old kernel");
+
+  // Never leave program error signals blocked;
+  // on all our platforms they would bring down the process immediately when
+  // getting raised while being blocked.
+  unblock_program_error_signals();
+
+  JVM_handle_aix_signal(sig, info, uc, true);
+}
+
+
+// This boolean allows users to forward their own non-matching signals
+// to JVM_handle_aix_signal, harmlessly.
+bool os::Aix::signal_handlers_are_installed = false;
+
+// For signal-chaining
+struct sigaction os::Aix::sigact[MAXSIGNUM];
+unsigned int os::Aix::sigs = 0;
+bool os::Aix::libjsig_is_loaded = false;
+typedef struct sigaction *(*get_signal_t)(int);
+get_signal_t os::Aix::get_signal_action = NULL;
+
+struct sigaction* os::Aix::get_chained_signal_action(int sig) {
+  struct sigaction *actp = NULL;
+
+  if (libjsig_is_loaded) {
+    // Retrieve the old signal handler from libjsig
+    actp = (*get_signal_action)(sig);
+  }
+  if (actp == NULL) {
+    // Retrieve the preinstalled signal handler from jvm
+    actp = get_preinstalled_handler(sig);
+  }
+
+  return actp;
+}
+
+static bool call_chained_handler(struct sigaction *actp, int sig,
+                                 siginfo_t *siginfo, void *context) {
+  // Call the old signal handler
+  if (actp->sa_handler == SIG_DFL) {
+    // It's more reasonable to let jvm treat it as an unexpected exception
+    // instead of taking the default action.
+    return false;
+  } else if (actp->sa_handler != SIG_IGN) {
+    if ((actp->sa_flags & SA_NODEFER) == 0) {
+      // automatically block the signal
+      sigaddset(&(actp->sa_mask), sig);
+    }
+
+    sa_handler_t hand = NULL;
+    sa_sigaction_t sa = NULL;
+    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
+    // retrieve the chained handler
+    if (siginfo_flag_set) {
+      sa = actp->sa_sigaction;
+    } else {
+      hand = actp->sa_handler;
+    }
+
+    if ((actp->sa_flags & SA_RESETHAND) != 0) {
+      actp->sa_handler = SIG_DFL;
+    }
+
+    // try to honor the signal mask
+    sigset_t oset;
+    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
+
+    // call into the chained handler
+    if (siginfo_flag_set) {
+      (*sa)(sig, siginfo, context);
+    } else {
+      (*hand)(sig);
+    }
+
+    // restore the signal mask
+    pthread_sigmask(SIG_SETMASK, &oset, 0);
+  }
+  // Tell jvm's signal handler the signal is taken care of.
+  return true;
+}
+
+bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
+  bool chained = false;
+  // signal-chaining
+  if (UseSignalChaining) {
+    struct sigaction *actp = get_chained_signal_action(sig);
+    if (actp != NULL) {
+      chained = call_chained_handler(actp, sig, siginfo, context);
+    }
+  }
+  return chained;
+}
+
+struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
+  if ((((unsigned int)1 << sig) & sigs) != 0) {
+    return &sigact[sig];
+  }
+  return NULL;
+}
+
+void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
+  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
+  sigact[sig] = oldAct;
+  sigs |= (unsigned int)1 << sig;
+}
+
+// for diagnostic
+int os::Aix::sigflags[MAXSIGNUM];
+
+int os::Aix::get_our_sigflags(int sig) {
+  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
+  return sigflags[sig];
+}
+
+void os::Aix::set_our_sigflags(int sig, int flags) {
+  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
+  sigflags[sig] = flags;
+}
+
+void os::Aix::set_signal_handler(int sig, bool set_installed) {
+  // Check for overwrite.
+  struct sigaction oldAct;
+  sigaction(sig, (struct sigaction*)NULL, &oldAct);
+
+  void* oldhand = oldAct.sa_sigaction
+    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
+    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
+  // Renamed 'signalHandler' to avoid collision with other shared libs.
+  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
+      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
+      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
+    if (AllowUserSignalHandlers || !set_installed) {
+      // Do not overwrite; user takes responsibility to forward to us.
+      return;
+    } else if (UseSignalChaining) {
+      // save the old handler in jvm
+      save_preinstalled_handler(sig, oldAct);
+      // libjsig also interposes the sigaction() call below and saves the
+      // old sigaction on its own.
+    } else {
+      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
+                    "%#lx for signal %d.", (long)oldhand, sig));
+    }
+  }
+
+  struct sigaction sigAct;
+  sigfillset(&(sigAct.sa_mask));
+  if (!set_installed) {
+    sigAct.sa_handler = SIG_DFL;
+    sigAct.sa_flags = SA_RESTART;
+  } else {
+    // Renamed 'signalHandler' to avoid collision with other shared libs.
+    sigAct.sa_sigaction = javaSignalHandler;
+    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
+  }
+  // Save the flags which we set ourselves (for later diagnostics).
+  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
+  sigflags[sig] = sigAct.sa_flags;
+
+  int ret = sigaction(sig, &sigAct, &oldAct);
+  assert(ret == 0, "check");
+
+  void* oldhand2 = oldAct.sa_sigaction
+                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
+                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
+  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
+}
+
+// install signal handlers for signals that HotSpot needs to
+// handle in order to support Java-level exception handling.
+void os::Aix::install_signal_handlers() {
+  if (!signal_handlers_are_installed) {
+    signal_handlers_are_installed = true;
+
+    // signal-chaining
+    typedef void (*signal_setting_t)();
+    signal_setting_t begin_signal_setting = NULL;
+    signal_setting_t end_signal_setting = NULL;
+    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
+                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+    if (begin_signal_setting != NULL) {
+      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
+                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
+                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+      libjsig_is_loaded = true;
+      assert(UseSignalChaining, "should enable signal-chaining");
+    }
+    if (libjsig_is_loaded) {
+      // Tell libjsig jvm is setting signal handlers
+      (*begin_signal_setting)();
+    }
+
+    set_signal_handler(SIGSEGV, true);
+    set_signal_handler(SIGPIPE, true);
+    set_signal_handler(SIGBUS, true);
+    set_signal_handler(SIGILL, true);
+    set_signal_handler(SIGFPE, true);
+    set_signal_handler(SIGTRAP, true);
+    set_signal_handler(SIGXFSZ, true);
+    set_signal_handler(SIGDANGER, true);
+
+    if (libjsig_is_loaded) {
+      // Tell libjsig jvm finishes setting signal handlers
+      (*end_signal_setting)();
+    }
+
+    // We don't activate the signal checker if libjsig is in place, we trust
+    // ourselves, and if UserSignalHandler is installed all bets are off.
+    // Log that signal checking is off only if -verbose:jni is specified.
+    if (CheckJNICalls) {
+      if (libjsig_is_loaded) {
+        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        check_signals = false;
+      }
+      if (AllowUserSignalHandlers) {
+        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        check_signals = false;
+      }
+      // need to initialize check_signal_done
+      ::sigemptyset(&check_signal_done);
+    }
+  }
+}
+
+static const char* get_signal_handler_name(address handler,
+                                           char* buf, int buflen) {
+  int offset;
+  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
+  if (found) {
+    // skip directory names
+    const char *p1, *p2;
+    p1 = buf;
+    size_t len = strlen(os::file_separator());
+    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
+    // The way os::dll_address_to_library_name is implemented on Aix
+    // right now, it always returns -1 for the offset which is not
+    // terribly informative.
+    // Will fix that. For now, omit the offset.
+    jio_snprintf(buf, buflen, "%s", p1);
+  } else {
+    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
+  }
+  return buf;
+}
+
+static void print_signal_handler(outputStream* st, int sig,
+                                 char* buf, size_t buflen) {
+  struct sigaction sa;
+  sigaction(sig, NULL, &sa);
+
+  st->print("%s: ", os::exception_name(sig, buf, buflen));
+
+  address handler = (sa.sa_flags & SA_SIGINFO)
+    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
+    : CAST_FROM_FN_PTR(address, sa.sa_handler);
+
+  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
+    st->print("SIG_DFL");
+  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
+    st->print("SIG_IGN");
+  } else {
+    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
+  }
+
+  // Print readable mask.
+  st->print(", sa_mask[0]=");
+  os::Posix::print_signal_set_short(st, &sa.sa_mask);
+
+  address rh = VMError::get_resetted_sighandler(sig);
+  // Maybe the handler was reset by VMError?
+  if (rh != NULL) {
+    handler = rh;
+    sa.sa_flags = VMError::get_resetted_sigflags(sig);
+  }
+
+  // Print textual representation of sa_flags.
+  st->print(", sa_flags=");
+  os::Posix::print_sa_flags(st, sa.sa_flags);
+
+  // Check: is it our handler?
+  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
+      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
+    // It is our signal handler.
+    // Check the flags: warn if they differ from what we installed.
+    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
+      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
+                os::Aix::get_our_sigflags(sig));
+    }
+  }
+  st->cr();
+}
+
+
+#define DO_SIGNAL_CHECK(sig) \
+  if (!sigismember(&check_signal_done, sig)) \
+    os::Aix::check_signal_handler(sig)
+
+// This method is a periodic task to check for misbehaving JNI applications
+// under CheckJNI; we can add any periodic checks here.
+
+void os::run_periodic_checks() {
+
+  if (check_signals == false) return;
+
+  // SEGV and BUS, if overridden, could potentially prevent
+  // the generation of hs*.log in the event of a crash; debugging
+  // such a case can be very challenging, so we check
+  // the following for good measure:
+  DO_SIGNAL_CHECK(SIGSEGV);
+  DO_SIGNAL_CHECK(SIGILL);
+  DO_SIGNAL_CHECK(SIGFPE);
+  DO_SIGNAL_CHECK(SIGBUS);
+  DO_SIGNAL_CHECK(SIGPIPE);
+  DO_SIGNAL_CHECK(SIGXFSZ);
+  if (UseSIGTRAP) {
+    DO_SIGNAL_CHECK(SIGTRAP);
+  }
+  DO_SIGNAL_CHECK(SIGDANGER);
+
+  // ReduceSignalUsage allows the user to override these handlers;
+  // see comments at the very top and in jvm_solaris.h.
+  if (!ReduceSignalUsage) {
+    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
+    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
+    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
+    DO_SIGNAL_CHECK(BREAK_SIGNAL);
+  }
+
+  DO_SIGNAL_CHECK(SR_signum);
+  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
+}
+
+typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
+
+static os_sigaction_t os_sigaction = NULL;
+
+void os::Aix::check_signal_handler(int sig) {
+  char buf[O_BUFLEN];
+  address jvmHandler = NULL;
+
+  struct sigaction act;
+  if (os_sigaction == NULL) {
+    // only trust the default sigaction, in case it has been interposed
+    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
+    if (os_sigaction == NULL) return;
+  }
+
+  os_sigaction(sig, (struct sigaction*)NULL, &act);
+
+  address thisHandler = (act.sa_flags & SA_SIGINFO)
+    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
+    : CAST_FROM_FN_PTR(address, act.sa_handler);
+
+
+  switch(sig) {
+  case SIGSEGV:
+  case SIGBUS:
+  case SIGFPE:
+  case SIGPIPE:
+  case SIGILL:
+  case SIGXFSZ:
+    // Renamed 'signalHandler' to avoid collision with other shared libs.
+    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
+    break;
+
+  case SHUTDOWN1_SIGNAL:
+  case SHUTDOWN2_SIGNAL:
+  case SHUTDOWN3_SIGNAL:
+  case BREAK_SIGNAL:
+    jvmHandler = (address)user_handler();
+    break;
+
+  case INTERRUPT_SIGNAL:
+    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
+    break;
+
+  default:
+    if (sig == SR_signum) {
+      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
+    } else {
+      return;
+    }
+    break;
+  }
+
+  if (thisHandler != jvmHandler) {
+    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
+    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
+    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
+    // No need to check this sig any longer
+    sigaddset(&check_signal_done, sig);
+  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
+    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
+    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
+    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
+    // No need to check this sig any longer
+    sigaddset(&check_signal_done, sig);
+  }
+
+  // Dump all the signal handlers.
+  if (sigismember(&check_signal_done, sig)) {
+    print_signal_handlers(tty, buf, O_BUFLEN);
+  }
+}
+
+extern bool signal_name(int signo, char* buf, size_t len);
+
+const char* os::exception_name(int exception_code, char* buf, size_t size) {
+  if (0 < exception_code && exception_code <= SIGRTMAX) {
+    // signal
+    if (!signal_name(exception_code, buf, size)) {
+      jio_snprintf(buf, size, "SIG%d", exception_code);
+    }
+    return buf;
+  } else {
+    return NULL;
+  }
+}
+
+// To install functions for atexit system call
+extern "C" {
+  static void perfMemory_exit_helper() {
+    perfMemory_exit();
+  }
+}
+
+// This is called _before_ most of the global arguments have been parsed.
+void os::init(void) {
+  // This is basic; we want to know if this ever changes.
+  // (The shared memory boundary is supposed to be 256M aligned.)
+  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
+
+  // First off, we need to know whether we run on AIX or PASE, and
+  // the OS level we run on.
+  os::Aix::initialize_os_info();
+
+  // Scan environment (SPEC1170 behaviour, etc)
+  os::Aix::scan_environment();
+
+  // Check which pages are supported by AIX.
+  os::Aix::query_multipage_support();
+
+  // Next, we need to initialize libo4 and libperfstat libraries.
+  if (os::Aix::on_pase()) {
+    os::Aix::initialize_libo4();
+  } else {
+    os::Aix::initialize_libperfstat();
+  }
+
+  // Reset the perfstat information provided by ODM.
+  if (os::Aix::on_aix()) {
+    libperfstat::perfstat_reset();
+  }
+
+  // Now initialize basic system properties. Note that for some of the values we
+  // need libperfstat etc.
+  os::Aix::initialize_system_info();
+
+  // Initialize large page support.
+  if (UseLargePages) {
+    os::large_page_init();
+    if (!UseLargePages) {
+      // initialize os::_page_sizes
+      _page_sizes[0] = Aix::page_size();
+      _page_sizes[1] = 0;
+      if (Verbose) {
+        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
+      }
+    }
+  } else {
+    // initialize os::_page_sizes
+    _page_sizes[0] = Aix::page_size();
+    _page_sizes[1] = 0;
+  }
+
+  // debug trace
+  if (Verbose) {
+    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
+    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
+    fprintf(stderr, "os::_page_sizes = ( ");
+    for (int i = 0; _page_sizes[i]; i ++) {
+      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
+    }
+    fprintf(stderr, ")\n");
+  }
+
+  _initial_pid = getpid();
+
+  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
+
+  init_random(1234567);
+
+  ThreadCritical::initialize();
+
+  // Main_thread points to the aboriginal thread.
+  Aix::_main_thread = pthread_self();
+
+  initial_time_count = os::elapsed_counter();
+  pthread_mutex_init(&dl_mutex, NULL);
+}
+
+// this is called _after_ the global arguments have been parsed
+jint os::init_2(void) {
+
+  if (Verbose) {
+    fprintf(stderr, "processor count: %d\n", os::_processor_count);
+    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
+  }
+
+  // initially build up the loaded dll map
+  LoadedLibraries::reload();
+
+  const int page_size = Aix::page_size();
+  const int map_size = page_size;
+
+  address map_address = (address) MAP_FAILED;
+  const int prot  = PROT_READ;
+  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
+
+  // use optimized addresses for the polling page,
+  // e.g. map it to a special 32-bit address.
+  if (OptimizePollingPageLocation) {
+    // architecture-specific list of address wishes:
+    address address_wishes[] = {
+      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
+      // PPC64: all address wishes are non-negative 32 bit values where
+      // the lower 16 bits are all zero. we can load these addresses
+      // with a single ppc_lis instruction.
+      (address) 0x30000000, (address) 0x31000000,
+      (address) 0x32000000, (address) 0x33000000,
+      (address) 0x40000000, (address) 0x41000000,
+      (address) 0x42000000, (address) 0x43000000,
+      (address) 0x50000000, (address) 0x51000000,
+      (address) 0x52000000, (address) 0x53000000,
+      (address) 0x60000000, (address) 0x61000000,
+      (address) 0x62000000, (address) 0x63000000
+    };
+    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
+
+    // iterate over the list of address wishes:
+    for (int i=0; i<address_wishes_length; i++) {
+      // try to map with current address wish.
+      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
+      // fail if the address is already mapped.
+      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
+                                     map_size, prot,
+                                     flags | MAP_FIXED,
+                                     -1, 0);
+      if (Verbose) {
+        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
+                address_wishes[i], map_address + (ssize_t)page_size);
+      }
+
+      if (map_address + (ssize_t)page_size == address_wishes[i]) {
+        // map succeeded and map_address is at wished address, exit loop.
+        break;
+      }
+
+      if (map_address != (address) MAP_FAILED) {
+        // map succeeded, but polling_page is not at wished address, unmap and continue.
+        ::munmap(map_address, map_size);
+        map_address = (address) MAP_FAILED;
+      }
+      // map failed, continue loop.
+    }
+  } // end OptimizePollingPageLocation
+
+  if (map_address == (address) MAP_FAILED) {
+    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
+  }
+  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
+  os::set_polling_page(map_address);
+
+  if (!UseMembar) {
+    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap Failed for memory serialize page");
+    os::set_memory_serialize_page(mem_serialize_page);
+
+#ifndef PRODUCT
+    if (Verbose && PrintMiscellaneous)
+      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+#endif
+  }
+
+  // initialize suspend/resume support - must do this before signal_sets_init()
+  if (SR_initialize() != 0) {
+    perror("SR_initialize failed");
+    return JNI_ERR;
+  }
+
+  Aix::signal_sets_init();
+  Aix::install_signal_handlers();
+
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size. Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
+                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
+
+  size_t threadStackSizeInBytes = ThreadStackSize * K;
+  if (threadStackSizeInBytes != 0 &&
+      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
+        tty->print_cr("\nThe stack size specified is too small. "
+                      "Specify at least %dk",
+                      os::Aix::min_stack_allowed / K);
+        return JNI_ERR;
+  }
+
+  // Make the stack size a multiple of the page size so that
+  // the yellow/red zones can be guarded.
+  // note that this can be 0, if no default stacksize was set
+  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
+
+  Aix::libpthread_init();
+
+  if (MaxFDLimit) {
+    // Set the number of file descriptors to the max; print out an error
+    // if getrlimit/setrlimit fails, but continue regardless.
+    struct rlimit nbr_files;
+    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
+    if (status != 0) {
+      if (PrintMiscellaneous && (Verbose || WizardMode))
+        perror("os::init_2 getrlimit failed");
+    } else {
+      nbr_files.rlim_cur = nbr_files.rlim_max;
+      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
+      if (status != 0) {
+        if (PrintMiscellaneous && (Verbose || WizardMode))
+          perror("os::init_2 setrlimit failed");
+      }
+    }
+  }
+
+  if (PerfAllowAtExitRegistration) {
+    // only register atexit functions if PerfAllowAtExitRegistration is set.
+    // atexit functions can be delayed until process exit time, which
+    // can be problematic for embedded VM situations. Embedded VMs should
+    // call DestroyJavaVM() to assure that VM resources are released.
+
+    // note: perfMemory_exit_helper atexit function may be removed in
+    // the future if the appropriate cleanup code can be added to the
+    // VM_Exit VMOperation's doit method.
+    if (atexit(perfMemory_exit_helper) != 0) {
+      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
+    }
+  }
+
+  return JNI_OK;
+}
+
+// this is called at the end of vm_initialization
+void os::init_3(void) {
+  return;
+}
+
+// Mark the polling page as unreadable
+void os::make_polling_page_unreadable(void) {
+  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
+    fatal("Could not disable polling page");
+  }
+};
+
+// Mark the polling page as readable
+void os::make_polling_page_readable(void) {
+  // Changed according to os_linux.cpp.
+  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
+    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
+  }
+};
+
+int os::active_processor_count() {
+  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
+  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
+  return online_cpus;
+}
+
+void os::set_native_thread_name(const char *name) {
+  // Not yet implemented.
+  return;
+}
+
+bool os::distribute_processes(uint length, uint* distribution) {
+  // Not yet implemented.
+  return false;
+}
+
+bool os::bind_to_processor(uint processor_id) {
+  // Not yet implemented.
+  return false;
+}
+
+void os::SuspendedThreadTask::internal_do_task() {
+  if (do_suspend(_thread->osthread())) {
+    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
+    do_task(context);
+    do_resume(_thread->osthread());
+  }
+}
+
+class PcFetcher : public os::SuspendedThreadTask {
+public:
+  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
+  ExtendedPC result();
+protected:
+  void do_task(const os::SuspendedThreadTaskContext& context);
+private:
+  ExtendedPC _epc;
+};
+
+ExtendedPC PcFetcher::result() {
+  guarantee(is_done(), "task is not done yet.");
+  return _epc;
+}
+
+void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
+  Thread* thread = context.thread();
+  OSThread* osthread = thread->osthread();
+  if (osthread->ucontext() != NULL) {
+    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
+  } else {
+    // NULL context is unexpected, double-check this is the VMThread.
+    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
+  }
+}
+
+// Suspends the target using the signal mechanism and then grabs the PC before
+// resuming the target. Used by the flat-profiler only
+ExtendedPC os::get_thread_pc(Thread* thread) {
+  // Make sure that it is called by the watcher for the VMThread.
+  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
+  assert(thread->is_VM_thread(), "Can only be called for VMThread");
+
+  PcFetcher fetcher(thread);
+  fetcher.run();
+  return fetcher.result();
+}
+
+// Not needed on Aix.
+// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
+// }
+
+////////////////////////////////////////////////////////////////////////////////
+// debug support
+
+static address same_page(address x, address y) {
+  intptr_t page_bits = -os::vm_page_size();
+  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
+    return x;
+  else if (x > y)
+    return (address)(intptr_t(y) | ~page_bits) + 1;
+  else
+    return (address)(intptr_t(y) & page_bits);
+}
+
+bool os::find(address addr, outputStream* st) {
+
+  st->print(PTR_FORMAT ": ", addr);
+
+  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
+  if (lib) {
+    lib->print(st);
+    return true;
+  } else {
+    lib = LoadedLibraries::find_for_data_address(addr);
+    if (lib) {
+      lib->print(st);
+      return true;
+    } else {
+      st->print_cr("(outside any module)");
+    }
+  }
+
+  return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// misc
+
+// This does not do anything on Aix. This is basically a hook for being
+// able to use structured exception handling (thread-local exception filters)
+// on, e.g., Win32.
+void
+os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
+                         JavaCallArguments* args, Thread* thread) {
+  f(value, method, args, thread);
+}
+
+void os::print_statistics() {
+}
+
+int os::message_box(const char* title, const char* message) {
+  int i;
+  fdStream err(defaultStream::error_fd());
+  for (i = 0; i < 78; i++) err.print_raw("=");
+  err.cr();
+  err.print_raw_cr(title);
+  for (i = 0; i < 78; i++) err.print_raw("-");
+  err.cr();
+  err.print_raw_cr(message);
+  for (i = 0; i < 78; i++) err.print_raw("=");
+  err.cr();
+
+  char buf[16];
+  // Prevent process from exiting upon "read error" without consuming all CPU
+  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
+
+  return buf[0] == 'y' || buf[0] == 'Y';
+}
+
+int os::stat(const char *path, struct stat *sbuf) {
+  char pathbuf[MAX_PATH];
+  if (strlen(path) > MAX_PATH - 1) {
+    errno = ENAMETOOLONG;
+    return -1;
+  }
+  os::native_path(strcpy(pathbuf, path));
+  return ::stat(pathbuf, sbuf);
+}
+
+bool os::check_heap(bool force) {
+  return true;
+}
+
+// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
+//   return ::vsnprintf(buf, count, format, args);
+// }
+
+// Is a (classpath) directory empty?
+bool os::dir_is_empty(const char* path) {
+  DIR *dir = NULL;
+  struct dirent *ptr;
+
+  dir = opendir(path);
+  if (dir == NULL) return true;
+
+  /* Scan the directory */
+  bool result = true;
+  char buf[sizeof(struct dirent) + MAX_PATH];
+  while (result && (ptr = ::readdir(dir)) != NULL) {
+    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
+      result = false;
+    }
+  }
+  closedir(dir);
+  return result;
+}
+
+// This code originates from JDK's sysOpen and open64_w
+// from src/solaris/hpi/src/system_md.c
+
+#ifndef O_DELETE
+#define O_DELETE 0x10000
+#endif
+
+// Open a file. Unlink the file immediately after open returns
+// if the specified oflag has the O_DELETE flag set.
+// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
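+//
+// Illustrative (hypothetical) use - the flag makes the file anonymous on disk:
+//
+//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0666);
+//   // The directory entry is unlinked right after a successful open; the
+//   // data remains reachable through fd until the descriptor is closed.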
+
+int os::open(const char *path, int oflag, int mode) {
+
+  if (strlen(path) > MAX_PATH - 1) {
+    errno = ENAMETOOLONG;
+    return -1;
+  }
+  int fd;
+  int o_delete = (oflag & O_DELETE);
+  oflag = oflag & ~O_DELETE;
+
+  fd = ::open64(path, oflag, mode);
+  if (fd == -1) return -1;
+
+  // If the open succeeded, the file might still be a directory.
+  {
+    struct stat64 buf64;
+    int ret = ::fstat64(fd, &buf64);
+    int st_mode = buf64.st_mode;
+
+    if (ret != -1) {
+      if ((st_mode & S_IFMT) == S_IFDIR) {
+        errno = EISDIR;
+        ::close(fd);
+        return -1;
+      }
+    } else {
+      ::close(fd);
+      return -1;
+    }
+  }
+
+  // All file descriptors that are opened in the JVM and not
+  // specifically destined for a subprocess should have the
+  // close-on-exec flag set. If we don't set it, then careless 3rd
+  // party native code might fork and exec without closing all
+  // appropriate file descriptors (e.g. as we do in closeDescriptors in
+  // UNIXProcess.c), and this in turn might:
+  //
+  // - cause end-of-file to fail to be detected on some file
+  //   descriptors, resulting in mysterious hangs, or
+  //
+  // - might cause an fopen in the subprocess to fail on a system
+  //   suffering from bug 1085341.
+  //
+  // (Yes, the default setting of the close-on-exec flag is a Unix
+  // design flaw.)
+  //
+  // See:
+  // 1085341: 32-bit stdio routines should support file descriptors >255
+  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
+  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
+#ifdef FD_CLOEXEC
+  {
+    int flags = ::fcntl(fd, F_GETFD);
+    if (flags != -1)
+      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
+  }
+#endif
+
+  if (o_delete != 0) {
+    ::unlink(path);
+  }
+  return fd;
+}
+
+
+// create binary file, rewriting existing file if required
+int os::create_binary_file(const char* path, bool rewrite_existing) {
+  int oflags = O_WRONLY | O_CREAT;
+  if (!rewrite_existing) {
+    oflags |= O_EXCL;
+  }
+  return ::open64(path, oflags, S_IREAD | S_IWRITE);
+}
+
+// return current position of file pointer
+jlong os::current_file_offset(int fd) {
+  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
+}
+
+// move file pointer to the specified offset
+jlong os::seek_to_file_offset(int fd, jlong offset) {
+  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
+}
+
+// This code originates from JDK's sysAvailable
+// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
+
+int os::available(int fd, jlong *bytes) {
+  jlong cur, end;
+  int mode;
+  struct stat64 buf64;
+
+  if (::fstat64(fd, &buf64) >= 0) {
+    mode = buf64.st_mode;
+    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
+      // XXX: is the following call interruptible? If so, this might
+      // need to go through the INTERRUPT_IO() wrapper as for other
+      // blocking, interruptible calls in this file.
+      int n;
+      if (::ioctl(fd, FIONREAD, &n) >= 0) {
+        *bytes = n;
+        return 1;
+      }
+    }
+  }
+  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
+    return 0;
+  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
+    return 0;
+  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
+    return 0;
+  }
+  *bytes = end - cur;
+  return 1;
+}
+
+int os::socket_available(int fd, jint *pbytes) {
+  // Linux doc says EINTR not returned, unlike Solaris
+  int ret = ::ioctl(fd, FIONREAD, pbytes);
+
+  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
+  // is expected to return 0 on failure and 1 on success to the jdk.
+  return (ret < 0) ? 0 : 1;
+}
+
+// Map a block of memory.
+char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
+                        char *addr, size_t bytes, bool read_only,
+                        bool allow_exec) {
+  Unimplemented();
+  return NULL;
+}
+
+
+// Remap a block of memory.
+char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
+                          char *addr, size_t bytes, bool read_only,
+                          bool allow_exec) {
+  // same as map_memory() on this OS
+  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
+                        allow_exec);
+}
+
+// Unmap a block of memory.
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
+  return munmap(addr, bytes) == 0;
+}
+
+// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
+// are used by JVM M&M and JVMTI to get user+sys or user CPU time
+// of a thread.
+//
+// current_thread_cpu_time() and thread_cpu_time(Thread*) return
+// the fast estimate available on the platform.
+
+jlong os::current_thread_cpu_time() {
+  // return user + sys since the cost is the same
+  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
+  assert(n >= 0, "negative CPU time");
+  return n;
+}
+
+jlong os::thread_cpu_time(Thread* thread) {
+  // consistent with what current_thread_cpu_time() returns
+  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
+  assert(n >= 0, "negative CPU time");
+  return n;
+}
+
+jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
+  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
+  assert(n >= 0, "negative CPU time");
+  return n;
+}
+
+static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
+  bool error = false;
+
+  jlong sys_time = 0;
+  jlong user_time = 0;
+
+  // Reimplemented using getthrds64().
+  //
+  // It goes like this:
+  // For the thread in question, get the kernel thread id. Then get the
+  // kernel thread statistics using that id.
+  //
+  // Of course this only works when no pthread scheduling is used,
+  // i.e. there is a 1:1 relationship to kernel threads.
+  // On AIX, see the AIXTHREAD_SCOPE variable.
+
+  pthread_t pthtid = thread->osthread()->pthread_id();
+
+  // retrieve kernel thread id for the pthread:
+  tid64_t tid = 0;
+  struct __pthrdsinfo pinfo;
+  // I just love those otherworldly IBM APIs which force me to hand down
+  // dummy buffers for stuff I don't care for...
+  char dummy[1];
+  int dummy_size = sizeof(dummy);
+  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
+                          dummy, &dummy_size) == 0) {
+    tid = pinfo.__pi_tid;
+  } else {
+    tty->print_cr("pthread_getthrds_np failed.");
+    error = true;
+  }
+
+  // retrieve kernel timing info for that kernel thread
+  if (!error) {
+    struct thrdentry64 thrdentry;
+    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
+      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
+      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
+    } else {
+      tty->print_cr("getthrds64 failed.");
+      error = true;
+    }
+  }
+
+  if (p_sys_time) {
+    *p_sys_time = sys_time;
+  }
+
+  if (p_user_time) {
+    *p_user_time = user_time;
+  }
+
+  if (error) {
+    return false;
+  }
+
+  return true;
+}
+
+jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
+  jlong sys_time;
+  jlong user_time;
+
+  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
+    return -1;
+  }
+
+  return user_sys_cpu_time ? sys_time + user_time : user_time;
+}
+
+void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
+  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
+  info_ptr->may_skip_backward = false;     // elapsed time not wall time
+  info_ptr->may_skip_forward = false;      // elapsed time not wall time
+  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
+}
+
+void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
+  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
+  info_ptr->may_skip_backward = false;     // elapsed time not wall time
+  info_ptr->may_skip_forward = false;      // elapsed time not wall time
+  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
+}
+
+bool os::is_thread_cpu_time_supported() {
+  return true;
+}
+
+// System loadavg support. Returns -1 if load average cannot be obtained.
+// For now just return the system wide load average (no processor sets).
+int os::loadavg(double values[], int nelem) {
+
+  // Implemented using libperfstat on AIX.
+
+  guarantee(nelem >= 0 && nelem <= 3, "argument error");
+  guarantee(values, "argument error");
+
+  if (os::Aix::on_pase()) {
+    Unimplemented();
+    return -1;
+  } else {
+    // AIX: use libperfstat
+    //
+    // See also:
+    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
+    // /usr/include/libperfstat.h:
+
+    // Use get_cpuinfo, which is already AIX-version independent.
+    os::Aix::cpuinfo_t ci;
+    if (os::Aix::get_cpuinfo(&ci)) {
+      for (int i = 0; i < nelem; i++) {
+        values[i] = ci.loadavg[i];
+      }
+    } else {
+      return -1;
+    }
+    return nelem;
+  }
+}
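+
+// Illustrative (hypothetical) usage:
+//
+//   double avg[3];
+//   if (os::loadavg(avg, 3) == 3) {
+//     // avg[0..2] conventionally hold the 1-, 5- and 15-minute load averages.
+//   }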
+
+void os::pause() {
+  char filename[MAX_PATH];
+  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
+    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
+  } else {
+    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
+  }
+
+  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+  if (fd != -1) {
+    struct stat buf;
+    ::close(fd);
+    while (::stat(filename, &buf) == 0) {
+      (void)::poll(NULL, 0, 100);
+    }
+  } else {
+    jio_fprintf(stderr,
+      "Could not open pause file '%s', continuing immediately.\n", filename);
+  }
+}
+
+bool os::Aix::is_primordial_thread() {
+  return pthread_self() == (pthread_t)1;
+}
+
+// OS recognition (PASE/AIX, OS level). Call this before calling any
+// of the static functions Aix::on_pase() or Aix::os_version().
+void os::Aix::initialize_os_info() {
+
+  assert(_on_pase == -1 && _os_version == -1, "already called.");
+
+  struct utsname uts;
+  memset(&uts, 0, sizeof(uts));
+  strcpy(uts.sysname, "?");
+  if (::uname(&uts) == -1) {
+    fprintf(stderr, "uname failed (%d)\n", errno);
+    guarantee(0, "Could not determine whether we run on AIX or PASE");
+  } else {
+    if (Verbose) {
+      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
+              "node \"%s\" machine \"%s\"\n",
+              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
+    }
+    const int major = atoi(uts.version);
+    assert(major > 0, "invalid OS version");
+    const int minor = atoi(uts.release);
+    assert(minor > 0, "invalid OS release");
+    _os_version = (major << 8) | minor;
+    if (strcmp(uts.sysname, "OS400") == 0) {
+      Unimplemented();
+    } else if (strcmp(uts.sysname, "AIX") == 0) {
+      // We run on AIX. We do not support versions older than AIX 5.3.
+      _on_pase = 0;
+      if (_os_version < 0x0503) {
+        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
+        assert(false, "AIX release too old.");
+      } else {
+        if (Verbose) {
+          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
+        }
+      }
+    } else {
+      assert(false, "unknown OS");
+    }
+  }
+
+  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
+
+} // end: os::Aix::initialize_os_info()
+
+// Scan environment for important settings which might affect the VM.
+// Trace out settings. Warn about invalid settings and/or correct them.
+//
+// Must run after os::Aix::initialize_os_info().
+void os::Aix::scan_environment() {
+
+  char* p;
+  int rc;
+
+  // Warn explicitly if EXTSHM=ON is used. That switch changes how
+  // System V shared memory behaves. One effect is that the page size of
+  // shared memory cannot be changed dynamically, effectively preventing
+  // large pages from working.
+  // This switch was needed on AIX 32bit, but on AIX 64bit the general
+  // recommendation is (in OSS notes) to switch it off.
+  p = ::getenv("EXTSHM");
+  if (Verbose) {
+    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
+  }
+  if (p && strcmp(p, "ON") == 0) {
+    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
+    _extshm = 1;
+  } else {
+    _extshm = 0;
+  }
+
+  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
+  // Not tested, not supported.
+  //
+  // Note that it might be worth the trouble to test and to require it, if only to
+  // get useful return codes for mprotect.
+  //
+  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
+  // exec() ? before loading the libjvm ? ....)
+  p = ::getenv("XPG_SUS_ENV");
+  if (Verbose) {
+    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
+  }
+  if (p && strcmp(p, "ON") == 0) {
+    _xpg_sus_mode = 1;
+    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
+    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
+    // clobber address ranges. If we ever want to support that, we have to do some
+    // testing first.
+    guarantee(false, "XPG_SUS_ENV=ON not supported");
+  } else {
+    _xpg_sus_mode = 0;
+  }
+
+  // Switch off AIX internal (pthread) guard pages. This has
+  // immediate effect for any pthread_create calls which follow.
+  p = ::getenv("AIXTHREAD_GUARDPAGES");
+  if (Verbose) {
+    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
+    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
+  }
+  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
+  guarantee(rc == 0, "");
+
+} // end: os::Aix::scan_environment()
+
+// PASE: initialize the libo4 library (AS400 PASE porting library).
+void os::Aix::initialize_libo4() {
+  Unimplemented();
+}
+
+// AIX: initialize the libperfstat library (we load this dynamically
+// because it is only available on AIX).
+void os::Aix::initialize_libperfstat() {
+
+  assert(os::Aix::on_aix(), "AIX only");
+
+  if (!libperfstat::init()) {
+    fprintf(stderr, "libperfstat initialization failed.\n");
+    assert(false, "libperfstat initialization failed");
+  } else {
+    if (Verbose) {
+      fprintf(stderr, "libperfstat initialized.\n");
+    }
+  }
+} // end: os::Aix::initialize_libperfstat
+
+/////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+// function to query the current stack size using pthread_getthrds_np
+//
+// ! do not change anything here unless you know what you are doing !
+static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
+
+  // This only works when invoked on a pthread. As we agreed not to use
+  // primordial threads anyway, I assert here
+  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
+
+  // Information about this API can be found (a) in the pthread.h header and
+  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
+  //
+  // The use of this API to find out the current stack is kind of undefined.
+  // But after a lot of tries and asking IBM about it, I concluded that it is safe
+  // enough for cases where I let the pthread library create its stacks. For cases
+  // where I create my own stack and pass it to pthread_create, it seems not to
+  // work (the returned stack size in that case is 0).
+
+  pthread_t tid = pthread_self();
+  struct __pthrdsinfo pinfo;
+  char dummy[1]; // we only need this to satisfy the api and to not get E
+  int dummy_size = sizeof(dummy);
+
+  memset(&pinfo, 0, sizeof(pinfo));
+
+  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
+                                      sizeof(pinfo), dummy, &dummy_size);
+
+  if (rc != 0) {
+    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
+    guarantee(0, "pthread_getthrds_np failed");
+  }
+
+  guarantee(pinfo.__pi_stackend, "returned stack base invalid");
+
+  // The following can happen when invoking pthread_getthrds_np on a pthread
+  // running on a user-provided stack (when handing down a stack to
+  // pthread_create, see pthread_attr_setstackaddr).
+  // Not sure what to do here - I feel inclined to forbid this use case completely.
+  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
+
+  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
+  if (p_stack_base) {
+    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
+  }
+
+  if (p_stack_size) {
+    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
+  }
+
+#ifndef PRODUCT
+  if (Verbose) {
+    fprintf(stderr,
+            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
+            ", real stack_size=" INTPTR_FORMAT
+            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
+            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
+            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
+            pinfo.__pi_stacksize - os::Aix::stack_page_size());
+  }
+#endif
+
+} // end query_stack_dimensions
+
+// get the current stack base from the OS (actually, the pthread library)
+address os::current_stack_base() {
+  address p;
+  query_stack_dimensions(&p, 0);
+  return p;
+}
+
+// get the current stack size from the OS (actually, the pthread library)
+size_t os::current_stack_size() {
+  size_t s;
+  query_stack_dimensions(0, &s);
+  return s;
+}
+
+// Refer to the comments in os_solaris.cpp park-unpark.
+//
+// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
+// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
+// For specifics regarding the bug see GLIBC BUGID 261237 :
+//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
+// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
+// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
+// is used. (The simple C test-case provided in the GLIBC bug report manifests the
+// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
+// and monitorenter when we're using 1-0 locking. All those operations may result in
+// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
+// of libpthread avoids the problem, but isn't practical.
+//
+// Possible remedies:
+//
+// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
+//      This is palliative and probabilistic, however. If the thread is preempted
+//      between the call to compute_abstime() and pthread_cond_timedwait(), more
+//      than the minimum period may have passed, and the abstime may be stale (in the
+//      past) resulting in a hang. Using this technique reduces the odds of a hang
+//      but the JVM is still vulnerable, particularly on heavily loaded systems.
+//
+// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
+//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
+//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
+//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
+//      thread.
+//
+// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
+//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
+//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
+//      This also works well. In fact it avoids kernel-level scalability impediments
+//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
+//      timers in a graceful fashion.
+//
+// 4.   When the abstime value is in the past it appears that control returns
+//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
+//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
+//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
+//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
+//      It may be possible to avoid reinitialization by checking the return
+//      value from pthread_cond_timedwait(). In addition to reinitializing the
+//      condvar we must establish the invariant that cond_signal() is only called
+//      within critical sections protected by the adjunct mutex. This prevents
+//      cond_signal() from "seeing" a condvar that's in the midst of being
+//      reinitialized or that is corrupt. Sadly, this invariant obviates the
+//      desirable signal-after-unlock optimization that avoids futile context switching.
+//
+//      I'm also concerned that some versions of NPTL might allocate an auxiliary
+//      structure when a condvar is used or initialized. cond_destroy() would
+//      release the helper structure. Our reinitialize-after-timedwait fix
+//      put excessive stress on malloc/free and locks protecting the c-heap.
+//
+// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
+// It may be possible to refine (4) by checking the kernel and NPTL versions
+// and only enabling the work-around for vulnerable environments.
+
+// utility to compute the abstime argument to timedwait:
+// millis is the relative timeout time
+// abstime will be the absolute timeout time
+// TODO: replace compute_abstime() with unpackTime()
+
+static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
+  if (millis < 0) millis = 0;
+  struct timeval now;
+  int status = gettimeofday(&now, NULL);
+  assert(status == 0, "gettimeofday");
+  jlong seconds = millis / 1000;
+  millis %= 1000;
+  if (seconds > 50000000) { // see man cond_timedwait(3T)
+    seconds = 50000000;
+  }
+  abstime->tv_sec = now.tv_sec  + seconds;
+  long       usec = now.tv_usec + millis * 1000;
+  if (usec >= 1000000) {
+    abstime->tv_sec += 1;
+    usec -= 1000000;
+  }
+  abstime->tv_nsec = usec * 1000;
+  return abstime;
+}
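+
+// Worked example (illustrative values only): with millis == 1500 and
+// now == { tv_sec = S, tv_usec = 600000 }:
+//   seconds = 1, millis = 500
+//   usec    = 600000 + 500 * 1000 = 1100000 >= 1000000, so carry one second:
+//   abstime = { tv_sec = S + 2, tv_nsec = 100000 * 1000 }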
+
+
+// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
+// Conceptually TryPark() should be equivalent to park(0).
+
+int os::PlatformEvent::TryPark() {
+  for (;;) {
+    const int v = _Event;
+    guarantee ((v == 0) || (v == 1), "invariant");
+    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
+  }
+}
+
+void os::PlatformEvent::park() {       // AKA "down()"
+  // Invariant: Only the thread associated with the Event/PlatformEvent
+  // may call park().
+  // TODO: assert that _Assoc != NULL or _Assoc == Self
+  int v;
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
+  }
+  guarantee (v >= 0, "invariant");
+  if (v == 0) {
+    // Do this the hard way by blocking ...
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    guarantee (_nParked == 0, "invariant");
+    ++ _nParked;
+    while (_Event < 0) {
+      status = pthread_cond_wait(_cond, _mutex);
+      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
+    }
+    -- _nParked;
+
+    // In theory we could move the ST of 0 into _Event past the unlock(),
+    // but then we'd need a MEMBAR after the ST.
+    _Event = 0;
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
+  }
+  guarantee (_Event >= 0, "invariant");
+}
+
+int os::PlatformEvent::park(jlong millis) {
+  guarantee (_nParked == 0, "invariant");
+
+  int v;
+  for (;;) {
+    v = _Event;
+    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
+  }
+  guarantee (v >= 0, "invariant");
+  if (v != 0) return OS_OK;
+
+  // We do this the hard way, by blocking the thread.
+  // Consider enforcing a minimum timeout value.
+  struct timespec abst;
+  compute_abstime(&abst, millis);
+
+  int ret = OS_TIMEOUT;
+  int status = pthread_mutex_lock(_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+  guarantee (_nParked == 0, "invariant");
+  ++_nParked;
+
+  // Object.wait(timo) will return because of
+  // (a) notification
+  // (b) timeout
+  // (c) thread.interrupt
+  //
+  // Thread.interrupt and object.notify{All} both call Event::set.
+  // That is, we treat thread.interrupt as a special case of notification.
+  // The underlying pthread_cond_timedwait() implementation admits
+  // spurious/premature wakeups, but the JLS/JVM spec prevents the
+  // JVM from making those visible to Java code. As such, we must
+  // filter out spurious wakeups. We assume all ETIMEDOUT returns are valid.
+  //
+  // TODO: properly differentiate simultaneous notify+interrupt.
+  // In that case, we should propagate the notify to another waiter.
+
+  while (_Event < 0) {
+    status = pthread_cond_timedwait(_cond, _mutex, &abst);
+    assert_status(status == 0 || status == ETIMEDOUT,
+          status, "cond_timedwait");
+    if (!FilterSpuriousWakeups) break;         // previous semantics
+    if (status == ETIMEDOUT) break;
+    // We consume and ignore EINTR and spurious wakeups.
+  }
+  --_nParked;
+  if (_Event >= 0) {
+    ret = OS_OK;
+  }
+  _Event = 0;
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+  assert (_nParked == 0, "invariant");
+  return ret;
+}
+
+void os::PlatformEvent::unpark() {
+  int v, AnyWaiters;
+  for (;;) {
+    v = _Event;
+    if (v > 0) {
+      // The LD of _Event could have been reordered or be satisfied
+      // by a read-aside from this processor's write buffer.
+      // To avoid problems, execute a barrier and then
+      // ratify the value.
+      OrderAccess::fence();
+      if (_Event == v) return;
+      continue;
+    }
+    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
+  }
+  if (v < 0) {
+    // Wait for the thread associated with the event to vacate
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    AnyWaiters = _nParked;
+
+    if (AnyWaiters != 0) {
+      // Note that we signal while still holding the mutex: remedy (4)
+      // above requires that cond_signal() only be called within critical
+      // sections protected by the adjunct mutex, so the signal-after-unlock
+      // optimization is not available here.
+      status = pthread_cond_signal(_cond);
+      assert_status(status == 0, status, "cond_signal");
+    }
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
+  }
+
+  // For "immortal" Events we could instead signal() _after_ dropping the
+  // lock. That is safe and avoids a common class of futile wakeups. In rare
+  // circumstances it can cause a thread to return prematurely from
+  // cond_{timed}wait(), but the spurious wakeup is benign and the victim
+  // would simply re-test the condition and re-park itself.
+}
+
+
+// JSR166
+// -------------------------------------------------------
+
+//
+// The Solaris and Linux implementations of park/unpark are fairly
+// conservative for now, but can be improved. They currently use a
+// mutex/condvar pair, plus a count.
+// Park decrements count if > 0, else does a condvar wait. Unpark
+// sets count to 1 and signals condvar. Only one thread ever waits
+// on the condvar. Contention seen when trying to park implies that someone
+// is unparking you, so don't wait. And spurious returns are fine, so there
+// is no need to track notifications.
+//
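+// A sketch of the scheme described above (illustrative pseudo-code, not the
+// exact implementation that follows):
+//
+//   park():    if (_counter > 0) { _counter = 0; return; }  // consume permit
+//              else block on the condvar until unpark() signals
+//   unpark():  _counter = 1; pthread_cond_signal(_cond);    // at most one permit
+//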
+
+#define MAX_SECS 100000000
+//
+// This code is common to Linux and Solaris and will be moved to a
+// common place in Dolphin.
+//
+// The passed in time value is either a relative time in nanoseconds
+// or an absolute time in milliseconds. Either way it has to be unpacked
+// into suitable seconds and nanoseconds components and stored in the
+// given timespec structure.
+// Given time is a 64-bit value and the time_t used in the timespec is only
+// a signed-32-bit value (except on 64-bit Linux) we have to watch for
+// overflow if times way in the future are given. Further on Solaris versions
+// prior to 10 there is a restriction (see cond_timedwait) that the specified
+// number of seconds, in abstime, is less than current_time + 100,000,000.
+// As it will be 28 years before "now + 100000000" will overflow we can
+// ignore overflow and just impose a hard-limit on seconds using the value
+// of "now + 100,000,000". This places a limit on the timeout of about 3.17
+// years from "now".
+//
+
+static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
+  assert (time > 0, "unpackTime");
+
+  struct timeval now;
+  int status = gettimeofday(&now, NULL);
+  assert(status == 0, "gettimeofday");
+
+  time_t max_secs = now.tv_sec + MAX_SECS;
+
+  if (isAbsolute) {
+    jlong secs = time / 1000;
+    if (secs > max_secs) {
+      absTime->tv_sec = max_secs;
+    }
+    else {
+      absTime->tv_sec = secs;
+    }
+    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+  }
+  else {
+    jlong secs = time / NANOSECS_PER_SEC;
+    if (secs >= MAX_SECS) {
+      absTime->tv_sec = max_secs;
+      absTime->tv_nsec = 0;
+    }
+    else {
+      absTime->tv_sec = now.tv_sec + secs;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+        absTime->tv_nsec -= NANOSECS_PER_SEC;
+        ++absTime->tv_sec; // note: this must be <= max_secs
+      }
+    }
+  }
+  assert(absTime->tv_sec >= 0, "tv_sec < 0");
+  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
+  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
+  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
+}
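+
+// Worked example (illustrative values only): a relative time of
+// 2500000000 ns with now == { tv_sec = S, tv_usec = 300000 }:
+//   secs             = 2500000000 / NANOSECS_PER_SEC = 2
+//   absTime->tv_nsec = 500000000 + 300000 * 1000 = 800000000 (no carry)
+//   absTime          = { tv_sec = S + 2, tv_nsec = 800000000 }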
+
+void Parker::park(bool isAbsolute, jlong time) {
+  // Optional fast-path check:
+  // Return immediately if a permit is available.
+  if (_counter > 0) {
+      _counter = 0;
+      OrderAccess::fence();
+      return;
+  }
+
+  Thread* thread = Thread::current();
+  assert(thread->is_Java_thread(), "Must be JavaThread");
+  JavaThread *jt = (JavaThread *)thread;
+
+  // Optional optimization -- avoid state transitions if there's an interrupt pending.
+  // Check interrupt before trying to wait
+  if (Thread::is_interrupted(thread, false)) {
+    return;
+  }
+
+  // Next, demultiplex/decode time arguments
+  timespec absTime;
+  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
+    return;
+  }
+  if (time > 0) {
+    unpackTime(&absTime, isAbsolute, time);
+  }
+
+
+  // Enter safepoint region
+  // Beware of deadlocks such as 6317397.
+  // The per-thread Parker:: mutex is a classic leaf-lock.
+  // In particular a thread must never block on the Threads_lock while
+  // holding the Parker:: mutex. If safepoints are pending both the
+  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
+  ThreadBlockInVM tbivm(jt);
+
+  // Don't wait if we cannot get the lock, since interference arises from
+  // unblocking. Also, check for a pending interrupt before trying to wait.
+  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
+    return;
+  }
+
+  int status;
+  if (_counter > 0) { // no wait needed
+    _counter = 0;
+    status = pthread_mutex_unlock(_mutex);
+    assert (status == 0, "invariant");
+    OrderAccess::fence();
+    return;
+  }
+
+#ifdef ASSERT
+  // Don't catch signals while blocked; let the running threads have the signals.
+  // (This allows a debugger to break into the running thread.)
+  sigset_t oldsigs;
+  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
+  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
+#endif
+
+  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+  jt->set_suspend_equivalent();
+  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
+
+  if (time == 0) {
+    status = pthread_cond_wait (_cond, _mutex);
+  } else {
+    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
+    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
+      pthread_cond_destroy (_cond);
+      pthread_cond_init    (_cond, NULL);
+    }
+  }
+  assert_status(status == 0 || status == EINTR ||
+                status == ETIME || status == ETIMEDOUT,
+                status, "cond_timedwait");
+
+#ifdef ASSERT
+  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
+#endif
+
+  _counter = 0;
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "invariant");
+  // If externally suspended while waiting, re-suspend
+  if (jt->handle_special_suspend_equivalent_condition()) {
+    jt->java_suspend_self();
+  }
+
+  OrderAccess::fence();
+}
+
+void Parker::unpark() {
+  int s, status;
+  status = pthread_mutex_lock(_mutex);
+  assert (status == 0, "invariant");
+  s = _counter;
+  _counter = 1;
+  if (s < 1) {
+    if (WorkAroundNPTLTimedWaitHang) {
+      status = pthread_cond_signal (_cond);
+      assert (status == 0, "invariant");
+      status = pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant");
+    } else {
+      status = pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant");
+      status = pthread_cond_signal (_cond);
+      assert (status == 0, "invariant");
+    }
+  } else {
+    status = pthread_mutex_unlock(_mutex);
+    assert (status == 0, "invariant");
+  }
+}
+
+
+extern char** environ;
+
+// Run the specified command in a separate process. Return its exit value,
+// or -1 on failure (e.g. can't fork a new process).
+// Unlike system(), this function can be called from signal handler. It
+// doesn't block SIGINT et al.
+int os::fork_and_exec(char* cmd) {
+  char * argv[4] = {"sh", "-c", cmd, NULL};
+
+  pid_t pid = fork();
+
+  if (pid < 0) {
+    // fork failed
+    return -1;
+
+  } else if (pid == 0) {
+    // child process
+
+    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
+    execve("/usr/bin/sh", argv, environ);
+
+    // execve failed
+    _exit(-1);
+
+  } else  {
+    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
+    // care about the actual exit code, for now.
+
+    int status;
+
+    // Wait for the child process to exit.  This returns immediately if
+    // the child has already exited.
+    while (waitpid(pid, &status, 0) < 0) {
+        switch (errno) {
+        case ECHILD: return 0;
+        case EINTR: break;
+        default: return -1;
+        }
+    }
+
+    if (WIFEXITED(status)) {
+       // The child exited normally; get its exit code.
+       return WEXITSTATUS(status);
+    } else if (WIFSIGNALED(status)) {
+       // The child exited because of a signal
+       // The best value to return is 0x80 + signal number,
+       // because that is what all Unix shells do, and because
+       // it allows callers to distinguish between process exit and
+       // process death by signal.
+       return 0x80 + WTERMSIG(status);
+    } else {
+       // Unknown exit code; pass it through
+       return status;
+    }
+  }
+  // Remove warning.
+  return -1;
+}
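+
+// Usage example (illustrative): error reporting code can use fork_and_exec()
+// where system(3) would be unsafe, e.g. to run an OnError command:
+//
+//   int rc = os::fork_and_exec(cmd);
+//   // rc == -1: fork/exec failed; rc >= 0x80: child killed by signal
+//   // (rc - 0x80); otherwise rc is the child's exit code.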
+
+// is_headless_jre()
+//
+// Test for the existence of xawt/libmawt.so or libawt_xawt.so
+// in order to report if we are running in a headless jre.
+//
+// Since JDK8, xawt/libmawt.so has moved into the same directory
+// as libawt.so and been renamed libawt_xawt.so.
+bool os::is_headless_jre() {
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *new_xawtstr = "/libawt_xawt.so";
+
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check libawt_xawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, new_xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  return true;
+}
+
+// Get the default path to the core file
+// Returns the length of the string
+int os::get_core_path(char* buffer, size_t bufferSize) {
+  const char* p = get_current_directory(buffer, bufferSize);
+
+  if (p == NULL) {
+    assert(p != NULL, "failed to get current directory");
+    return 0;
+  }
+
+  return strlen(buffer);
+}
+
+#ifndef PRODUCT
+void TestReserveMemorySpecial_test() {
+  // No tests available for this platform
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/os_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,385 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_OS_AIX_HPP
+#define OS_AIX_VM_OS_AIX_HPP
+
+// Information about the protection of the page at address '0' on this os.
+static bool zero_page_read_protected() { return false; }
+
+// Class Aix defines the interface to the Aix operating systems.
+
+class Aix {
+  friend class os;
+
+  // For signal-chaining
+  // highest so far (AIX 5.2) is SIGSAK (63)
+#define MAXSIGNUM 63
+  // length of strings included in the libperfstat structures
+#define IDENTIFIER_LENGTH 64
+
+  static struct sigaction sigact[MAXSIGNUM]; // saved preinstalled sigactions
+  static unsigned int sigs;             // mask of signals that have
+                                        // preinstalled signal handlers
+  static bool libjsig_is_loaded;        // libjsig that interposes sigaction(),
+                                        // __sigaction(), signal() is loaded
+  static struct sigaction *(*get_signal_action)(int);
+  static struct sigaction *get_preinstalled_handler(int);
+  static void save_preinstalled_handler(int, struct sigaction&);
+
+  static void check_signal_handler(int sig);
+
+  // For signal flags diagnostics
+  static int sigflags[MAXSIGNUM];
+
+ protected:
+
+  static julong _physical_memory;
+  static pthread_t _main_thread;
+  static Mutex* _createThread_lock;
+  static int _page_size;
+  static int _logical_cpus;
+
+  // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
+  static int _on_pase;
+
+  // -1 = uninitialized, otherwise 16 bit number:
+  //  lower 8 bit - minor version
+  //  higher 8 bit - major version
+  //  For AIX, e.g. 0x0601 for AIX 6.1
+  //  for OS/400 e.g. 0x0504 for OS/400 V5R4
+  static int _os_version;
+
+  // -1 = uninitialized,
+  //  0 - SPEC1170 not requested (XPG_SUS_ENV is OFF or not set)
+  //  1 - SPEC1170 requested (XPG_SUS_ENV is ON)
+  static int _xpg_sus_mode;
+
+  // -1 = uninitialized,
+  //  0 - EXTSHM=OFF or not set
+  //  1 - EXTSHM=ON
+  static int _extshm;
+
+  // page sizes on AIX.
+  //
+  //  AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The latter two
+  //  (16M "large" resp. 16G "huge" pages) require special setup and are normally
+  //  not available.
+  //
+  //  AIX supports multiple page sizes per process, for:
+  //  - Stack (of the primordial thread, so not relevant for us)
+  //  - Data - data, bss, heap, for us also pthread stacks
+  //  - Text - text code
+  //  - shared memory
+  //
+  //  Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
+  //  and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...)
+  //
+  //  For shared memory, page size can be set dynamically via shmctl(). Different shared memory
+  //  regions can have different page sizes.
+  //
+  //  More information can be found at the IBM Info Center:
+  //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
+  //
+  // -----
+  //  We want to support 4K and 64K and, if the machine is set up correctly, 16MB pages.
+  //
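+  //  A sketch (illustrative; error handling omitted) of requesting 64K pages
+  //  for a System V shared memory segment via the AIX-specific SHM_PAGESIZE
+  //  command:
+  //
+  //    int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
+  //    struct shmid_ds ds;
+  //    ds.shm_pagesize = 64 * 1024;          // request 64K backing pages
+  //    shmctl(shmid, SHM_PAGESIZE, &ds);     // set before the first shmat()
+  //    void* p = shmat(shmid, NULL, 0);      // backed by 64K pages if granted
+  //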
+
+  // page size of the stack of newly created pthreads
+  // (should be LDR_CNTRL DATAPSIZE because stack is allocated on heap by pthread lib)
+  static int _stack_page_size;
+
+  // Default shm page size. Read: what page size shared memory will be backed
+  // with if no page size was set explicitly using shmctl(SHM_PAGESIZE).
+  // Should be LDR_CNTRL SHMPSIZE.
+  static size_t _shm_default_page_size;
+
+  // True if sys V shm can be used with 64K pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..). Should be true for AIX 5.3 and
+  // newer / PASE V6R1 and newer. (0 or 1, -1 if not initialized)
+  static int _can_use_64K_pages;
+
+  // True if sys V shm can be used with 16M pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..). Only true on AIX 5.3 and
+  // newer, if the system was set up to use 16M pages and the
+  // jvm has enough user rights. (0 or 1, -1 if not initialized)
+  static int _can_use_16M_pages;
+
+  static julong available_memory();
+  static julong physical_memory() { return _physical_memory; }
+  static void initialize_system_info();
+
+  // OS recognitions (PASE/AIX, OS level) call this before calling any
+  // one of Aix::on_pase(), Aix::os_version().
+  static void initialize_os_info();
+
+  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
+  static int commit_memory_impl(char* addr, size_t bytes,
+                                size_t alignment_hint, bool exec);
+
+  // Scan the environment for important settings which might affect the
+  // VM. Trace out settings. Warn about invalid settings and/or
+  // correct them.
+  //
+  // Must run after os::Aix::initialize_os_info().
+  static void scan_environment();
+
+  // Retrieve information about multipage size support. Will initialize
+  // _page_size, _stack_page_size, _can_use_64K_pages/_can_use_16M_pages
+  static void query_multipage_support();
+
+  // Initialize libo4 (on PASE) and libperfstat (on AIX). Call this
+  // before relying on functions from either lib, e.g. Aix::get_meminfo().
+  static void initialize_libo4();
+  static void initialize_libperfstat();
+
+  static bool supports_variable_stack_size();
+
+ public:
+  static void init_thread_fpu_state();
+  static pthread_t main_thread(void)                                { return _main_thread; }
+  // returns kernel thread id (similar to LWP id on Solaris), which can be
+  // used to access /proc
+  static pid_t gettid();
+  static void set_createThread_lock(Mutex* lk)                      { _createThread_lock = lk; }
+  static Mutex* createThread_lock(void)                             { return _createThread_lock; }
+  static void hotspot_sigmask(Thread* thread);
+
+  // Given an address, returns the size of the page backing that address
+  static size_t query_pagesize(void* p);
+
+  // Return `true' if the calling thread is the primordial thread. The
+  // primordial thread is the thread which contains the main function,
+  // *not* necessarily the thread which initialized the VM by calling
+  // JNI_CreateJavaVM.
+  static bool is_primordial_thread(void);
+
+  static int page_size(void) {
+    assert(_page_size != -1, "not initialized");
+    return _page_size;
+  }
+
+  // Accessor methods for stack page size which may be different from usual page size.
+  static int stack_page_size(void) {
+    assert(_stack_page_size != -1, "not initialized");
+    return _stack_page_size;
+  }
+
+  // default shm page size. Read: what page size shared memory
+  // will be backed with if no page size was set explicitly using shmctl(SHM_PAGESIZE).
+  // Should be LDR_CNTRL SHMPSIZE.
+  static int shm_default_page_size(void) {
+    assert(_shm_default_page_size != -1, "not initialized");
+    return _shm_default_page_size;
+  }
+
+  // Return true if sys V shm can be used with 64K pages dynamically
+  // (via shmctl(.. SHM_PAGESIZE..).
+  static bool can_use_64K_pages () {
+    assert(_can_use_64K_pages != -1,  "not initialized");
+    return _can_use_64K_pages == 1 ? true : false;
+  }
+
+  // Return true if sys V shm can be used with 16M pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..).
+  static bool can_use_16M_pages () {
+    assert(_can_use_16M_pages != -1,  "not initialized");
+    return _can_use_16M_pages == 1 ? true : false;
+  }
+
+  static address   ucontext_get_pc(ucontext_t* uc);
+  static intptr_t* ucontext_get_sp(ucontext_t* uc);
+  static intptr_t* ucontext_get_fp(ucontext_t* uc);
+  // Set PC into context. Needed for continuation after signal.
+  static void ucontext_set_pc(ucontext_t* uc, address pc);
+
+  // This boolean allows users to forward their own non-matching signals
+  // to JVM_handle_aix_signal, harmlessly.
+  static bool signal_handlers_are_installed;
+
+  static int get_our_sigflags(int);
+  static void set_our_sigflags(int, int);
+  static void signal_sets_init();
+  static void install_signal_handlers();
+  static void set_signal_handler(int, bool);
+  static bool is_sig_ignored(int sig);
+
+  static sigset_t* unblocked_signals();
+  static sigset_t* vm_signals();
+  static sigset_t* allowdebug_blocked_signals();
+
+  // For signal-chaining
+  static struct sigaction *get_chained_signal_action(int sig);
+  static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
+
+  // libpthread version string
+  static void libpthread_init();
+
+  // Minimum stack size a thread can be created with (allowing
+  // the VM to completely create the thread and enter user code)
+  static size_t min_stack_allowed;
+
+  // Return default stack size or guard size for the specified thread type
+  static size_t default_stack_size(os::ThreadType thr_type);
+  static size_t default_guard_size(os::ThreadType thr_type);
+
+  // Function returns true if we run on OS/400 (pase), false if we run
+  // on AIX.
+  static bool on_pase() {
+    assert(_on_pase != -1, "not initialized");
+    return _on_pase ? true : false;
+  }
+
+  // Function returns true if we run on AIX, false if we run on OS/400
+  // (pase).
+  static bool on_aix() {
+    assert(_on_pase != -1, "not initialized");
+    return _on_pase ? false : true;
+  }
+
+  // -1 = uninitialized, otherwise 16 bit number:
+  // lower 8 bit - minor version
+  // higher 8 bit - major version
+  // For AIX, e.g. 0x0601 for AIX 6.1
+  // for OS/400 e.g. 0x0504 for OS/400 V5R4
+  static int os_version () {
+    assert(_os_version != -1, "not initialized");
+    return _os_version;
+  }
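+
+  // Decoding example (illustrative):
+  //   int major = (os_version() >> 8) & 0xFF;  // 6 for AIX 6.1
+  //   int minor =  os_version()       & 0xFF;  // 1 for AIX 6.1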
+
+  // Convenience method: returns true if running on AIX 5.3 or older.
+  static bool on_aix_53_or_older() {
+    return on_aix() && os_version() <= 0x0503;
+  }
+
+  // Returns true if we run in SPEC1170 compliant mode (XPG_SUS_ENV=ON).
+  static bool xpg_sus_mode() {
+    assert(_xpg_sus_mode != -1, "not initialized");
+    return _xpg_sus_mode;
+  }
+
+  // Returns true if EXTSHM=ON.
+  static bool extshm() {
+    assert(_extshm != -1, "not initialized");
+    return _extshm;
+  }
+
+  // result struct for get_meminfo()
+  struct meminfo_t {
+
+    // Amount of virtual memory (in units of 4 KB pages)
+    unsigned long long virt_total;
+
+    // Amount of real memory, in bytes
+    unsigned long long real_total;
+
+    // Amount of free real memory, in bytes
+    unsigned long long real_free;
+
+    // Total amount of paging space, in bytes
+    unsigned long long pgsp_total;
+
+    // Amount of free paging space, in bytes
+    unsigned long long pgsp_free;
+
+  };
+
+  // Result struct for get_cpuinfo().
+  struct cpuinfo_t {
+    char description[IDENTIFIER_LENGTH];  // processor description (type/official name)
+    u_longlong_t processorHZ;             // processor speed in Hz
+    int ncpus;                            // number of active logical processors
+    double loadavg[3];                    // (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.
+                                          // To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>.
+    char version[20];                     // processor version from _system_configuration (sys/systemcfg.h)
+  };
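+
+  // Conversion example (illustrative): the conventional 1-minute load
+  // average from a populated cpuinfo_t, with SBITS from <sys/proc.h>:
+  //   double load1 = pci->loadavg[0] / (double)(1 << SBITS);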
+
+  // Functions to retrieve memory information on AIX, PASE.
+  // (on AIX, using libperfstat, on PASE with libo4.so).
+  // Returns true if ok, false if error.
+  static bool get_meminfo(meminfo_t* pmi);
+
+  // Function to retrieve cpu information on AIX
+  // (on AIX, using libperfstat)
+  // Returns true if ok, false if error.
+  static bool get_cpuinfo(cpuinfo_t* pci);
+
+}; // os::Aix class
+
+
+class PlatformEvent : public CHeapObj<mtInternal> {
+  private:
+    double CachePad [4];   // increase odds that _mutex is sole occupant of cache line
+    volatile int _Event;
+    volatile int _nParked;
+    pthread_mutex_t _mutex  [1];
+    pthread_cond_t  _cond   [1];
+    double PostPad  [2];
+    Thread * _Assoc;
+
+  public:       // TODO-FIXME: make dtor private
+    ~PlatformEvent() { guarantee (0, "invariant"); }
+
+  public:
+    PlatformEvent() {
+      int status;
+      status = pthread_cond_init (_cond, NULL);
+      assert_status(status == 0, status, "cond_init");
+      status = pthread_mutex_init (_mutex, NULL);
+      assert_status(status == 0, status, "mutex_init");
+      _Event   = 0;
+      _nParked = 0;
+      _Assoc   = NULL;
+    }
+
+    // Use caution with reset() and fired() -- they may require MEMBARs
+    void reset() { _Event = 0; }
+    int  fired() { return _Event; }
+    void park ();
+    void unpark ();
+    int  TryPark ();
+    int  park (jlong millis);
+    void SetAssociation (Thread * a) { _Assoc = a; }
+};
+
+class PlatformParker : public CHeapObj<mtInternal> {
+  protected:
+    pthread_mutex_t _mutex [1];
+    pthread_cond_t  _cond  [1];
+
+  public:       // TODO-FIXME: make dtor private
+    ~PlatformParker() { guarantee (0, "invariant"); }
+
+  public:
+    PlatformParker() {
+      int status;
+      status = pthread_cond_init (_cond, NULL);
+      assert_status(status == 0, status, "cond_init");
+      status = pthread_mutex_init (_mutex, NULL);
+      assert_status(status == 0, status, "mutex_init");
+    }
+};
+
+#endif // OS_AIX_VM_OS_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/os_aix.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_OS_AIX_INLINE_HPP
+#define OS_AIX_VM_OS_AIX_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "atomic_aix_ppc.inline.hpp"
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
+
+// System includes
+
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/poll.h>
+#include <sys/ioctl.h>
+#include <netdb.h>
+
+// Defined in the system headers included above.
+#undef rem_size
+
+inline void* os::thread_local_storage_at(int index) {
+  return pthread_getspecific((pthread_key_t)index);
+}
+
+inline const char* os::file_separator() {
+  return "/";
+}
+
+inline const char* os::line_separator() {
+  return "\n";
+}
+
+inline const char* os::path_separator() {
+  return ":";
+}
+
+// File names are case-insensitive on Windows only; AIX file names are case-sensitive.
+inline int os::file_name_strcmp(const char* s1, const char* s2) {
+  return strcmp(s1, s2);
+}
+
+inline bool os::obsolete_option(const JavaVMOption *option) {
+  return false;
+}
+
+inline bool os::uses_stack_guard_pages() {
+  return true;
+}
+
+inline bool os::allocate_stack_guard_pages() {
+  assert(uses_stack_guard_pages(), "sanity check");
+  return true;
+}
+
+
+// On Aix, reservations are made on a page by page basis, nothing to do.
+inline void os::pd_split_reserved_memory(char *base, size_t size,
+                                         size_t split, bool realloc) {
+}
+
+
+// Bang the shadow pages if they need to be touched to be mapped.
+inline void os::bang_stack_shadow_pages() {
+}
+
+inline void os::dll_unload(void *lib) {
+  ::dlclose(lib);
+}
+
+inline const int os::default_file_open_flags() { return 0;}
+
+inline DIR* os::opendir(const char* dirname)
+{
+  assert(dirname != NULL, "just checking");
+  return ::opendir(dirname);
+}
+
+inline int os::readdir_buf_size(const char *path)
+{
+  // According to AIX sys/limits, NAME_MAX must be retrieved at runtime.
+  const long my_NAME_MAX = pathconf(path, _PC_NAME_MAX);
+  return my_NAME_MAX + sizeof(dirent) + 1;
+}
+
+inline jlong os::lseek(int fd, jlong offset, int whence) {
+  return (jlong) ::lseek64(fd, offset, whence);
+}
+
+inline int os::fsync(int fd) {
+  return ::fsync(fd);
+}
+
+inline char* os::native_path(char *path) {
+  return path;
+}
+
+inline int os::ftruncate(int fd, jlong length) {
+  return ::ftruncate64(fd, length);
+}
+
+inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
+{
+  dirent* p;
+  int status;
+  assert(dirp != NULL, "just checking");
+
+  // NOTE: Linux readdir_r (on RH 6.2 and 7.2 at least) is NOT like the POSIX
+  // version. Here is the doc for this function:
+  // http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_262.html
+
+  if((status = ::readdir_r(dirp, dbuf, &p)) != 0) {
+    errno = status;
+    return NULL;
+  } else
+    return p;
+}
+
+inline int os::closedir(DIR *dirp) {
+  assert(dirp != NULL, "argument is NULL");
+  return ::closedir(dirp);
+}
+
+// macros for restartable system calls
+
+#define RESTARTABLE(_cmd, _result) do { \
+    _result = _cmd; \
+  } while(((int)_result == OS_ERR) && (errno == EINTR))
+
+#define RESTARTABLE_RETURN_INT(_cmd) do { \
+  int _result; \
+  RESTARTABLE(_cmd, _result); \
+  return _result; \
+} while(false)
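+
+// Usage example (illustrative): retry a read() that may be interrupted
+// by a signal; res is OS_ERR only for failures other than EINTR:
+//   size_t res;
+//   RESTARTABLE((size_t)::read(fd, buf, nBytes), res);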
+
+// We don't have NUMA support on Aix, but we need this for compilation.
+inline bool os::numa_has_static_binding()   { ShouldNotReachHere(); return true; }
+inline bool os::numa_has_group_homing()     { ShouldNotReachHere(); return false;  }
+
+inline size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
+  size_t res;
+  RESTARTABLE( (size_t) ::read(fd, buf, (size_t) nBytes), res);
+  return res;
+}
+
+inline size_t os::write(int fd, const void *buf, unsigned int nBytes) {
+  size_t res;
+  RESTARTABLE((size_t) ::write(fd, buf, (size_t) nBytes), res);
+  return res;
+}
+
+inline int os::close(int fd) {
+  return ::close(fd);
+}
+
+inline int os::socket_close(int fd) {
+  return ::close(fd);
+}
+
+inline int os::socket(int domain, int type, int protocol) {
+  return ::socket(domain, type, protocol);
+}
+
+inline int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::recv(fd, buf, nBytes, flags));
+}
+
+inline int os::send(int fd, char* buf, size_t nBytes, uint flags) {
+  RESTARTABLE_RETURN_INT(::send(fd, buf, nBytes, flags));
+}
+
+inline int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
+  return os::send(fd, buf, nBytes, flags);
+}
+
+inline int os::timeout(int fd, long timeout) {
+  julong prevtime, newtime;
+  struct timeval t;
+
+  gettimeofday(&t, NULL);
+  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
+
+  for(;;) {
+    struct pollfd pfd;
+
+    pfd.fd = fd;
+    pfd.events = POLLIN | POLLERR;
+
+    int res = ::poll(&pfd, 1, timeout);
+
+    if (res == OS_ERR && errno == EINTR) {
+
+      // On Linux any value < 0 means "forever"
+
+      if(timeout >= 0) {
+        gettimeofday(&t, NULL);
+        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;
+        timeout -= newtime - prevtime;
+        if(timeout <= 0)
+          return OS_OK;
+        prevtime = newtime;
+      }
+    } else
+      return res;
+  }
+}
+
+inline int os::listen(int fd, int count) {
+  return ::listen(fd, count);
+}
+
+inline int os::connect(int fd, struct sockaddr* him, socklen_t len) {
+  RESTARTABLE_RETURN_INT(::connect(fd, him, len));
+}
+
+inline int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
+  // Linux doc says this can't return EINTR, unlike accept() on Solaris.
+  // But see attachListener_linux.cpp, LinuxAttachListener::dequeue().
+  return (int)::accept(fd, him, len);
+}
+
+inline int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
+                        sockaddr* from, socklen_t* fromlen) {
+  RESTARTABLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen));
+}
+
+inline int os::sendto(int fd, char* buf, size_t len, uint flags,
+                      struct sockaddr* to, socklen_t tolen) {
+  RESTARTABLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen));
+}
+
+inline int os::socket_shutdown(int fd, int howto) {
+  return ::shutdown(fd, howto);
+}
+
+inline int os::bind(int fd, struct sockaddr* him, socklen_t len) {
+  return ::bind(fd, him, len);
+}
+
+inline int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
+  return ::getsockname(fd, him, len);
+}
+
+inline int os::get_host_name(char* name, int namelen) {
+  return ::gethostname(name, namelen);
+}
+
+inline struct hostent* os::get_host_by_name(char* name) {
+  return ::gethostbyname(name);
+}
+
+inline int os::get_sock_opt(int fd, int level, int optname,
+                            char* optval, socklen_t* optlen) {
+  return ::getsockopt(fd, level, optname, optval, optlen);
+}
+
+inline int os::set_sock_opt(int fd, int level, int optname,
+                            const char* optval, socklen_t optlen) {
+  return ::setsockopt(fd, level, optname, optval, optlen);
+}
+#endif // OS_AIX_VM_OS_AIX_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/os_share_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_OS_SHARE_AIX_HPP
+#define OS_AIX_VM_OS_SHARE_AIX_HPP
+
+// misc
+void signalHandler(int, siginfo_t*, ucontext_t*);
+void handle_unexpected_exception(Thread* thread, int sig, siginfo_t* info, address pc, address adjusted_pc);
+#ifndef PRODUCT
+void continue_with_dump(void);
+#endif
+
+#define PROCFILE_LENGTH 128
+
+#endif // OS_AIX_VM_OS_SHARE_AIX_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/perfMemory_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "oops/oop.inline.hpp"
+#include "os_aix.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/perfMemory.hpp"
+#include "utilities/exceptions.hpp"
+
+// put OS-includes here
+# include <sys/types.h>
+# include <sys/mman.h>
+# include <errno.h>
+# include <stdio.h>
+# include <unistd.h>
+# include <sys/stat.h>
+# include <signal.h>
+# include <pwd.h>
+
+static char* backing_store_file_name = NULL;  // name of the backing store
+                                              // file, if successfully created.
+
+// Standard Memory Implementation Details
+
+// create the PerfData memory region in standard memory.
+//
+static char* create_standard_memory(size_t size) {
+
+  // allocate an aligned chunk of memory
+  char* mapAddress = os::reserve_memory(size);
+
+  if (mapAddress == NULL) {
+    return NULL;
+  }
+
+  // commit memory
+  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("Could not commit PerfData memory\n");
+    }
+    os::release_memory(mapAddress, size);
+    return NULL;
+  }
+
+  return mapAddress;
+}
+
+// delete the PerfData memory region
+//
+static void delete_standard_memory(char* addr, size_t size) {
+
+  // there are no persistent external resources to cleanup for standard
+  // memory. since DestroyJavaVM does not support unloading of the JVM,
+  // cleanup of the memory resource is not performed. The memory will be
+  // reclaimed by the OS upon termination of the process.
+  //
+  return;
+}
+
+// save the specified memory region to the given file
+//
+// Note: this function might be called from signal handler (by os::abort()),
+// don't allocate heap memory.
+//
+static void save_memory_to_file(char* addr, size_t size) {
+
+  const char* destfile = PerfMemory::get_perfdata_file_path();
+  assert(destfile[0] != '\0', "invalid PerfData file path");
+
+  int result;
+
+  RESTARTABLE(::open(destfile, O_CREAT|O_WRONLY|O_TRUNC, S_IREAD|S_IWRITE),
+              result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("Could not create Perfdata save file: %s: %s\n",
+              destfile, strerror(errno));
+    }
+  } else {
+    int fd = result;
+
+    for (size_t remaining = size; remaining > 0;) {
+
+      RESTARTABLE(::write(fd, addr, remaining), result);
+      if (result == OS_ERR) {
+        if (PrintMiscellaneous && Verbose) {
+          warning("Could not write Perfdata save file: %s: %s\n",
+                  destfile, strerror(errno));
+        }
+        break;
+      }
+
+      remaining -= (size_t)result;
+      addr += result;
+    }
+
+    RESTARTABLE(::close(fd), result);
+    if (PrintMiscellaneous && Verbose) {
+      if (result == OS_ERR) {
+        warning("Could not close %s: %s\n", destfile, strerror(errno));
+      }
+    }
+  }
+  FREE_C_HEAP_ARRAY(char, destfile, mtInternal);
+}
+
+
+// Shared Memory Implementation Details
+
+// Note: the solaris and linux shared memory implementation uses the mmap
+// interface with a backing store file to implement named shared memory.
+// Using the file system as the name space for shared memory allows a
+// common name space to be supported across a variety of platforms. It
+// also provides a name space that Java applications can deal with through
+// simple file apis.
+//
+// The solaris and linux implementations store the backing store file in
+// a user specific temporary directory located in the /tmp file system,
+// which is always a local file system and is sometimes a RAM based file
+// system.
+
+// return the user specific temporary directory name.
+//
+// the caller is expected to free the allocated memory.
+//
+static char* get_user_tmp_dir(const char* user) {
+
+  const char* tmpdir = os::get_temp_directory();
+  const char* perfdir = PERFDATA_NAME;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
+  char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
+
+  // construct the path name to user specific tmp directory
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);
+
+  return dirname;
+}
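+
+// For example (assuming os::get_temp_directory() returns "/tmp" and
+// PERFDATA_NAME is "hsperfdata"), user "alice" yields:
+//   /tmp/hsperfdata_alice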
+
+// convert the given file name into a process id. if the file
+// does not meet the file naming constraints, return 0.
+//
+static pid_t filename_to_pid(const char* filename) {
+
+  // a filename that doesn't begin with a digit is not a
+  // candidate for conversion.
+  //
+  if (!isdigit(*filename)) {
+    return 0;
+  }
+
+  // check if file name can be converted to an integer without
+  // any leftover characters.
+  //
+  char* remainder = NULL;
+  errno = 0;
+  pid_t pid = (pid_t)strtol(filename, &remainder, 10);
+
+  if (errno != 0) {
+    return 0;
+  }
+
+  // check for left over characters. If any, then the filename is
+  // not a candidate for conversion.
+  //
+  if (remainder != NULL && *remainder != '\0') {
+    return 0;
+  }
+
+  // successful conversion, return the pid
+  return pid;
+}
+
+
+// check if the given path is considered a secure directory for
+// the backing store files. Returns true if the directory exists
+// and is considered a secure location. Returns false if the path
+// is a symbolic link or if an error occurred.
+//
+static bool is_directory_secure(const char* path) {
+  struct stat statbuf;
+  int result = 0;
+
+  RESTARTABLE(::lstat(path, &statbuf), result);
+  if (result == OS_ERR) {
+    return false;
+  }
+
+  // the path exists, now check its mode
+  if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
+    // the path represents a link or some non-directory file type,
+    // which is not what we expected. declare it insecure.
+    //
+    return false;
+  }
+  else {
+    // we have an existing directory, check if the permissions are safe.
+    //
+    if ((statbuf.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
+      // the directory is open for writing and could be subjected
+      // to a symlink attack. declare it insecure.
+      //
+      return false;
+    }
+  }
+  return true;
+}
+
+
+// return the user name for the given user id
+//
+// the caller is expected to free the allocated memory.
+//
+static char* get_user_name(uid_t uid) {
+
+  struct passwd pwent;
+
+  // determine the max pwbuf size from sysconf, and hardcode
+  // a default if this is not available through sysconf.
+  //
+  long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
+  if (bufsize == -1)
+    bufsize = 1024;
+
+  char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);
+
+  // The POSIX getpwuid_r() interface is used here.
+  struct passwd* p;
+  int result = getpwuid_r(uid, &pwent, pwbuf, (size_t)bufsize, &p);
+
+  if (result != 0 || p == NULL || p->pw_name == NULL || *(p->pw_name) == '\0') {
+    if (PrintMiscellaneous && Verbose) {
+      if (result != 0) {
+        warning("Could not retrieve passwd entry: %s\n",
+                strerror(result));
+      }
+      else if (p == NULL) {
+        // this check is added to protect against an observed problem
+        // with getpwuid_r() on RedHat 9 where getpwuid_r returns 0,
+        // indicating success, but has p == NULL. This was observed when
+        // inserting a file descriptor exhaustion fault prior to the call
+        // getpwuid_r() call. In this case, error is set to the appropriate
+        // error condition, but this is undocumented behavior. This check
+        // is safe under any condition, but the use of errno in the output
+        // message may result in an erroneous message.
+        // Bug Id 89052 was opened with RedHat.
+        //
+        warning("Could not retrieve passwd entry: %s\n",
+                strerror(errno));
+      }
+      else {
+        warning("Could not determine user name: %s\n",
+                p->pw_name == NULL ? "pw_name = NULL" :
+                                     "pw_name zero length");
+      }
+    }
+    FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
+    return NULL;
+  }
+
+  char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal);
+  strcpy(user_name, p->pw_name);
+
+  FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal);
+  return user_name;
+}
+
+// return the name of the user that owns the process identified by vmid.
+//
+// This method uses a slow directory search algorithm to find the backing
+// store file for the specified vmid and returns the user name, as determined
+// by the user name suffix of the hsperfdata_<username> directory name.
+//
+// the caller is expected to free the allocated memory.
+//
+static char* get_user_name_slow(int vmid, TRAPS) {
+
+  // short circuit the directory search if the process doesn't even exist.
+  if (kill(vmid, 0) == OS_ERR) {
+    if (errno == ESRCH) {
+      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+                  "Process not found");
+    }
+    else /* EPERM */ {
+      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
+    }
+  }
+
+  // directory search
+  char* oldest_user = NULL;
+  time_t oldest_ctime = 0;
+
+  const char* tmpdirname = os::get_temp_directory();
+
+  DIR* tmpdirp = os::opendir(tmpdirname);
+
+  if (tmpdirp == NULL) {
+    return NULL;
+  }
+
+  // for each entry in the directory that matches the pattern hsperfdata_*,
+  // open the directory and check if the file for the given vmid exists.
+  // The file with the expected name and the latest creation date is used
+  // to determine the user name for the process id.
+  //
+  struct dirent* dentry;
+  char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal);
+  errno = 0;
+  while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) {
+
+    // check if the directory entry is a hsperfdata file
+    if (strncmp(dentry->d_name, PERFDATA_NAME, strlen(PERFDATA_NAME)) != 0) {
+      continue;
+    }
+
+    char* usrdir_name = NEW_C_HEAP_ARRAY(char,
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal);
+    strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
+    strcat(usrdir_name, dentry->d_name);
+
+    DIR* subdirp = os::opendir(usrdir_name);
+
+    if (subdirp == NULL) {
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
+      continue;
+    }
+
+    // Since we don't create the backing store files in directories
+    // pointed to by symbolic links, we also don't follow them when
+    // looking for the files. We check for a symbolic link after the
+    // call to opendir in order to eliminate a small window where the
+    // symlink can be exploited.
+    //
+    if (!is_directory_secure(usrdir_name)) {
+      FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
+      os::closedir(subdirp);
+      continue;
+    }
+
+    struct dirent* udentry;
+    char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal);
+    errno = 0;
+    while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) {
+
+      if (filename_to_pid(udentry->d_name) == vmid) {
+        struct stat statbuf;
+        int result;
+
+        char* filename = NEW_C_HEAP_ARRAY(char,
+                            strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal);
+
+        strcpy(filename, usrdir_name);
+        strcat(filename, "/");
+        strcat(filename, udentry->d_name);
+
+        // don't follow symbolic links for the file
+        RESTARTABLE(::lstat(filename, &statbuf), result);
+        if (result == OS_ERR) {
+           FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+           continue;
+        }
+
+        // skip over files that are not regular files.
+        if (!S_ISREG(statbuf.st_mode)) {
+          FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+          continue;
+        }
+
+        // compare and save the filename with the latest creation time
+        if (statbuf.st_size > 0 && statbuf.st_ctime > oldest_ctime) {
+          char* user = strchr(dentry->d_name, '_') + 1;
+
+          if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal);
+          oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
+
+          strcpy(oldest_user, user);
+          oldest_ctime = statbuf.st_ctime;
+        }
+
+        FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+      }
+    }
+    os::closedir(subdirp);
+    FREE_C_HEAP_ARRAY(char, udbuf, mtInternal);
+    FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal);
+  }
+  os::closedir(tmpdirp);
+  FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal);
+
+  return(oldest_user);
+}
+
+// return the name of the user that owns the JVM indicated by the given vmid.
+//
+static char* get_user_name(int vmid, TRAPS) {
+  return get_user_name_slow(vmid, CHECK_NULL);
+}
+
+// return the file name of the backing store file for the named
+// shared memory region for the given user name and vmid.
+//
+// the caller is expected to free the allocated memory.
+//
+static char* get_sharedmem_filename(const char* dirname, int vmid) {
+
+  // add 2 for the file separator and a null terminator.
+  size_t nbytes = strlen(dirname) + UINT_CHARS + 2;
+
+  char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
+  snprintf(name, nbytes, "%s/%d", dirname, vmid);
+
+  return name;
+}
+
+
+// remove file
+//
+// this method removes the file specified by the given path
+//
+static void remove_file(const char* path) {
+
+  int result;
+
+  // if the file is a directory, the following unlink will fail. since
+  // we don't expect to find directories in the user temp directory, we
+  // won't try to handle this situation. even if accidentally or
+  // maliciously planted, the directory's presence won't hurt anything.
+  //
+  RESTARTABLE(::unlink(path), result);
+  if (PrintMiscellaneous && Verbose && result == OS_ERR) {
+    if (errno != ENOENT) {
+      warning("Could not unlink shared memory backing"
+              " store file %s : %s\n", path, strerror(errno));
+    }
+  }
+}
+
+
+// remove file
+//
+// this method removes the file with the given file name in the
+// named directory.
+//
+static void remove_file(const char* dirname, const char* filename) {
+
+  size_t nbytes = strlen(dirname) + strlen(filename) + 2;
+  char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
+
+  strcpy(path, dirname);
+  strcat(path, "/");
+  strcat(path, filename);
+
+  remove_file(path);
+
+  FREE_C_HEAP_ARRAY(char, path, mtInternal);
+}
+
+
+// cleanup stale shared memory resources
+//
+// This method attempts to remove all stale shared memory files in
+// the named user temporary directory. It scans the named directory
+// for files matching the pattern ^[0-9]+$. For each file found, the
+// process id is extracted from the file name and a test is run to
+// determine if the process is alive. If the process is not alive,
+// any stale file resources are removed.
+//
+static void cleanup_sharedmem_resources(const char* dirname) {
+
+  // open the user temp directory
+  DIR* dirp = os::opendir(dirname);
+
+  if (dirp == NULL) {
+    // directory doesn't exist, so there is nothing to cleanup
+    return;
+  }
+
+  if (!is_directory_secure(dirname)) {
+    // the directory is not a secure directory
+    return;
+  }
+
+  // for each entry in the directory that matches the expected file
+  // name pattern, determine if the file resources are stale and if
+  // so, remove the file resources. Note, instrumented HotSpot processes
+  // for this user may start and/or terminate during this search and
+  // remove or create new files in this directory. The behavior of this
+  // loop under these conditions is dependent upon the implementation of
+  // opendir/readdir.
+  //
+  struct dirent* entry;
+  char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal);
+  errno = 0;
+  while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) {
+
+    pid_t pid = filename_to_pid(entry->d_name);
+
+    if (pid == 0) {
+
+      if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) {
+
+        // attempt to remove all unexpected files, except "." and ".."
+        remove_file(dirname, entry->d_name);
+      }
+
+      errno = 0;
+      continue;
+    }
+
+    // we now have a file name that converts to a valid integer
+    // that could represent a process id. if this process id
+    // matches the current process id or the process is not running,
+    // then remove the stale file resources.
+    //
+    // process liveness is detected by sending signal number 0 to
+    // the process id (see kill(2)). if kill determines that the
+    // process does not exist, then the file resources are removed.
+    // if kill determines that we don't have permission to
+    // signal the process, then the file resources are assumed to
+    // be stale and are removed because the resources for such a
+    // process should be in a different user specific directory.
+    //
+    if ((pid == os::current_process_id()) ||
+        (kill(pid, 0) == OS_ERR && (errno == ESRCH || errno == EPERM))) {
+      remove_file(dirname, entry->d_name);
+    }
+    errno = 0;
+  }
+  os::closedir(dirp);
+  FREE_C_HEAP_ARRAY(char, dbuf, mtInternal);
+}
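
filename_to_pid(), used in the loop above, is defined earlier in the file;
conceptually it accepts only names consisting entirely of decimal digits
and returns 0 for everything else. A sketch of that logic (hypothetical
name, for illustration only):

    static pid_t filename_to_pid_sketch(const char* filename) {
      // a name that does not start with a digit cannot be a pid
      if (!isdigit(*filename)) {
        return 0;
      }
      // convert, rejecting trailing non-digit characters and overflow
      char* remainder = NULL;
      errno = 0;
      pid_t pid = (pid_t)strtol(filename, &remainder, 10);
      if (errno != 0 || (remainder != NULL && *remainder != '\0')) {
        return 0;
      }
      return pid;
    }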
+
+// make the user specific temporary directory. Returns true if
+// the directory exists and is secure upon return. Returns false
+// if the directory exists but is insecure (for example, a symlink),
+// or if an error occurred while creating it.
+//
+static bool make_user_tmp_dir(const char* dirname) {
+
+  // create the directory with 0755 permissions. note that the directory
+  // will be owned by euid::egid, which may not be the same as uid::gid.
+  //
+  if (mkdir(dirname, S_IRWXU|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) == OS_ERR) {
+    if (errno == EEXIST) {
+      // The directory already exists and was probably created by another
+      // JVM instance. However, this could also be the result of a
+      // deliberate symlink. Verify that the existing directory is safe.
+      //
+      if (!is_directory_secure(dirname)) {
+        // directory is not secure
+        if (PrintMiscellaneous && Verbose) {
+          warning("%s directory is insecure\n", dirname);
+        }
+        return false;
+      }
+    }
+    else {
+      // we encountered some other failure while attempting
+      // to create the directory
+      //
+      if (PrintMiscellaneous && Verbose) {
+        warning("could not create directory %s: %s\n",
+                dirname, strerror(errno));
+      }
+      return false;
+    }
+  }
+  return true;
+}
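
is_directory_secure(), the guard used both here and in
cleanup_sharedmem_resources(), is also defined earlier in the file. The
essential checks, sketched under the assumption that "secure" means a real
directory that is not a symlink and not writable by group or other
(hypothetical name, for illustration only):

    static bool is_directory_secure_sketch(const char* path) {
      struct stat statbuf;
      int result;
      // use lstat, not stat, so that a planted symlink is seen as a symlink
      RESTARTABLE(::lstat(path, &statbuf), result);
      if (result == OS_ERR) {
        return false;
      }
      // must be a directory and must not be a symbolic link
      if (S_ISLNK(statbuf.st_mode) || !S_ISDIR(statbuf.st_mode)) {
        return false;
      }
      // only the owner may have write permission
      return (statbuf.st_mode & (S_IWGRP | S_IWOTH)) == 0;
    }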
+
+// create the shared memory file resources
+//
+// This method creates the shared memory file with the given size
+// This method also creates the user specific temporary directory, if
+// it does not yet exist.
+//
+static int create_sharedmem_resources(const char* dirname, const char* filename, size_t size) {
+
+  // make the user temporary directory
+  if (!make_user_tmp_dir(dirname)) {
+    // could not make/find the directory or the found directory
+    // was not secure
+    return -1;
+  }
+
+  int result;
+
+  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_TRUNC, S_IREAD|S_IWRITE), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not create file %s: %s\n", filename, strerror(errno));
+    }
+    return -1;
+  }
+
+  // save the file descriptor
+  int fd = result;
+
+  // set the file size
+  RESTARTABLE(::ftruncate(fd, (off_t)size), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("could not set shared memory file size: %s\n", strerror(errno));
+    }
+    RESTARTABLE(::close(fd), result);
+    return -1;
+  }
+
+  return fd;
+}
+
+// open the shared memory file for the given user and vmid. returns
+// the file descriptor for the open file or -1 if the file could not
+// be opened.
+//
+static int open_sharedmem_file(const char* filename, int oflags, TRAPS) {
+
+  // open the file
+  int result;
+  RESTARTABLE(::open(filename, oflags), result);
+  if (result == OS_ERR) {
+    if (errno == ENOENT) {
+      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+                  "Process not found");
+    }
+    else if (errno == EACCES) {
+      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+                  "Permission denied");
+    }
+    else {
+      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
+    }
+  }
+
+  return result;
+}
+
+// create a named shared memory region. returns the address of the
+// memory region on success or NULL on failure. A return value of
+// NULL will ultimately disable the shared memory feature.
+//
+// On Solaris and Linux, the name space for shared memory objects
+// is the file system name space.
+//
+// A monitoring application attaching to a JVM does not need to know
+// the file system name of the shared memory object. However, it may
+// be convenient for applications to discover the existence of newly
+// created and terminating JVMs by watching the file system name space
+// for files being created or removed.
+//
+static char* mmap_create_shared(size_t size) {
+
+  int result;
+  int fd;
+  char* mapAddress;
+
+  int vmid = os::current_process_id();
+
+  char* user_name = get_user_name(geteuid());
+
+  if (user_name == NULL) {
+    return NULL;
+  }
+
+  char* dirname = get_user_tmp_dir(user_name);
+  char* filename = get_sharedmem_filename(dirname, vmid);
+
+  // cleanup any stale shared memory files
+  cleanup_sharedmem_resources(dirname);
+
+  assert(((size > 0) && (size % os::vm_page_size() == 0)),
+         "unexpected PerfMemory region size");
+
+  fd = create_sharedmem_resources(dirname, filename, size);
+
+  FREE_C_HEAP_ARRAY(char, user_name, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+
+  if (fd == -1) {
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+    return NULL;
+  }
+
+  mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+
+  // attempt to close the file - restart it if it was interrupted,
+  // but ignore other failures
+  RESTARTABLE(::close(fd), result);
+  assert(result != OS_ERR, "could not close file");
+
+  if (mapAddress == MAP_FAILED) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("mmap failed -  %s\n", strerror(errno));
+    }
+    remove_file(filename);
+    FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+    return NULL;
+  }
+
+  // save the file name for use in delete_shared_memory()
+  backing_store_file_name = filename;
+
+  // clear the shared memory region
+  (void)::memset((void*) mapAddress, 0, size);
+
+  return mapAddress;
+}
+
+// release a named shared memory region
+//
+static void unmap_shared(char* addr, size_t bytes) {
+  // Do not rely on os::reserve_memory/os::release_memory to use mmap.
+  // os::reserve_memory/os::release_memory is used when PerfDisableSharedMem=1;
+  // plain mmap/munmap is used when PerfDisableSharedMem=0.
+  if (::munmap(addr, bytes) == -1) {
+    warning("perfmemory: munmap failed (%d)\n", errno);
+  }
+}
+
+// create the PerfData memory region in shared memory.
+//
+static char* create_shared_memory(size_t size) {
+
+  // create the shared memory region.
+  return mmap_create_shared(size);
+}
+
+// delete the shared PerfData memory region
+//
+static void delete_shared_memory(char* addr, size_t size) {
+
+  // cleanup the persistent shared memory resources. since DestroyJavaVM does
+  // not support unloading of the JVM, unmapping of the memory resource is
+  // not performed. The memory will be reclaimed by the OS upon termination of
+  // the process. The backing store file is deleted from the file system.
+
+  assert(!PerfDisableSharedMem, "shouldn't be here");
+
+  if (backing_store_file_name != NULL) {
+    remove_file(backing_store_file_name);
+    // Don't free the heap memory here: doing so could deadlock os::abort()
+    // if it is called from a signal handler. The OS will reclaim the heap
+    // memory instead.
+    // FREE_C_HEAP_ARRAY(char, backing_store_file_name, mtInternal);
+    backing_store_file_name = NULL;
+  }
+}
+
+// return the size of the file for the given file descriptor
+// or 0 if it is not a valid size for a shared memory file
+//
+static size_t sharedmem_filesize(int fd, TRAPS) {
+
+  struct stat statbuf;
+  int result;
+
+  RESTARTABLE(::fstat(fd, &statbuf), result);
+  if (result == OS_ERR) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("fstat failed: %s\n", strerror(errno));
+    }
+    THROW_MSG_0(vmSymbols::java_io_IOException(),
+                "Could not determine PerfMemory size");
+  }
+
+  if ((statbuf.st_size == 0) ||
+     ((size_t)statbuf.st_size % os::vm_page_size() != 0)) {
+    THROW_MSG_0(vmSymbols::java_lang_Exception(),
+                "Invalid PerfMemory size");
+  }
+
+  return (size_t)statbuf.st_size;
+}
+
+// attach to a named shared memory region.
+//
+static void mmap_attach_shared(const char* user, int vmid, PerfMemory::PerfMemoryMode mode, char** addr, size_t* sizep, TRAPS) {
+
+  char* mapAddress;
+  int result;
+  int fd;
+  size_t size;
+  const char* luser = NULL;
+
+  int mmap_prot;
+  int file_flags;
+
+  ResourceMark rm;
+
+  // map the high level access mode to the appropriate permission
+  // constructs for the file and the shared memory mapping.
+  if (mode == PerfMemory::PERF_MODE_RO) {
+    mmap_prot = PROT_READ;
+    file_flags = O_RDONLY;
+  }
+  else if (mode == PerfMemory::PERF_MODE_RW) {
+#ifdef LATER
+    mmap_prot = PROT_READ | PROT_WRITE;
+    file_flags = O_RDWR;
+#else
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Unsupported access mode");
+#endif
+  }
+  else {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Illegal access mode");
+  }
+
+  if (user == NULL || strlen(user) == 0) {
+    luser = get_user_name(vmid, CHECK);
+  }
+  else {
+    luser = user;
+  }
+
+  if (luser == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Could not map vmid to user name");
+  }
+
+  char* dirname = get_user_tmp_dir(luser);
+
+  // since we don't follow symbolic links when creating the backing
+  // store file, we don't follow them when attaching either.
+  //
+  if (!is_directory_secure(dirname)) {
+    FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Process not found");
+  }
+
+  char* filename = get_sharedmem_filename(dirname, vmid);
+
+  // copy heap memory to resource memory. the open_sharedmem_file
+  // method below needs to use the filename, but could throw an
+  // exception. using a resource array prevents the leak that
+  // would otherwise occur.
+  char* rfilename = NEW_RESOURCE_ARRAY(char, strlen(filename) + 1);
+  strcpy(rfilename, filename);
+
+  // free the c heap resources that are no longer needed
+  if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal);
+  FREE_C_HEAP_ARRAY(char, dirname, mtInternal);
+  FREE_C_HEAP_ARRAY(char, filename, mtInternal);
+
+  // open the shared memory file for the given vmid
+  fd = open_sharedmem_file(rfilename, file_flags, CHECK);
+  assert(fd != OS_ERR, "unexpected value");
+
+  if (*sizep == 0) {
+    size = sharedmem_filesize(fd, CHECK);
+    assert(size != 0, "unexpected size");
+  } else {
+    size = *sizep;
+  }
+
+  mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
+
+  // attempt to close the file - restart if it gets interrupted,
+  // but ignore other failures
+  RESTARTABLE(::close(fd), result);
+  assert(result != OS_ERR, "could not close file");
+
+  if (mapAddress == MAP_FAILED) {
+    if (PrintMiscellaneous && Verbose) {
+      warning("mmap failed: %s\n", strerror(errno));
+    }
+    THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
+              "Could not map PerfMemory");
+  }
+
+  *addr = mapAddress;
+  *sizep = size;
+
+  if (PerfTraceMemOps) {
+    tty->print("mapped " SIZE_FORMAT " bytes for vmid %d at "
+               INTPTR_FORMAT "\n", size, vmid, (void*)mapAddress);
+  }
+}
+
+// create the PerfData memory region
+//
+// This method creates the memory region used to store performance
+// data for the JVM. The memory may be created in standard or
+// shared memory.
+//
+void PerfMemory::create_memory_region(size_t size) {
+
+  if (PerfDisableSharedMem) {
+    // do not share the memory for the performance data.
+    _start = create_standard_memory(size);
+  }
+  else {
+    _start = create_shared_memory(size);
+    if (_start == NULL) {
+
+      // creation of the shared memory region failed, attempt
+      // to create a contiguous, non-shared memory region instead.
+      //
+      if (PrintMiscellaneous && Verbose) {
+        warning("Reverting to non-shared PerfMemory region.\n");
+      }
+      PerfDisableSharedMem = true;
+      _start = create_standard_memory(size);
+    }
+  }
+
+  if (_start != NULL) _capacity = size;
+
+}
+
+// delete the PerfData memory region
+//
+// This method deletes the memory region used to store performance
+// data for the JVM. The memory region indicated by the <address, size>
+// tuple will be inaccessible after a call to this method.
+//
+void PerfMemory::delete_memory_region() {
+
+  assert((start() != NULL && capacity() > 0), "verify proper state");
+
+  // If the user specifies PerfDataSaveFile, the performance data will be
+  // saved to the specified file regardless of whether PerfDataSaveToFile
+  // is specified. In other words, -XX:PerfDataSaveFile=.. overrides
+  // -XX:+PerfDataSaveToFile.
+  if (PerfDataSaveToFile || PerfDataSaveFile != NULL) {
+    save_memory_to_file(start(), capacity());
+  }
+
+  if (PerfDisableSharedMem) {
+    delete_standard_memory(start(), capacity());
+  }
+  else {
+    delete_shared_memory(start(), capacity());
+  }
+}
+
+// attach to the PerfData memory region for another JVM
+//
+// This method returns an <address, size> tuple that points to
+// a memory buffer that is kept reasonably synchronized with
+// the PerfData memory region for the indicated JVM. This
+// buffer may be kept in synchronization via shared memory
+// or some other mechanism that keeps the buffer updated.
+//
+// If the JVM chooses not to support the attachability feature,
+// this method should throw an UnsupportedOperation exception.
+//
+// This implementation utilizes named shared memory to map
+// the indicated process's PerfData memory region into this JVM's
+// address space.
+//
+void PerfMemory::attach(const char* user, int vmid, PerfMemoryMode mode, char** addrp, size_t* sizep, TRAPS) {
+
+  if (vmid == 0 || vmid == os::current_process_id()) {
+    *addrp = start();
+    *sizep = capacity();
+    return;
+  }
+
+  mmap_attach_shared(user, vmid, mode, addrp, sizep, CHECK);
+}
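
A usage sketch of the attach()/detach() pair (detach() follows below). The
vmid here is illustrative, and the call is assumed to sit inside a method
declared with TRAPS so that the CHECK macro applies, matching the
convention used throughout this file:

    // attach read-only to the PerfData region of a hypothetical process
    char*  addr = NULL;
    size_t size = 0;  // 0 => derive the size from the backing store file
    PerfMemory::attach(NULL /* derive user from vmid */, 12345,
                       PerfMemory::PERF_MODE_RO, &addr, &size, CHECK);
    // ... read performance counters from [addr, addr + size) ...
    PerfMemory::detach(addr, size, CHECK);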
+
+// detach from the PerfData memory region of another JVM
+//
+// This method detaches the PerfData memory region of another
+// JVM, specified as an <address, size> tuple of a buffer
+// in this process's address space. This method may perform
+// arbitrary actions to accomplish the detachment. The memory
+// region specified by <address, size> will be inaccessible after
+// a call to this method.
+//
+// If the JVM chooses not to support the attachability feature,
+// this method should throw an UnsupportedOperation exception.
+//
+// This implementation utilizes named shared memory to detach
+// the indicated process's PerfData memory region from this
+// process's address space.
+//
+void PerfMemory::detach(char* addr, size_t bytes, TRAPS) {
+
+  assert(addr != 0, "address sanity check");
+  assert(bytes > 0, "capacity sanity check");
+
+  if (PerfMemory::contains(addr) || PerfMemory::contains(addr + bytes - 1)) {
+    // prevent accidental detachment of this process's PerfMemory region
+    return;
+  }
+
+  unmap_shared(addr, bytes);
+}
+
+char* PerfMemory::backing_store_filename() {
+  return backing_store_file_name;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/porting_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "asm/assembler.hpp"
+#include "loadlib_aix.hpp"
+#include "porting_aix.hpp"
+#include "utilities/debug.hpp"
+
+#include <demangle.h>
+#include <sys/debug.h>
+
+//////////////////////////////////
+// Provide implementation for dladdr based on LoadedLibraries pool and
+// traceback table scan (see getFuncName).
+
+// Search traceback table in stack,
+// return procedure name from trace back table.
+#define MAX_FUNC_SEARCH_LEN 0x10000
+// Any PC below this value is considered toast.
+#define MINIMUM_VALUE_FOR_PC ((unsigned int*)0x1024)
+
+#define PTRDIFF_BYTES(p1,p2) (((ptrdiff_t)p1) - ((ptrdiff_t)p2))
+
+// Align a pointer without having to cast.
+inline char* align_ptr_up(char* ptr, intptr_t alignment) {
+  return (char*) align_size_up((intptr_t)ptr, alignment);
+}
+
+// Trace if verbose to tty.
+// I use these now instead of the Xtrace system because the latter is
+// not available at init time, hence worthless. Until we fix this, all
+// tracing here is done with -XX:+Verbose.
+#define trcVerbose(fmt, ...) { \
+  if (Verbose) { \
+    fprintf(stderr, fmt, ##__VA_ARGS__); \
+    fputc('\n', stderr); fflush(stderr); \
+  } \
+}
+#define ERRBYE(s) { trcVerbose(s); return -1; }
+
+// Unfortunately, the interface of dladdr makes the implementor
+// responsible for maintaining memory for the function name and library
+// name. I guess this is because most OSes keep those values as part
+// of the mapped executable image, ready to use. On AIX, this doesn't
+// work, so I have to keep the returned strings. For now, I do this in
+// a primitive string map. Should this turn out to be a performance
+// problem, a better hashmap has to be used.
+class fixed_strings {
+  struct node {
+    char* v;
+    node* next;
+  };
+
+  node* first;
+
+  public:
+
+  fixed_strings() : first(0) {}
+  ~fixed_strings() {
+    node* n = first;
+    while (n) {
+      node* p = n;
+      n = n->next;
+      free(p->v);
+      delete p;
+    }
+  }
+
+  char* intern(const char* s) {
+    for (node* n = first; n; n = n->next) {
+      if (strcmp(n->v, s) == 0) {
+        return n->v;
+      }
+    }
+    node* p = new node;
+    p->v = strdup(s);
+    p->next = first;
+    first = p;
+    return p->v;
+  }
+};
+
+static fixed_strings dladdr_fixed_strings;
+
+// Given a code pointer, returns the function name and the displacement.
+// Function looks for the traceback table at the end of the function.
+extern "C" int getFuncName(
+    codeptr_t pc,                    // [in] program counter
+    char* p_name, size_t namelen,    // [out] optional: function name ("" if not available)
+    int* p_displacement,             // [out] optional: displacement (-1 if not available)
+    const struct tbtable** p_tb,     // [out] optional: ptr to traceback table to get further
+                                     //                 information (NULL if not available)
+    char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
+  ) {
+  struct tbtable* tb = 0;
+  unsigned int searchcount = 0;
+
+  // initialize output parameters
+  if (p_name && namelen > 0) {
+    *p_name = '\0';
+  }
+  if (p_errmsg && errmsglen > 0) {
+    *p_errmsg = '\0';
+  }
+  if (p_displacement) {
+    *p_displacement = -1;
+  }
+  if (p_tb) {
+    *p_tb = NULL;
+  }
+
+  // weed out obvious bogus states
+  if (pc < MINIMUM_VALUE_FOR_PC) {
+    ERRBYE("invalid program counter");
+  }
+
+  codeptr_t pc2 = pc;
+
+  // make sure the pointer is word aligned.
+  pc2 = (codeptr_t) align_ptr_up((char*)pc2, 4);
+
+  // Find start of traceback table.
+  // (starts after code, is marked by word-aligned (32bit) zeros)
+  while ((*pc2 != 0) && (searchcount++ < MAX_FUNC_SEARCH_LEN)) {
+    pc2++;
+  }
+  if (*pc2 != 0) {
+    ERRBYE("could not find traceback table within 5000 bytes of program counter");
+  }
+  //
+  // Set up addressability to the traceback table
+  //
+  tb = (struct tbtable*) (pc2 + 1);
+
+  // Is this really a traceback table? No way to be sure but
+  // some indicators we can check.
+  if (tb->tb.lang >= 0xf && tb->tb.lang <= 0xfb) {
+    // Language specifiers, go from 0 (C) to 14 (Objective C).
+    // According to spec, 0xf-0xfa reserved, 0xfb-0xff reserved for ibm.
+    ERRBYE("not a traceback table");
+  }
+
+  // Existence of fields in the tbtable extension are contingent upon
+  // specific fields in the base table.  Check for their existence so
+  // that we can address the function name if it exists.
+  pc2 = (codeptr_t) tb +
+    sizeof(struct tbtable_short)/sizeof(int);
+  if (tb->tb.fixedparms != 0 || tb->tb.floatparms != 0)
+    pc2++;
+
+  if (tb->tb.has_tboff == TRUE) {
+
+    // I want to know the displacement
+    const unsigned int tb_offset = *pc2;
+    codeptr_t start_of_procedure =
+      (codeptr_t)(((char*)tb) - 4 - tb_offset);  // (-4 to omit leading 0000)
+
+    // Weed out the cases where we did find the wrong traceback table.
+    if (pc < start_of_procedure) {
+      ERRBYE("could not find (the real) traceback table within 5000 bytes of program counter");
+    }
+
+    // return the displacement
+    if (p_displacement) {
+      (*p_displacement) = (int) PTRDIFF_BYTES(pc, start_of_procedure);
+    }
+
+    pc2++;
+  } else {
+    // return -1 for displacement
+    if (p_displacement) {
+      (*p_displacement) = -1;
+    }
+  }
+
+  if (tb->tb.int_hndl == TRUE)
+    pc2++;
+
+  if (tb->tb.has_ctl == TRUE)
+    pc2 += (*pc2) + 1; // don't care
+
+  //
+  // return function name if it exists.
+  //
+  if (p_name && namelen > 0) {
+    if (tb->tb.name_present) {
+      char buf[256];
+      const short l = MIN2<short>(*((short*)pc2), sizeof(buf) - 1);
+      memcpy(buf, (char*)pc2 + sizeof(short), l);
+      buf[l] = '\0';
+
+      p_name[0] = '\0';
+
+      // If it is a C++ name, try to demangle it using the Demangle interface (see demangle.h).
+      char* rest;
+      Name* const name = Demangle(buf, rest);
+      if (name) {
+        const char* const demangled_name = name->Text();
+        if (demangled_name) {
+          strncpy(p_name, demangled_name, namelen-1);
+          p_name[namelen-1] = '\0';
+        }
+        delete name;
+      }
+
+      // Fallback: if demangling did not work, just provide the unmangled name.
+      if (p_name[0] == '\0') {
+        strncpy(p_name, buf, namelen-1);
+        p_name[namelen-1] = '\0';
+      }
+
+    } else {
+      strncpy(p_name, "<nameless function>", namelen-1);
+      p_name[namelen-1] = '\0';
+    }
+  }
+  // Return traceback table, if user wants it.
+  if (p_tb) {
+    (*p_tb) = tb;
+  }
+
+  return 0;
+}
+
+// Special implementation of dladdr for AIX based on LoadedLibraries.
+// Note: dladdr returns non-zero for ok, 0 for error!
+// Note: dladdr is not POSIX, but a non-standard GNU extension. So this tries to
+//   fulfill the contract of dladdr on Linux (see http://linux.die.net/man/3/dladdr)
+// Note: addr may be either an AIX function descriptor or a real code pointer
+//   to the entry of a function.
+extern "C"
+int dladdr(void* addr, Dl_info* info) {
+
+  if (!addr) {
+    return 0;
+  }
+
+  assert(info, "");
+
+  int rc = 0;
+
+  const char* const ZEROSTRING = "";
+
+  // Always return a string, even if a "" one. Linux dladdr manpage
+  // does not say anything about returning NULL
+  info->dli_fname = ZEROSTRING;
+  info->dli_sname = ZEROSTRING;
+  info->dli_saddr = NULL;
+
+  address p = (address) addr;
+  const LoadedLibraryModule* lib = NULL;
+
+  enum { noclue, code, data } type = noclue;
+
+  trcVerbose("dladdr(%p)...", p);
+
+  // Note: input address may be a function. I accept both a pointer to
+  // the entry of a function and a pointer to the function descriptor.
+  // (see ppc64 ABI)
+  lib = LoadedLibraries::find_for_text_address(p);
+  if (lib) {
+    type = code;
+  }
+
+  if (!lib) {
+    // Not a pointer into any text segment. Is it a function descriptor?
+    const FunctionDescriptor* const pfd = (const FunctionDescriptor*) p;
+    p = pfd->entry();
+    if (p) {
+      lib = LoadedLibraries::find_for_text_address(p);
+      if (lib) {
+        type = code;
+      }
+    }
+  }
+
+  if (!lib) {
+    // Neither direct code pointer nor function descriptor. A data ptr?
+    p = (address)addr;
+    lib = LoadedLibraries::find_for_data_address(p);
+    if (lib) {
+      type = data;
+    }
+  }
+
+  // If we did find the shared library this address belongs to (either
+  // code or data segment) resolve library path and, if possible, the
+  // symbol name.
+  if (lib) {
+    const char* const interned_libpath =
+      dladdr_fixed_strings.intern(lib->get_fullpath());
+    if (interned_libpath) {
+      info->dli_fname = interned_libpath;
+    }
+
+    if (type == code) {
+
+      // For code symbols resolve function name and displacement. Use
+      // displacement to calc start of function.
+      char funcname[256] = "";
+      int displacement = 0;
+
+      if (getFuncName((codeptr_t) p, funcname, sizeof(funcname), &displacement,
+                      NULL, NULL, 0) == 0) {
+        if (funcname[0] != '\0') {
+          const char* const interned = dladdr_fixed_strings.intern(funcname);
+          info->dli_sname = interned;
+          trcVerbose("... function name: %s ...", interned);
+        }
+
+        // From the displacement calculate the start of the function.
+        if (displacement != -1) {
+          info->dli_saddr = p - displacement;
+        } else {
+          info->dli_saddr = p;
+        }
+      } else {
+
+        // No traceback table found. Just assume the pointer is it.
+        info->dli_saddr = p;
+
+      }
+
+    } else if (type == data) {
+
+      // For data symbols.
+      info->dli_saddr = p;
+
+    } else {
+      ShouldNotReachHere();
+    }
+
+    rc = 1; // success: return 1 [sic]
+
+  }
+
+  // sanity checks.
+  if (rc) {
+    assert(info->dli_fname, "");
+    assert(info->dli_sname, "");
+    assert(info->dli_saddr, "");
+  }
+
+  return rc; // 0 for error, non-zero for success [sic]
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/porting_aix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include <stddef.h>
+
+// Header file to contain porting-relevant code which does not have a
+// home anywhere else and which can not go into os_<platform>.h because
+// that header is included inside the os class definition, hence all
+// its content is part of the os class.
+
+// Aix' own version of dladdr().
+// This function tries to mimic dladdr(3) on Linux
+// (see http://linux.die.net/man/3/dladdr)
+// dladdr(3) is not POSIX but a GNU extension, and is not available on AIX.
+//
+// Differences between AIX dladdr and Linux dladdr:
+//
+// 1) Dl_info.dli_fbase: can never work, is disabled.
+//   A loaded image on AIX is divided into multiple segments, at least two
+//   (text and data) but potentially also far more. This is because the loader
+//   may load each member into its own segment, as for instance happens with libC.a.
+// 2) Dl_info.dli_sname: This only works for code symbols (functions); for data, a
+//   zero-length string is returned ("").
+// 3) Dl_info.dli_saddr: For code, this will return the entry point of the function,
+//   not the function descriptor.
+
+typedef struct {
+  const char *dli_fname; // file path of loaded library
+  // void *dli_fbase;
+  const char *dli_sname; // symbol name; "" if not known
+  void *dli_saddr;       // address of *entry* of function; not function descriptor;
+} Dl_info;
+
+// Note: we export this to use it inside J2SE too
+#ifdef __cplusplus
+extern "C"
+#endif
+int dladdr(void *addr, Dl_info *info);
+
+
+// The semantics in this file are thus that codeptr_t is a *real code ptr*.
+// This means that any function taking codeptr_t as arguments will assume
+// a real codeptr and won't handle function descriptors (e.g. getFuncName),
+// whereas functions taking address as args will deal with function
+// descriptors (e.g. os::dll_address_to_library_name).
+typedef unsigned int* codeptr_t;
+
+// helper function - given a program counter, tries to locate the traceback
+// table and returns info from it (most importantly the function name, the
+// displacement of the pc inside the function, and the traceback table itself).
+#ifdef __cplusplus
+extern "C"
+#endif
+int getFuncName(
+      codeptr_t pc,                    // [in] program counter
+      char* p_name, size_t namelen,    // [out] optional: user provided buffer for the function name
+      int* p_displacement,             // [out] optional: displacement
+      const struct tbtable** p_tb,     // [out] optional: ptr to traceback table to get further information
+      char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
+    );
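
A short usage sketch of the two entry points declared above; the wrapper
function and the code address passed to it are hypothetical, for
illustration only:

    // hypothetical helper: resolve and inspect an arbitrary code address
    static void resolve_addr_sketch(void* some_code_address) {
      // dladdr returns non-zero on success, 0 on error
      Dl_info info;
      if (dladdr(some_code_address, &info)) {
        // info.dli_fname: library path; info.dli_sname: symbol name or ""
      }

      // getFuncName wants a *real* code pointer, not a function descriptor
      char name[256];
      int displacement = -1;
      if (getFuncName((codeptr_t)some_code_address, name, sizeof(name),
                      &displacement, NULL, NULL, 0) == 0 && name[0] != '\0') {
        // name holds the (possibly demangled) function name;
        // some_code_address minus displacement is the start of the function
      }
    }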
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/threadCritical_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadCritical.hpp"
+#include "thread_aix.inline.hpp"
+
+// put OS-includes here
+# include <pthread.h>
+
+//
+// See threadCritical.hpp for details of this class.
+//
+
+static pthread_t             tc_owner = 0;
+static pthread_mutex_t       tc_mutex = PTHREAD_MUTEX_INITIALIZER;
+static int                   tc_count = 0;
+
+void ThreadCritical::initialize() {
+}
+
+void ThreadCritical::release() {
+}
+
+ThreadCritical::ThreadCritical() {
+  pthread_t self = pthread_self();
+  if (self != tc_owner) {
+    int ret = pthread_mutex_lock(&tc_mutex);
+    guarantee(ret == 0, "fatal error with pthread_mutex_lock()");
+    assert(tc_count == 0, "Lock acquired with illegal reentry count.");
+    tc_owner = self;
+  }
+  tc_count++;
+}
+
+ThreadCritical::~ThreadCritical() {
+  assert(tc_owner == pthread_self(), "must have correct owner");
+  assert(tc_count > 0, "must have correct count");
+
+  tc_count--;
+  if (tc_count == 0) {
+    tc_owner = 0;
+    int ret = pthread_mutex_unlock(&tc_mutex);
+    guarantee(ret == 0, "fatal error with pthread_mutex_unlock()");
+  }
+}
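
A usage sketch of the resulting scoped lock; because the owning thread and
a nesting count are tracked, the same thread may enter nested
ThreadCritical sections freely (function names are illustrative):

    static void inner() {
      ThreadCritical tc;  // same owner: no mutex call, tc_count becomes 2
    }

    static void outer() {
      ThreadCritical tc;  // acquires tc_mutex, tc_count becomes 1
      inner();            // re-entry is cheap
    }                     // destructor drops tc_count to 0 and unlocks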
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/thread_aix.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_AIX_VM_THREAD_AIX_INLINE_HPP
+#define OS_AIX_VM_THREAD_AIX_INLINE_HPP
+
+#include "runtime/atomic.hpp"
+#include "runtime/prefetch.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/threadLocalStorage.hpp"
+
+#include "atomic_aix_ppc.inline.hpp"
+#include "orderAccess_aix_ppc.inline.hpp"
+#include "prefetch_aix_ppc.inline.hpp"
+
+// Contains inlined functions for class Thread and ThreadLocalStorage
+
+inline void ThreadLocalStorage::pd_invalidate_all() {} // nothing to do
+
+#endif // OS_AIX_VM_THREAD_AIX_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/vmError_aix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/os.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/vmError.hpp"
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <signal.h>
+
+void VMError::show_message_box(char *buf, int buflen) {
+  bool yes;
+  do {
+    error_string(buf, buflen);
+    int len = (int)strlen(buf);
+    char *p = &buf[len];
+
+    jio_snprintf(p, buflen - len,
+                 "\n\n"
+                 "Do you want to debug the problem?\n\n"
+                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
+                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
+                 "Otherwise, press RETURN to abort...",
+                 os::current_process_id(),
+                 os::current_thread_id(), thread_self());
+
+    yes = os::message_box("Unexpected Error", buf);
+
+    if (yes) {
+      // yes, user asked VM to launch debugger
+      jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
+
+      os::fork_and_exec(buf);
+      yes = false;
+    }
+  } while (yes);
+}
+
+// Handle all synchronous signals which may happen during signal handling,
+// not just SIGSEGV and SIGBUS.
+static const int SIGNALS[] = { SIGSEGV, SIGBUS, SIGILL, SIGFPE, SIGTRAP }; // add more if needed
+static const int NUM_SIGNALS = sizeof(SIGNALS) / sizeof(int);
+
+// Space for our "saved" signal flags and handlers
+static int resettedSigflags[NUM_SIGNALS];
+static address resettedSighandler[NUM_SIGNALS];
+
+static void save_signal(int idx, int sig) {
+  struct sigaction sa;
+  sigaction(sig, NULL, &sa);
+  resettedSigflags[idx]   = sa.sa_flags;
+  resettedSighandler[idx] = (sa.sa_flags & SA_SIGINFO)
+                              ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
+                              : CAST_FROM_FN_PTR(address, sa.sa_handler);
+}
+
+int VMError::get_resetted_sigflags(int sig) {
+  // Handle all program errors.
+  for (int i = 0; i < NUM_SIGNALS; i++) {
+    if (SIGNALS[i] == sig) {
+      return resettedSigflags[i];
+    }
+  }
+  return -1;
+}
+
+address VMError::get_resetted_sighandler(int sig) {
+  // Handle all program errors.
+  for (int i = 0; i < NUM_SIGNALS; i++) {
+    if (SIGNALS[i] == sig) {
+      return resettedSighandler[i];
+    }
+  }
+  return NULL;
+}
+
+static void crash_handler(int sig, siginfo_t* info, void* ucVoid) {
+  // Unmask current signal.
+  sigset_t newset;
+  sigemptyset(&newset);
+  sigaddset(&newset, sig);
+
+  Unimplemented();
+}
+
+void VMError::reset_signal_handlers() {
+  sigset_t newset;
+  sigemptyset(&newset);
+
+  for (int i = 0; i < NUM_SIGNALS; i++) {
+    save_signal(i, SIGNALS[i]);
+    os::signal(SIGNALS[i], CAST_FROM_FN_PTR(void *, crash_handler));
+    sigaddset(&newset, SIGNALS[i]);
+  }
+
+  sigthreadmask(SIG_UNBLOCK, &newset, NULL);
+}
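
The state recorded by save_signal() lets error reporting show what a
signal's disposition was before reset_signal_handlers() replaced it; a
sketch of a hypothetical query:

    // illustrative only: what was SIGSEGV's handler before the reset?
    address old_handler = VMError::get_resetted_sighandler(SIGSEGV);
    int     old_flags   = VMError::get_resetted_sigflags(SIGSEGV);
    if (old_handler != NULL) {
      // e.g. print old_handler and old_flags into the error log
    }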
--- a/src/os/bsd/dtrace/hotspot.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot {
-  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__initialization__required(char*, uintptr_t, void*, intptr_t);
-  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
-  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
-  probe vm__init__begin();
-  probe vm__init__end();
-  probe vm__shutdown();
-  probe vmops__request(char*, uintptr_t, int);
-  probe vmops__begin(char*, uintptr_t, int);
-  probe vmops__end(char*, uintptr_t, int);
-  probe gc__begin(uintptr_t);
-  probe gc__end();
-  probe mem__pool__gc__begin(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe mem__pool__gc__end(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__probe__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__probe__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__sleep__begin(long long);
-  probe thread__sleep__end(int);
-  probe thread__yield();
-  probe thread__park__begin(uintptr_t, int, long long);
-  probe thread__park__end(uintptr_t);
-  probe thread__unpark(uintptr_t);
-  probe method__compile__begin(
-    const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t, const char*, uintptr_t); 
-  probe method__compile__end(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
-    char*, uintptr_t, uintptr_t); 
-  probe compiled__method__load(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
-  probe compiled__method__unload(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
-  probe monitor__probe__waited(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
-
-  probe object__alloc(int, char*, uintptr_t, uintptr_t);
-  probe method__entry(
-    int, char*, int, char*, int, char*, int);
-  probe method__return(
-    int, char*, int, char*, int, char*, int);
-};
-
-#pragma D attributes Evolving/Evolving/Common provider hotspot provider
-#pragma D attributes Private/Private/Unknown provider hotspot module
-#pragma D attributes Private/Private/Unknown provider hotspot function
-#pragma D attributes Evolving/Evolving/Common provider hotspot name
-#pragma D attributes Evolving/Evolving/Common provider hotspot args
--- a/src/os/bsd/dtrace/hotspot_jni.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,506 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot_jni {
-  probe AllocObject__entry(void*, void*);
-  probe AllocObject__return(void*);
-  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
-  probe AttachCurrentThreadAsDaemon__return(uint32_t);
-  probe AttachCurrentThread__entry(void*, void**, void*);
-  probe AttachCurrentThread__return(uint32_t);
-  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodA__return(uintptr_t);
-  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethod__return(uintptr_t);
-  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodV__return(uintptr_t);
-  probe CallByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallByteMethodA__return(char);
-  probe CallByteMethod__entry(void*, void*, uintptr_t);
-  probe CallByteMethod__return(char);
-  probe CallByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallByteMethodV__return(char);
-  probe CallCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallCharMethodA__return(uint16_t);
-  probe CallCharMethod__entry(void*, void*, uintptr_t);
-  probe CallCharMethod__return(uint16_t);
-  probe CallCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallCharMethodV__return(uint16_t);
-  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodA__return();
-  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethod__return();
-  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodV__return();
-  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodA__return();
-  probe CallFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallFloatMethod__return();
-  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodV__return();
-  probe CallIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallIntMethodA__return(uint32_t);
-  probe CallIntMethod__entry(void*, void*, uintptr_t);
-  probe CallIntMethod__return(uint32_t);
-  probe CallIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallIntMethodV__return(uint32_t);
-  probe CallLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallLongMethodA__return(uintptr_t);
-  probe CallLongMethod__entry(void*, void*, uintptr_t);
-  probe CallLongMethod__return(uintptr_t);
-  probe CallLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallLongMethodV__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
-  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethod__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
-  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodA__return(char);
-  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethod__return(char);
-  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodV__return(char);
-  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodA__return(uint16_t);
-  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethod__return(uint16_t);
-  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodV__return(uint16_t);
-  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodA__return();
-  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethod__return();
-  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodV__return();
-  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodA__return();
-  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethod__return();
-  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodV__return();
-  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodA__return(uint32_t);
-  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethod__return(uint32_t);
-  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodV__return(uint32_t);
-  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodA__return(uintptr_t);
-  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethod__return(uintptr_t);
-  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodV__return(uintptr_t);
-  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodA__return(void*);
-  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethod__return(void*);
-  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodV__return(void*);
-  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodA__return(uint16_t);
-  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethod__return(uint16_t);
-  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodV__return(uint16_t);
-  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethodA__return();
-  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethod__return();
-  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);  
-  probe CallNonvirtualVoidMethodV__return();
-  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodA__return(void*);
-  probe CallObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallObjectMethod__return(void*);
-  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodV__return(void*);
-  probe CallShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallShortMethodA__return(uint16_t);
-  probe CallShortMethod__entry(void*, void*, uintptr_t);
-  probe CallShortMethod__return(uint16_t);
-  probe CallShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallShortMethodV__return(uint16_t);
-  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodA__return(uintptr_t);
-  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethod__return(uintptr_t);
-  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodV__return(uintptr_t);
-  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodA__return(char);
-  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethod__return(char);
-  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodV__return(char);
-  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodA__return(uint16_t);
-  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethod__return(uint16_t);
-  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodV__return(uint16_t);
-  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodA__return();
-  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethod__return();
-  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodV__return();
-  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodA__return();
-  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethod__return();
-  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodV__return();
-  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodA__return(uint32_t);
-  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethod__return(uint32_t);
-  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodV__return(uint32_t);
-  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodA__return(uintptr_t);
-  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethod__return(uintptr_t);
-  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodV__return(uintptr_t);
-  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodA__return(void*);
-  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethod__return(void*);
-  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodV__return(void*);
-  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodA__return(uint16_t);
-  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethod__return(uint16_t);
-  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodV__return(uint16_t);
-  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethodA__return();
-  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethod__return(); 
-  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallStaticVoidMethodV__return();
-  probe CallVoidMethodA__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodA__return();
-  probe CallVoidMethod__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethod__return(); 
-  probe CallVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodV__return();
-  probe CreateJavaVM__entry(void**, void**, void*);
-  probe CreateJavaVM__return(uint32_t);
-  probe DefineClass__entry(void*, const char*, void*, char*, uintptr_t);
-  probe DefineClass__return(void*);
-  probe DeleteGlobalRef__entry(void*, void*);
-  probe DeleteGlobalRef__return();
-  probe DeleteLocalRef__entry(void*, void*);
-  probe DeleteLocalRef__return();
-  probe DeleteWeakGlobalRef__entry(void*, void*);
-  probe DeleteWeakGlobalRef__return();
-  probe DestroyJavaVM__entry(void*);
-  probe DestroyJavaVM__return(uint32_t);
-  probe DetachCurrentThread__entry(void*);
-  probe DetachCurrentThread__return(uint32_t);
-  probe EnsureLocalCapacity__entry(void*, uint32_t);
-  probe EnsureLocalCapacity__return(uint32_t);
-  probe ExceptionCheck__entry(void*);
-  probe ExceptionCheck__return(uintptr_t);
-  probe ExceptionClear__entry(void*);
-  probe ExceptionClear__return();
-  probe ExceptionDescribe__entry(void*);  
-  probe ExceptionDescribe__return();
-  probe ExceptionOccurred__entry(void*);
-  probe ExceptionOccurred__return(void*);
-  probe FatalError__entry(void* env, const char*);
-  probe FindClass__entry(void*, const char*);
-  probe FindClass__return(void*);
-  probe FromReflectedField__entry(void*, void*);
-  probe FromReflectedField__return(uintptr_t);
-  probe FromReflectedMethod__entry(void*, void*);
-  probe FromReflectedMethod__return(uintptr_t);
-  probe GetArrayLength__entry(void*, void*);
-  probe GetArrayLength__return(uintptr_t);
-  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetBooleanArrayElements__return(uintptr_t*);
-  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetBooleanArrayRegion__return();
-  probe GetBooleanField__entry(void*, void*, uintptr_t);
-  probe GetBooleanField__return(uintptr_t);
-  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetByteArrayElements__return(char*);
-  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetByteArrayRegion__return();
-  probe GetByteField__entry(void*, void*, uintptr_t);
-  probe GetByteField__return(char);
-  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetCharArrayElements__return(uint16_t*);
-  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetCharArrayRegion__return();
-  probe GetCharField__entry(void*, void*, uintptr_t);
-  probe GetCharField__return(uint16_t);
-  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
-  probe GetCreatedJavaVMs__return(uintptr_t);
-  probe GetDefaultJavaVMInitArgs__entry(void*);
-  probe GetDefaultJavaVMInitArgs__return(uint32_t);
-  probe GetDirectBufferAddress__entry(void*, void*);
-  probe GetDirectBufferAddress__return(void*);
-  probe GetDirectBufferCapacity__entry(void*, void*);
-  probe GetDirectBufferCapacity__return(uintptr_t);
-  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetDoubleArrayElements__return(double*);
-  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
-  probe GetDoubleArrayRegion__return();
-  probe GetDoubleField__entry(void*, void*, uintptr_t);
-  probe GetDoubleField__return();
-  probe GetEnv__entry(void*, void*, uint32_t);
-  probe GetEnv__return(uint32_t);
-  probe GetFieldID__entry(void*, void*, const char*, const char*);
-  probe GetFieldID__return(uintptr_t);
-  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetFloatArrayElements__return(float*);
-  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
-  probe GetFloatArrayRegion__return();
-  probe GetFloatField__entry(void*, void*, uintptr_t);
-  probe GetFloatField__return();
-  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetIntArrayElements__return(uint32_t*);
-  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
-  probe GetIntArrayRegion__return();
-  probe GetIntField__entry(void*, void*, uintptr_t);
-  probe GetIntField__return(uint32_t);
-  probe GetJavaVM__entry(void*, void**);
-  probe GetJavaVM__return(uint32_t);
-  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetLongArrayElements__return(uintptr_t*);
-  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetLongArrayRegion__return();
-  probe GetLongField__entry(void*, void*, uintptr_t);
-  probe GetLongField__return(uintptr_t);
-  probe GetMethodID__entry(void*, void*, const char*, const char*);
-  probe GetMethodID__return(uintptr_t);
-  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
-  probe GetObjectArrayElement__return(void*);
-  probe GetObjectClass__entry(void*, void*);
-  probe GetObjectClass__return(void*);
-  probe GetObjectField__entry(void*, void*, uintptr_t);
-  probe GetObjectField__return(void*);
-  probe GetObjectRefType__entry(void*, void*);
-  probe GetObjectRefType__return(void*);
-  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
-  probe GetPrimitiveArrayCritical__return(void*);
-  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetShortArrayElements__return(uint16_t*);
-  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetShortArrayRegion__return();
-  probe GetShortField__entry(void*, void*, uintptr_t);
-  probe GetShortField__return(uint16_t);
-  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
-  probe GetStaticBooleanField__return(uintptr_t);
-  probe GetStaticByteField__entry(void*, void*, uintptr_t);
-  probe GetStaticByteField__return(char);
-  probe GetStaticCharField__entry(void*, void*, uintptr_t);
-  probe GetStaticCharField__return(uint16_t);
-  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe GetStaticDoubleField__return();
-  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
-  probe GetStaticFieldID__return(uintptr_t);
-  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe GetStaticFloatField__return();
-  probe GetStaticIntField__entry(void*, void*, uintptr_t);
-  probe GetStaticIntField__return(uint32_t);
-  probe GetStaticLongField__entry(void*, void*, uintptr_t);
-  probe GetStaticLongField__return(uintptr_t);
-  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
-  probe GetStaticMethodID__return(uintptr_t);
-  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
-  probe GetStaticObjectField__return(void*);
-  probe GetStaticShortField__entry(void*, void*, uintptr_t);
-  probe GetStaticShortField__return(uint16_t);
-  probe GetStringChars__entry(void*, void*, uintptr_t*);
-  probe GetStringChars__return(const uint16_t*);
-  probe GetStringCritical__entry(void*, void*, uintptr_t*);
-  probe GetStringCritical__return(const uint16_t*);
-  probe GetStringLength__entry(void*, void*);
-  probe GetStringLength__return(uintptr_t);
-  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetStringRegion__return();
-  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
-  probe GetStringUTFChars__return(const char*);
-  probe GetStringUTFLength__entry(void*, void*);
-  probe GetStringUTFLength__return(uintptr_t);
-  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetStringUTFRegion__return();
-  probe GetSuperclass__entry(void*, void*);
-  probe GetSuperclass__return(void*);
-  probe GetVersion__entry(void*);
-  probe GetVersion__return(uint32_t);
-  probe IsAssignableFrom__entry(void*, void*, void*);
-  probe IsAssignableFrom__return(uintptr_t);
-  probe IsInstanceOf__entry(void*, void*, void*);
-  probe IsInstanceOf__return(uintptr_t);
-  probe IsSameObject__entry(void*, void*, void*);
-  probe IsSameObject__return(uintptr_t);
-  probe MonitorEnter__entry(void*, void*);
-  probe MonitorEnter__return(uint32_t);
-  probe MonitorExit__entry(void*, void*);
-  probe MonitorExit__return(uint32_t);
-  probe NewBooleanArray__entry(void*, uintptr_t);
-  probe NewBooleanArray__return(void*);
-  probe NewByteArray__entry(void*, uintptr_t);
-  probe NewByteArray__return(void*);
-  probe NewCharArray__entry(void*, uintptr_t);
-  probe NewCharArray__return(void*);
-  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
-  probe NewDirectByteBuffer__return(void*);
-  probe NewDoubleArray__entry(void*, uintptr_t);
-  probe NewDoubleArray__return(void*);
-  probe NewFloatArray__entry(void*, uintptr_t);
-  probe NewFloatArray__return(void*);
-  probe NewGlobalRef__entry(void*, void*);
-  probe NewGlobalRef__return(void*);
-  probe NewIntArray__entry(void*, uintptr_t);
-  probe NewIntArray__return(void*);
-  probe NewLocalRef__entry(void*, void*);
-  probe NewLocalRef__return(void*);
-  probe NewLongArray__entry(void*, uintptr_t);
-  probe NewLongArray__return(void*);
-  probe NewObjectA__entry(void*, void*, uintptr_t);  
-  probe NewObjectA__return(void*);
-  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
-  probe NewObjectArray__return(void*);
-  probe NewObject__entry(void*, void*, uintptr_t); 
-  probe NewObject__return(void*);
-  probe NewObjectV__entry(void*, void*, uintptr_t);  
-  probe NewObjectV__return(void*);
-  probe NewShortArray__entry(void*, uintptr_t);
-  probe NewShortArray__return(void*);
-  probe NewString__entry(void*, const uint16_t*, uintptr_t);
-  probe NewString__return(void*);
-  probe NewStringUTF__entry(void*, const char*);
-  probe NewStringUTF__return(void*);
-  probe NewWeakGlobalRef__entry(void*, void*);
-  probe NewWeakGlobalRef__return(void*);
-  probe PopLocalFrame__entry(void*, void*);
-  probe PopLocalFrame__return(void*);
-  probe PushLocalFrame__entry(void*, uint32_t);
-  probe PushLocalFrame__return(uint32_t);
-  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);  
-  probe RegisterNatives__return(uint32_t);
-  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseBooleanArrayElements__return();
-  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
-  probe ReleaseByteArrayElements__return();
-  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseCharArrayElements__return();
-  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
-  probe ReleaseDoubleArrayElements__return();
-  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
-  probe ReleaseFloatArrayElements__return();
-  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
-  probe ReleaseIntArrayElements__return();
-  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseLongArrayElements__return();
-  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
-  probe ReleasePrimitiveArrayCritical__return();
-  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseShortArrayElements__return();
-  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringChars__return();
-  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringCritical__return();
-  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
-  probe ReleaseStringUTFChars__return();
-  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetBooleanArrayRegion__return();
-  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetBooleanField__return();
-  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
-  probe SetByteArrayRegion__return();
-  probe SetByteField__entry(void*, void*, uintptr_t, char);
-  probe SetByteField__return();
-  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetCharArrayRegion__return();
-  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetCharField__return();
-  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
-  probe SetDoubleArrayRegion__return();
-  probe SetDoubleField__entry(void*, void*, uintptr_t);
-  probe SetDoubleField__return();
-  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
-  probe SetFloatArrayRegion__return();
-  probe SetFloatField__entry(void*, void*, uintptr_t);
-  probe SetFloatField__return();
-  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
-  probe SetIntArrayRegion__return();
-  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetIntField__return();
-  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetLongArrayRegion__return();
-  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetLongField__return();
-  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectArrayElement__return();
-  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectField__return();
-  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetShortArrayRegion__return();
-  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetShortField__return();
-  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticBooleanField__return();
-  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
-  probe SetStaticByteField__return();
-  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticCharField__return();
-  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe SetStaticDoubleField__return();
-  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe SetStaticFloatField__return();
-  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetStaticIntField__return();
-  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticLongField__return();
-  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetStaticObjectField__return();
-  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticShortField__return();
-  probe Throw__entry(void*, void*);
-  probe Throw__return(intptr_t);
-  probe ThrowNew__entry(void*, void*, const char*);  
-  probe ThrowNew__return(intptr_t);  
-  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedField__return(void*);
-  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedMethod__return(void*);
-  probe UnregisterNatives__entry(void*, void*);  
-  probe UnregisterNatives__return(uint32_t);
-};
-
-#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
-#pragma D attributes Private/Private/Unknown provider hotspot_jni module
-#pragma D attributes Private/Private/Unknown provider hotspot_jni function
-#pragma D attributes Standard/Standard/Common provider hotspot_jni name
-#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
-
--- a/src/os/bsd/dtrace/hs_private.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hs_private {
-  probe hashtable__new_entry(void*, uint32_t, uintptr_t, void*); 
-  probe safepoint__begin();
-  probe safepoint__end();
-  probe cms__initmark__begin();
-  probe cms__initmark__end();
-  probe cms__remark__begin();
-  probe cms__remark__end();
-};
-
-#pragma D attributes Private/Private/Common provider hs_private provider
-#pragma D attributes Private/Private/Unknown provider hs_private module
-#pragma D attributes Private/Private/Unknown provider hs_private function
-#pragma D attributes Private/Private/Common provider hs_private name
-#pragma D attributes Private/Private/Common provider hs_private args
-
--- a/src/os/bsd/dtrace/jvm_dtrace.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/bsd/dtrace/jvm_dtrace.c	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os/bsd/vm/os_bsd.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,7 +219,7 @@
 static char cpu_arch[] = "amd64";
 #elif defined(ARM)
 static char cpu_arch[] = "arm";
-#elif defined(PPC)
+#elif defined(PPC32)
 static char cpu_arch[] = "ppc";
 #elif defined(SPARC)
 #  ifdef _LP64
@@ -994,7 +994,7 @@
 
 
 jlong os::javaTimeNanos() {
-  if (Bsd::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     struct timespec tp;
     int status = Bsd::clock_gettime(CLOCK_MONOTONIC, &tp);
     assert(status == 0, "gettime error");
@@ -1010,7 +1010,7 @@
 }
 
 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
-  if (Bsd::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     info_ptr->max_value = ALL_64_BITS;
 
     // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
@@ -1557,6 +1557,17 @@
 }
 #endif /* !__APPLE__ */
 
+void* os::get_default_process_handle() {
+#ifdef __APPLE__
+  // MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY
+  // to avoid finding unexpected symbols on second (or later)
+  // loads of a library.
+  return (void*)::dlopen(NULL, RTLD_FIRST);
+#else
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+#endif
+}
+
 // XXX: Do we need a lock around this as per Linux?
 void* os::dll_lookup(void* handle, const char* name) {
   return dlsym(handle, name);
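
A quick illustration of the RTLD_FIRST / RTLD_LAZY distinction drawn in the new os::get_default_process_handle() above. This standalone sketch is not part of the changeset; it only contrasts the two flags, and on Linux it needs -rdynamic for the dlsym() lookup to succeed:

  #include <dlfcn.h>
  #include <cstdio>

  int main() {
  #ifdef __APPLE__
    // RTLD_FIRST limits dlsym() on this handle to the main executable, so
    // libraries loaded later cannot leak unexpected symbols into lookups.
    void* self = dlopen(NULL, RTLD_FIRST);
  #else
    void* self = dlopen(NULL, RTLD_LAZY);   // searches the whole global scope
  #endif
    if (self == NULL) {
      std::fprintf(stderr, "dlopen: %s\n", dlerror());
      return 1;
    }
    std::printf("main -> %p\n", dlsym(self, "main"));
    dlclose(self);
    return 0;
  }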
@@ -1666,58 +1677,12 @@
   st->cr();
 }
 
-// Taken from /usr/include/bits/siginfo.h  Supposed to be architecture specific
-// but they're the same for all the bsd arch that we support
-// and they're the same for solaris but there's no common place to put this.
-const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
-                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
-                          "ILL_COPROC", "ILL_BADSTK" };
-
-const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
-                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
-                          "FPE_FLTINV", "FPE_FLTSUB", "FPE_FLTDEN" };
-
-const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
-
-const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
-
 void os::print_siginfo(outputStream* st, void* siginfo) {
-  st->print("siginfo:");
-
-  const int buflen = 100;
-  char buf[buflen];
-  siginfo_t *si = (siginfo_t*)siginfo;
-  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
-  if (si->si_errno != 0 && strerror_r(si->si_errno, buf, buflen) == 0) {
-    st->print("si_errno=%s", buf);
-  } else {
-    st->print("si_errno=%d", si->si_errno);
-  }
-  const int c = si->si_code;
-  assert(c > 0, "unexpected si_code");
-  switch (si->si_signo) {
-  case SIGILL:
-    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGFPE:
-    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGSEGV:
-    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGBUS:
-    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  default:
-    st->print(", si_code=%d", si->si_code);
-    // no si_addr
-  }
-
-  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
+  const siginfo_t* si = (const siginfo_t*)siginfo;
+
+  os::Posix::print_siginfo_brief(st, si);
+
+  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
       UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
     if (mapinfo->is_in_shared_space(si->si_addr)) {
@@ -1777,12 +1742,14 @@
   if (rp == NULL)
     return;
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and
-    // construct a path to the JVM being overridden.
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so"
+    // or "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.dylib". If "/jre/lib/"
+    // appears at the right place in the string, then assume we are
+    // installed in a JDK and we're done. Otherwise, check for a
+    // JAVA_HOME environment variable and construct a path to the JVM
+    // being overridden.
 
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
@@ -1821,7 +1788,7 @@
         jrelib_p = buf + len;
         snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
         if (0 != access(buf, F_OK)) {
-          snprintf(jrelib_p, buflen-len, "");
+          snprintf(jrelib_p, buflen-len, "%s", "");
         }
 
         // If the path exists within JAVA_HOME, add the JVM library name
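
The snprintf change above is small but deliberate: an empty format literal, snprintf(buf, n, ""), draws -Wformat-zero-length from GCC and Clang, which is presumably what this silences, while routing the empty string through "%s" truncates the buffer identically without the warning. A minimal sketch:

  #include <cstdio>

  int main() {
    char buf[32] = "/server";                   // hypothetical leftover suffix
    // std::snprintf(buf, sizeof(buf), "");     // warns: zero-length format
    std::snprintf(buf, sizeof(buf), "%s", ""); // same effect, warning-free
    std::printf("buf is now \"%s\"\n", buf);   // prints: buf is now ""
    return 0;
  }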
@@ -2546,88 +2513,21 @@
   RESTARTABLE_RETURN_INT(::read(fd, buf, nBytes));
 }
 
-// TODO-FIXME: reconcile Solaris' os::sleep with the bsd variation.
-// Solaris uses poll(), bsd uses park().
-// Poll() is likely a better choice, assuming that Thread.interrupt()
-// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
-// SIGSEGV, see 4355769.
-
-int os::sleep(Thread* thread, jlong millis, bool interruptible) {
-  assert(thread == Thread::current(),  "thread consistency check");
-
-  ParkEvent * const slp = thread->_SleepEvent ;
-  slp->reset() ;
-  OrderAccess::fence() ;
-
-  if (interruptible) {
-    jlong prevtime = javaTimeNanos();
-
-    for (;;) {
-      if (os::is_interrupted(thread, true)) {
-        return OS_INTRPT;
-      }
-
-      jlong newtime = javaTimeNanos();
-
-      if (newtime - prevtime < 0) {
-        // time moving backwards, should only happen if no monotonic clock
-        // not a guarantee() because JVM should not abort on kernel/glibc bugs
-        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
-      } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
-      }
-
-      if(millis <= 0) {
-        return OS_OK;
-      }
-
-      prevtime = newtime;
-
-      {
-        assert(thread->is_Java_thread(), "sanity check");
-        JavaThread *jt = (JavaThread *) thread;
-        ThreadBlockInVM tbivm(jt);
-        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
-
-        jt->set_suspend_equivalent();
-        // cleared by handle_special_suspend_equivalent_condition() or
-        // java_suspend_self() via check_and_wait_while_suspended()
-
-        slp->park(millis);
-
-        // were we externally suspended while we were waiting?
-        jt->check_and_wait_while_suspended();
-      }
-    }
-  } else {
-    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
-    jlong prevtime = javaTimeNanos();
-
-    for (;;) {
-      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
-      // the 1st iteration ...
-      jlong newtime = javaTimeNanos();
-
-      if (newtime - prevtime < 0) {
-        // time moving backwards, should only happen if no monotonic clock
-        // not a guarantee() because JVM should not abort on kernel/glibc bugs
-        assert(!Bsd::supports_monotonic_clock(), "time moving backwards");
-      } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
-      }
-
-      if(millis <= 0) break ;
-
-      prevtime = newtime;
-      slp->park(millis);
-    }
-    return OS_OK ;
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
   }
-}
-
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
@@ -3008,50 +2908,6 @@
   guarantee(osthread->sr.is_running(), "Must be running!");
 }
 
-////////////////////////////////////////////////////////////////////////////////
-// interrupt support
-
-void os::interrupt(Thread* thread) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
-    "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  if (!osthread->interrupted()) {
-    osthread->set_interrupted(true);
-    // More than one thread can get here with the same value of osthread,
-    // resulting in multiple notifications.  We do, however, want the store
-    // to interrupted() to be visible to other threads before we execute unpark().
-    OrderAccess::fence();
-    ParkEvent * const slp = thread->_SleepEvent ;
-    if (slp != NULL) slp->unpark() ;
-  }
-
-  // For JSR166. Unpark even if interrupt status already was set
-  if (thread->is_Java_thread())
-    ((JavaThread*)thread)->parker()->unpark();
-
-  ParkEvent * ev = thread->_ParkEvent ;
-  if (ev != NULL) ev->unpark() ;
-
-}
-
-bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
-    "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  bool interrupted = osthread->interrupted();
-
-  if (interrupted && clear_interrupted) {
-    osthread->set_interrupted(false);
-    // consider thread->_SleepEvent->reset() ... optional optimization
-  }
-
-  return interrupted;
-}
-
 ///////////////////////////////////////////////////////////////////////////////////
 // signal handling (except suspend/resume)
 
@@ -3392,7 +3248,8 @@
     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
   }
 
-  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
+  st->print(", sa_mask[0]=");
+  os::Posix::print_signal_set_short(st, &sa.sa_mask);
 
   address rh = VMError::get_resetted_sighandler(sig);
   // May be, handler was resetted by VMError?
@@ -3401,7 +3258,8 @@
     sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
   }
 
-  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);
+  st->print(", sa_flags=");
+  os::Posix::print_sa_flags(st, sa.sa_flags);
 
   // Check: is it our handler?
   if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
--- a/src/os/bsd/vm/os_bsd.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,9 @@
 
 // Bsd_OS defines the interface to Bsd operating systems
 
+// Information about the protection of the page at address '0' on this os.
+static bool zero_page_read_protected() { return true; }
+
 /* pthread_getattr_np comes with BsdThreads-0.9-7 on RedHat 7.1 */
 typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);
 
@@ -131,10 +134,6 @@
   // Real-time clock functions
   static void clock_init(void);
 
-  static inline bool supports_monotonic_clock() {
-    return _clock_gettime != NULL;
-  }
-
   static int clock_gettime(clockid_t clock_id, struct timespec *tp) {
     return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
   }
--- a/src/os/bsd/vm/os_bsd.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -286,4 +286,8 @@
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 
+inline bool os::supports_monotonic_clock() {
+  return Bsd::_clock_gettime != NULL;
+}
+
 #endif // OS_BSD_VM_OS_BSD_INLINE_HPP
--- a/src/os/linux/vm/decoder_linux.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/linux/vm/decoder_linux.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -32,6 +32,12 @@
   char* result;
   size_t size = (size_t)buflen;
 
+#ifdef PPC64
+  // On PPC64 ElfDecoder::decode() may return a dot (.) prefixed name
+  // (see elfFuncDescTable.hpp for details)
+  if (symbol && *symbol == '.') symbol += 1;
+#endif
+
   // Don't pass buf to __cxa_demangle. In case of the 'buf' is too small,
   // __cxa_demangle will call system "realloc" for additional memory, which
   // may use different malloc/realloc mechanism that allocates 'buf'.
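
The PPC64 guard added above reflects the ELFv1 function-descriptor convention referenced in elfFuncDescTable.hpp: the plain name labels the descriptor while the code entry point is the dot-prefixed ".name", so the dot must be dropped before demangling. A standalone sketch (the mangled name is illustrative, not taken from the changeset):

  #include <cxxabi.h>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    const char* symbol = "._ZN7oopDesc5klassEv";  // hypothetical dot-prefixed symbol
    if (symbol && *symbol == '.') symbol += 1;    // same guard as the patch above
    int status = 0;
    char* result = abi::__cxa_demangle(symbol, NULL, NULL, &status);
    std::printf("%s\n", status == 0 ? result : symbol);  // oopDesc::klass()
    std::free(result);                            // free(NULL) is safe on failure
    return 0;
  }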
--- a/src/os/linux/vm/os_linux.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,7 +139,7 @@
 
 // For diagnostics to print a message once. see run_periodic_checks
 static sigset_t check_signal_done;
-static bool check_signals = true;;
+static bool check_signals = true;
 
 static pid_t _initial_pid = 0;
 
@@ -257,8 +257,10 @@
 static char cpu_arch[] = "amd64";
 #elif defined(ARM)
 static char cpu_arch[] = "arm";
-#elif defined(PPC)
+#elif defined(PPC32)
 static char cpu_arch[] = "ppc";
+#elif defined(PPC64)
+static char cpu_arch[] = "ppc64";
 #elif defined(SPARC)
 #  ifdef _LP64
 static char cpu_arch[] = "sparcv9";
@@ -530,6 +532,9 @@
   sigaddset(&unblocked_sigs, SIGSEGV);
   sigaddset(&unblocked_sigs, SIGBUS);
   sigaddset(&unblocked_sigs, SIGFPE);
+#if defined(PPC64)
+  sigaddset(&unblocked_sigs, SIGTRAP);
+#endif
   sigaddset(&unblocked_sigs, SR_signum);
 
   if (!ReduceSignalUsage) {
@@ -1451,7 +1456,7 @@
 }
 
 jlong os::javaTimeNanos() {
-  if (Linux::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     struct timespec tp;
     int status = Linux::clock_gettime(CLOCK_MONOTONIC, &tp);
     assert(status == 0, "gettime error");
@@ -1467,7 +1472,7 @@
 }
 
 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
-  if (Linux::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     info_ptr->max_value = ALL_64_BITS;
 
     // CLOCK_MONOTONIC - amount of time since some arbitrary point in the past
@@ -2104,6 +2109,9 @@
   return res;
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
 
 static bool _print_ascii_file(const char* filename, outputStream* st) {
   int fd = ::open(filename, O_RDONLY);
@@ -2257,58 +2265,12 @@
   st->cr();
 }
 
-// Taken from /usr/include/bits/siginfo.h  Supposed to be architecture specific
-// but they're the same for all the linux arch that we support
-// and they're the same for solaris but there's no common place to put this.
-const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
-                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
-                          "ILL_COPROC", "ILL_BADSTK" };
-
-const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
-                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
-                          "FPE_FLTINV", "FPE_FLTSUB", "FPE_FLTDEN" };
-
-const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
-
-const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
-
 void os::print_siginfo(outputStream* st, void* siginfo) {
-  st->print("siginfo:");
-
-  const int buflen = 100;
-  char buf[buflen];
-  siginfo_t *si = (siginfo_t*)siginfo;
-  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
-  if (si->si_errno != 0 && strerror_r(si->si_errno, buf, buflen) == 0) {
-    st->print("si_errno=%s", buf);
-  } else {
-    st->print("si_errno=%d", si->si_errno);
-  }
-  const int c = si->si_code;
-  assert(c > 0, "unexpected si_code");
-  switch (si->si_signo) {
-  case SIGILL:
-    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGFPE:
-    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGSEGV:
-    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGBUS:
-    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  default:
-    st->print(", si_code=%d", si->si_code);
-    // no si_addr
-  }
-
-  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
+  const siginfo_t* si = (const siginfo_t*)siginfo;
+
+  os::Posix::print_siginfo_brief(st, si);
+
+  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
       UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
     if (mapinfo->is_in_shared_space(si->si_addr)) {
@@ -2338,6 +2300,9 @@
   print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
   print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
   print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
+#if defined(PPC64)
+  print_signal_handler(st, SIGTRAP, buf, buflen);
+#endif
 }
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
@@ -2368,13 +2333,14 @@
   if (rp == NULL)
     return;
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
+    // If "/jre/lib/" appears at the right place in the string, then
+    // assume we are installed in a JDK and we're done. Otherwise, check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like libjvm.so is installed there (append a fake suffix
+    // hotspot/libjvm.so).
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -2996,7 +2962,9 @@
 
   unsigned char vec[1];
   unsigned imin = 1, imax = pages + 1, imid;
-  int mincore_return_value;
+  int mincore_return_value = 0;
+
+  assert(imin <= imax, "Unexpected page size");
 
   while (imin < imax) {
     imid = (imax + imin) / 2;
@@ -3789,88 +3757,33 @@
   return ::read(fd, buf, nBytes);
 }
 
-// TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
-// Solaris uses poll(), linux uses park().
-// Poll() is likely a better choice, assuming that Thread.interrupt()
-// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
-// SIGSEGV, see 4355769.
-
-int os::sleep(Thread* thread, jlong millis, bool interruptible) {
-  assert(thread == Thread::current(),  "thread consistency check");
-
-  ParkEvent * const slp = thread->_SleepEvent ;
-  slp->reset() ;
-  OrderAccess::fence() ;
-
-  if (interruptible) {
-    jlong prevtime = javaTimeNanos();
-
-    for (;;) {
-      if (os::is_interrupted(thread, true)) {
-        return OS_INTRPT;
-      }
-
-      jlong newtime = javaTimeNanos();
-
-      if (newtime - prevtime < 0) {
-        // time moving backwards, should only happen if no monotonic clock
-        // not a guarantee() because JVM should not abort on kernel/glibc bugs
-        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
-      } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
-      }
-
-      if(millis <= 0) {
-        return OS_OK;
-      }
-
-      prevtime = newtime;
-
-      {
-        assert(thread->is_Java_thread(), "sanity check");
-        JavaThread *jt = (JavaThread *) thread;
-        ThreadBlockInVM tbivm(jt);
-        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
-
-        jt->set_suspend_equivalent();
-        // cleared by handle_special_suspend_equivalent_condition() or
-        // java_suspend_self() via check_and_wait_while_suspended()
-
-        slp->park(millis);
-
-        // were we externally suspended while we were waiting?
-        jt->check_and_wait_while_suspended();
-      }
-    }
-  } else {
-    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
-    jlong prevtime = javaTimeNanos();
-
-    for (;;) {
-      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
-      // the 1st iteration ...
-      jlong newtime = javaTimeNanos();
-
-      if (newtime - prevtime < 0) {
-        // time moving backwards, should only happen if no monotonic clock
-        // not a guarantee() because JVM should not abort on kernel/glibc bugs
-        assert(!Linux::supports_monotonic_clock(), "time moving backwards");
-      } else {
-        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
-      }
-
-      if(millis <= 0) break ;
-
-      prevtime = newtime;
-      slp->park(millis);
-    }
-    return OS_OK ;
-  }
-}
-
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+//
+// Short sleep, direct OS call.
+//
+// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
+// sched_yield(2) will actually give up the CPU:
+//
+//   * Alone on this particular CPU, keeps running.
+//   * Before the introduction of "skip_buddy" with "compat_yield" disabled
+//     (pre 2.6.39).
+//
+// So calling this with 0 is an alternative.
+//
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
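
Since both the BSD and Linux hunks introduce the same primitive, a runnable distillation of the naked_short_sleep() code above (same logic, hoisted out of the os:: class) makes the zero-case explicit:

  #include <time.h>
  #include <assert.h>

  // Mirrors os::naked_short_sleep() above: un-interruptible, sub-second only.
  static void naked_short_sleep_sketch(long ms) {
    assert(ms < 1000);
    struct timespec req;
    req.tv_sec = 0;
    // A zero request still becomes a 1 ns sleep, so the thread genuinely
    // enters the scheduler rather than trusting sched_yield() to yield.
    req.tv_nsec = (ms > 0) ? (ms % 1000) * 1000000L : 1;
    nanosleep(&req, NULL);   // EINTR deliberately not retried, as above
  }

  int main() {
    naked_short_sleep_sketch(1);   // ~1 ms
    naked_short_sleep_sketch(0);   // minimal trip through the scheduler
    return 0;
  }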
@@ -4196,50 +4109,6 @@
   guarantee(osthread->sr.is_running(), "Must be running!");
 }
 
-////////////////////////////////////////////////////////////////////////////////
-// interrupt support
-
-void os::interrupt(Thread* thread) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
-    "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  if (!osthread->interrupted()) {
-    osthread->set_interrupted(true);
-    // More than one thread can get here with the same value of osthread,
-    // resulting in multiple notifications.  We do, however, want the store
-    // to interrupted() to be visible to other threads before we execute unpark().
-    OrderAccess::fence();
-    ParkEvent * const slp = thread->_SleepEvent ;
-    if (slp != NULL) slp->unpark() ;
-  }
-
-  // For JSR166. Unpark even if interrupt status already was set
-  if (thread->is_Java_thread())
-    ((JavaThread*)thread)->parker()->unpark();
-
-  ParkEvent * ev = thread->_ParkEvent ;
-  if (ev != NULL) ev->unpark() ;
-
-}
-
-bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
-    "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  bool interrupted = osthread->interrupted();
-
-  if (interrupted && clear_interrupted) {
-    osthread->set_interrupted(false);
-    // consider thread->_SleepEvent->reset() ... optional optimization
-  }
-
-  return interrupted;
-}
-
 ///////////////////////////////////////////////////////////////////////////////////
 // signal handling (except suspend/resume)
 
@@ -4467,6 +4336,9 @@
     set_signal_handler(SIGBUS, true);
     set_signal_handler(SIGILL, true);
     set_signal_handler(SIGFPE, true);
+#if defined(PPC64)
+    set_signal_handler(SIGTRAP, true);
+#endif
     set_signal_handler(SIGXFSZ, true);
 
     if (libjsig_is_loaded) {
@@ -4560,7 +4432,8 @@
     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
   }
 
-  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
+  st->print(", sa_mask[0]=");
+  os::Posix::print_signal_set_short(st, &sa.sa_mask);
 
   address rh = VMError::get_resetted_sighandler(sig);
   // May be, handler was resetted by VMError?
@@ -4569,7 +4442,8 @@
     sa.sa_flags = VMError::get_resetted_sigflags(sig) & SIGNIFICANT_SIGNAL_MASK;
   }
 
-  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);
+  st->print(", sa_flags=");
+  os::Posix::print_sa_flags(st, sa.sa_flags);
 
   // Check: is it our handler?
   if(handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler) ||
@@ -4607,7 +4481,9 @@
   DO_SIGNAL_CHECK(SIGBUS);
   DO_SIGNAL_CHECK(SIGPIPE);
   DO_SIGNAL_CHECK(SIGXFSZ);
-
+#if defined(PPC64)
+  DO_SIGNAL_CHECK(SIGTRAP);
+#endif
 
   // ReduceSignalUsage allows the user to override these handlers
   // see comments at the very top and jvm_solaris.h
@@ -4757,7 +4633,7 @@
     fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
   }
   // Only set the clock if CLOCK_MONOTONIC is available
-  if (Linux::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
       if (status == EINVAL) {
         warning("Unable to use monotonic clock with relative timed-waits" \
@@ -4929,7 +4805,7 @@
     // the future if the appropriate cleanup code can be added to the
     // VM_Exit VMOperation's doit method.
     if (atexit(perfMemory_exit_helper) != 0) {
-      warning("os::init2 atexit(perfMemory_exit_helper) failed");
+      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
     }
   }
 
@@ -4940,8 +4816,7 @@
 }
 
 // this is called at the end of vm_initialization
-void os::init_3(void)
-{
+void os::init_3(void) {
 #ifdef JAVASE_EMBEDDED
   // Start the MemNotifyThread
   if (LowMemoryProtection) {
@@ -5588,7 +5463,7 @@
     seconds = 50000000;
   }
 
-  if (os::Linux::supports_monotonic_clock()) {
+  if (os::supports_monotonic_clock()) {
     struct timespec now;
     int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
     assert_status(status == 0, status, "clock_gettime");
@@ -5805,7 +5680,7 @@
   assert (time > 0, "convertTime");
   time_t max_secs = 0;
 
-  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+  if (!os::supports_monotonic_clock() || isAbsolute) {
     struct timeval now;
     int status = gettimeofday(&now, NULL);
     assert(status == 0, "gettimeofday");
--- a/src/os/linux/vm/os_linux.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/linux/vm/os_linux.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,9 @@
 /* pthread_getattr_np comes with LinuxThreads-0.9-7 on RedHat 7.1 */
 typedef int (*pthread_getattr_func_type) (pthread_t, pthread_attr_t *);
 
+// Information about the protection of the page at address '0' on this os.
+static bool zero_page_read_protected() { return true; }
+
 class Linux {
   friend class os;
   friend class TestReserveMemorySpecial;
@@ -203,10 +206,6 @@
   // fast POSIX clocks support
   static void fast_thread_clock_init(void);
 
-  static inline bool supports_monotonic_clock() {
-    return _clock_gettime != NULL;
-  }
-
   static int clock_gettime(clockid_t clock_id, struct timespec *tp) {
     return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
   }
--- a/src/os/linux/vm/os_linux.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/linux/vm/os_linux.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -288,4 +288,8 @@
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 
+inline bool os::supports_monotonic_clock() {
+  return Linux::_clock_gettime != NULL;
+}
+
 #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
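
The inline added above (and its BSD twin earlier) keeps the original test, only moving it behind a platform-defined os:: entry point: clock_gettime is resolved dynamically once, and a non-NULL function pointer means "monotonic clock available". A self-contained sketch of that probe-then-use pattern; the library name and fallback here are assumptions, not HotSpot's actual init code:

  #include <dlfcn.h>
  #include <time.h>
  #include <cstdio>

  typedef int (*clock_gettime_fn)(clockid_t, struct timespec*);
  static clock_gettime_fn _clock_gettime = NULL;

  static inline bool supports_monotonic_clock() {
    return _clock_gettime != NULL;   // same test as the inline above
  }

  int main() {
    void* handle = dlopen("librt.so.1", RTLD_LAZY);       // historical home of clock_gettime
    if (handle == NULL) handle = dlopen(NULL, RTLD_LAZY); // newer glibc: in libc itself
    _clock_gettime = (clock_gettime_fn)dlsym(handle, "clock_gettime");
    if (supports_monotonic_clock()) {
      struct timespec tp;
      if (_clock_gettime(CLOCK_MONOTONIC, &tp) == 0)
        std::printf("monotonic: %ld.%09ld\n", (long)tp.tv_sec, tp.tv_nsec);
    } else {
      std::printf("no monotonic clock; callers fall back to gettimeofday()\n");
    }
    return 0;
  }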
--- a/src/os/linux/vm/perfMemory_linux.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -891,8 +891,16 @@
   FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the give vmid
-  fd = open_sharedmem_file(rfilename, file_flags, CHECK);
-  assert(fd != OS_ERR, "unexpected value");
+  fd = open_sharedmem_file(rfilename, file_flags, THREAD);
+
+  if (fd == OS_ERR) {
+    return;
+  }
+
+  if (HAS_PENDING_EXCEPTION) {
+    ::close(fd);
+    return;
+  }
 
   if (*sizep == 0) {
     size = sharedmem_filesize(fd, CHECK);
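
The perfMemory fix above trades the CHECK macro, which returns as soon as an exception is pending and therefore skips any cleanup, for an explicit THREAD argument plus manual checks, so the freshly opened descriptor can be closed on the exceptional path. A hypothetical standalone analogue of that resource-safety pattern (none of these names are HotSpot's):

  #include <cstdio>

  static const int OS_ERR = -1;
  static bool g_pending = false;   // stand-in for HAS_PENDING_EXCEPTION

  // Fails either early (returns OS_ERR, no resource) or late (resource
  // exists but an "exception" is pending) -- the two cases the patched
  // caller now distinguishes instead of asserting success.
  static int open_resource(bool fail_late) {
    if (fail_late) { g_pending = true; return 3; }
    return OS_ERR;
  }

  int main() {
    int fd = open_resource(true);
    if (fd == OS_ERR) return 1;      // early failure: nothing to release
    if (g_pending) {
      std::printf("closing fd %d before propagating the error\n", fd);
      return 1;                      // late failure: release, then unwind
    }
    std::printf("using fd %d\n", fd);
    return 0;
  }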
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hotspot.d	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *  
+ */
+
+provider hotspot {
+  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
+  probe class__initialization__required(char*, uintptr_t, void*, intptr_t);
+  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
+  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
+  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
+  probe vm__init__begin();
+  probe vm__init__end();
+  probe vm__shutdown();
+  probe vmops__request(char*, uintptr_t, int);
+  probe vmops__begin(char*, uintptr_t, int);
+  probe vmops__end(char*, uintptr_t, int);
+  probe gc__begin(uintptr_t);
+  probe gc__end();
+  probe mem__pool__gc__begin(
+    char*, uintptr_t, char*, uintptr_t, 
+    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe mem__pool__gc__end(
+    char*, uintptr_t, char*, uintptr_t, 
+    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+  probe thread__sleep__begin(long long);
+  probe thread__sleep__end(int);
+  probe thread__yield();
+  probe thread__park__begin(uintptr_t, int, long long);
+  probe thread__park__end(uintptr_t);
+  probe thread__unpark(uintptr_t);
+  probe method__compile__begin(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
+  probe method__compile__end(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
+    char*, uintptr_t, uintptr_t); 
+  probe compiled__method__load(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
+  probe compiled__method__unload(
+    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
+  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
+  probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
+  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
+
+  probe object__alloc(int, char*, uintptr_t, uintptr_t);
+  probe method__entry(
+    int, char*, int, char*, int, char*, int);
+  probe method__return(
+    int, char*, int, char*, int, char*, int);
+};
+
+#pragma D attributes Evolving/Evolving/Common provider hotspot provider
+#pragma D attributes Private/Private/Unknown provider hotspot module
+#pragma D attributes Private/Private/Unknown provider hotspot function
+#pragma D attributes Evolving/Evolving/Common provider hotspot name
+#pragma D attributes Evolving/Evolving/Common provider hotspot args
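
On the VM side these probes are fired through generated macros rather than written by hand; as a general, hedged illustration of how a USDT provider like the one above is triggered from C/C++, here is a sketch using <sys/sdt.h> (provider and probe names are invented for the example and are not HotSpot's dtrace glue):

  #include <sys/sdt.h>
  #include <cstdio>

  static void vm_init_sketch() {
    DTRACE_PROBE(myvm, vm__init__begin);   // no-argument probe, like vm__init__begin above
    std::printf("initializing...\n");
    DTRACE_PROBE(myvm, vm__init__end);
  }

  int main() {
    vm_init_sketch();
    return 0;
  }

A consumer would match these as myvm*:::vm-init-begin and myvm*:::vm-init-end, since doubled underscores in probe names map to hyphens.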
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hotspot_jni.d	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *  
+ */
+
+provider hotspot_jni {
+  probe AllocObject__entry(void*, void*);
+  probe AllocObject__return(void*);
+  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
+  probe AttachCurrentThreadAsDaemon__return(uint32_t);
+  probe AttachCurrentThread__entry(void*, void**, void*);
+  probe AttachCurrentThread__return(uint32_t);
+  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethodA__return(uintptr_t);
+  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethod__return(uintptr_t);
+  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
+  probe CallBooleanMethodV__return(uintptr_t);
+  probe CallByteMethodA__entry(void*, void*, uintptr_t);
+  probe CallByteMethodA__return(char);
+  probe CallByteMethod__entry(void*, void*, uintptr_t);
+  probe CallByteMethod__return(char);
+  probe CallByteMethodV__entry(void*, void*, uintptr_t);
+  probe CallByteMethodV__return(char);
+  probe CallCharMethodA__entry(void*, void*, uintptr_t);
+  probe CallCharMethodA__return(uint16_t);
+  probe CallCharMethod__entry(void*, void*, uintptr_t);
+  probe CallCharMethod__return(uint16_t);
+  probe CallCharMethodV__entry(void*, void*, uintptr_t);
+  probe CallCharMethodV__return(uint16_t);
+  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethodA__return();
+  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethod__return();
+  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
+  probe CallDoubleMethodV__return();
+  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
+  probe CallFloatMethodA__return();
+  probe CallFloatMethod__entry(void*, void*, uintptr_t);
+  probe CallFloatMethod__return();
+  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
+  probe CallFloatMethodV__return();
+  probe CallIntMethodA__entry(void*, void*, uintptr_t);
+  probe CallIntMethodA__return(uint32_t);
+  probe CallIntMethod__entry(void*, void*, uintptr_t);
+  probe CallIntMethod__return(uint32_t);
+  probe CallIntMethodV__entry(void*, void*, uintptr_t);
+  probe CallIntMethodV__return(uint32_t);
+  probe CallLongMethodA__entry(void*, void*, uintptr_t);
+  probe CallLongMethodA__return(uintptr_t);
+  probe CallLongMethod__entry(void*, void*, uintptr_t);
+  probe CallLongMethod__return(uintptr_t);
+  probe CallLongMethodV__entry(void*, void*, uintptr_t);
+  probe CallLongMethodV__return(uintptr_t);
+  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
+  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethod__return(uintptr_t);
+  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
+  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethodA__return(char);
+  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethod__return(char);
+  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualByteMethodV__return(char);
+  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethodA__return(uint16_t);
+  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethod__return(uint16_t);
+  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualCharMethodV__return(uint16_t);
+  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethodA__return();
+  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethod__return();
+  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualDoubleMethodV__return();
+  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethodA__return();
+  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethod__return();
+  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualFloatMethodV__return();
+  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethodA__return(uint32_t);
+  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethod__return(uint32_t);
+  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualIntMethodV__return(uint32_t);
+  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethodA__return(uintptr_t);
+  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethod__return(uintptr_t);
+  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualLongMethodV__return(uintptr_t);
+  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethodA__return(void*);
+  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethod__return(void*);
+  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualObjectMethodV__return(void*);
+  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethodA__return(uint16_t);
+  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethod__return(uint16_t);
+  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualShortMethodV__return(uint16_t);
+  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualVoidMethodA__return();
+  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualVoidMethod__return();
+  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);
+  probe CallNonvirtualVoidMethodV__return();
+  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
+  probe CallObjectMethodA__return(void*);
+  probe CallObjectMethod__entry(void*, void*, uintptr_t);
+  probe CallObjectMethod__return(void*);
+  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
+  probe CallObjectMethodV__return(void*);
+  probe CallShortMethodA__entry(void*, void*, uintptr_t);
+  probe CallShortMethodA__return(uint16_t);
+  probe CallShortMethod__entry(void*, void*, uintptr_t);
+  probe CallShortMethod__return(uint16_t);
+  probe CallShortMethodV__entry(void*, void*, uintptr_t);
+  probe CallShortMethodV__return(uint16_t);
+  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethodA__return(uintptr_t);
+  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethod__return(uintptr_t);
+  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticBooleanMethodV__return(uintptr_t);
+  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethodA__return(char);
+  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethod__return(char);
+  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticByteMethodV__return(char);
+  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethodA__return(uint16_t);
+  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethod__return(uint16_t);
+  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticCharMethodV__return(uint16_t);
+  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethodA__return();
+  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethod__return();
+  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticDoubleMethodV__return();
+  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethodA__return();
+  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethod__return();
+  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticFloatMethodV__return();
+  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethodA__return(uint32_t);
+  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethod__return(uint32_t);
+  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticIntMethodV__return(uint32_t);
+  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethodA__return(uintptr_t);
+  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethod__return(uintptr_t);
+  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticLongMethodV__return(uintptr_t);
+  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethodA__return(void*);
+  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethod__return(void*);
+  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticObjectMethodV__return(void*);
+  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethodA__return(uint16_t);
+  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethod__return(uint16_t);
+  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticShortMethodV__return(uint16_t);
+  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
+  probe CallStaticVoidMethodA__return();
+  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
+  probe CallStaticVoidMethod__return();
+  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);
+  probe CallStaticVoidMethodV__return();
+  probe CallVoidMethodA__entry(void*, void*, uintptr_t);
+  probe CallVoidMethodA__return();
+  probe CallVoidMethod__entry(void*, void*, uintptr_t);
+  probe CallVoidMethod__return();
+  probe CallVoidMethodV__entry(void*, void*, uintptr_t);
+  probe CallVoidMethodV__return();
+  probe CreateJavaVM__entry(void**, void**, void*);
+  probe CreateJavaVM__return(uint32_t);
+  probe DefineClass__entry(void*, const char*, void*, char*, uintptr_t);
+  probe DefineClass__return(void*);
+  probe DeleteGlobalRef__entry(void*, void*);
+  probe DeleteGlobalRef__return();
+  probe DeleteLocalRef__entry(void*, void*);
+  probe DeleteLocalRef__return();
+  probe DeleteWeakGlobalRef__entry(void*, void*);
+  probe DeleteWeakGlobalRef__return();
+  probe DestroyJavaVM__entry(void*);
+  probe DestroyJavaVM__return(uint32_t);
+  probe DetachCurrentThread__entry(void*);
+  probe DetachCurrentThread__return(uint32_t);
+  probe EnsureLocalCapacity__entry(void*, uint32_t);
+  probe EnsureLocalCapacity__return(uint32_t);
+  probe ExceptionCheck__entry(void*);
+  probe ExceptionCheck__return(uintptr_t);
+  probe ExceptionClear__entry(void*);
+  probe ExceptionClear__return();
+  probe ExceptionDescribe__entry(void*);
+  probe ExceptionDescribe__return();
+  probe ExceptionOccurred__entry(void*);
+  probe ExceptionOccurred__return(void*);
+  probe FatalError__entry(void* env, const char*);
+  probe FindClass__entry(void*, const char*);
+  probe FindClass__return(void*);
+  probe FromReflectedField__entry(void*, void*);
+  probe FromReflectedField__return(uintptr_t);
+  probe FromReflectedMethod__entry(void*, void*);
+  probe FromReflectedMethod__return(uintptr_t);
+  probe GetArrayLength__entry(void*, void*);
+  probe GetArrayLength__return(uintptr_t);
+  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetBooleanArrayElements__return(uintptr_t*);
+  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
+  probe GetBooleanArrayRegion__return();
+  probe GetBooleanField__entry(void*, void*, uintptr_t);
+  probe GetBooleanField__return(uintptr_t);
+  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetByteArrayElements__return(char*);
+  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
+  probe GetByteArrayRegion__return();
+  probe GetByteField__entry(void*, void*, uintptr_t);
+  probe GetByteField__return(char);
+  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetCharArrayElements__return(uint16_t*);
+  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetCharArrayRegion__return();
+  probe GetCharField__entry(void*, void*, uintptr_t);
+  probe GetCharField__return(uint16_t);
+  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
+  probe GetCreatedJavaVMs__return(uintptr_t);
+  probe GetDefaultJavaVMInitArgs__entry(void*);
+  probe GetDefaultJavaVMInitArgs__return(uint32_t);
+  probe GetDirectBufferAddress__entry(void*, void*);
+  probe GetDirectBufferAddress__return(void*);
+  probe GetDirectBufferCapacity__entry(void*, void*);
+  probe GetDirectBufferCapacity__return(uintptr_t);
+  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetDoubleArrayElements__return(double*);
+  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
+  probe GetDoubleArrayRegion__return();
+  probe GetDoubleField__entry(void*, void*, uintptr_t);
+  probe GetDoubleField__return();
+  probe GetEnv__entry(void*, void*, uint32_t);
+  probe GetEnv__return(uint32_t);
+  probe GetFieldID__entry(void*, void*, const char*, const char*);
+  probe GetFieldID__return(uintptr_t);
+  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetFloatArrayElements__return(float*);
+  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
+  probe GetFloatArrayRegion__return();
+  probe GetFloatField__entry(void*, void*, uintptr_t);
+  probe GetFloatField__return();
+  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetIntArrayElements__return(uint32_t*);
+  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
+  probe GetIntArrayRegion__return();
+  probe GetIntField__entry(void*, void*, uintptr_t);
+  probe GetIntField__return(uint32_t);
+  probe GetJavaVM__entry(void*, void**);
+  probe GetJavaVM__return(uint32_t);
+  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetLongArrayElements__return(uintptr_t*);
+  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
+  probe GetLongArrayRegion__return();
+  probe GetLongField__entry(void*, void*, uintptr_t);
+  probe GetLongField__return(uintptr_t);
+  probe GetMethodID__entry(void*, void*, const char*, const char*);
+  probe GetMethodID__return(uintptr_t);
+  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
+  probe GetObjectArrayElement__return(void*);
+  probe GetObjectClass__entry(void*, void*);
+  probe GetObjectClass__return(void*);
+  probe GetObjectField__entry(void*, void*, uintptr_t);
+  probe GetObjectField__return(void*);
+  probe GetObjectRefType__entry(void*, void*);
+  probe GetObjectRefType__return(void*);
+  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
+  probe GetPrimitiveArrayCritical__return(void*);
+  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
+  probe GetShortArrayElements__return(uint16_t*);
+  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetShortArrayRegion__return();
+  probe GetShortField__entry(void*, void*, uintptr_t);
+  probe GetShortField__return(uint16_t);
+  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
+  probe GetStaticBooleanField__return(uintptr_t);
+  probe GetStaticByteField__entry(void*, void*, uintptr_t);
+  probe GetStaticByteField__return(char);
+  probe GetStaticCharField__entry(void*, void*, uintptr_t);
+  probe GetStaticCharField__return(uint16_t);
+  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
+  probe GetStaticDoubleField__return();
+  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
+  probe GetStaticFieldID__return(uintptr_t);
+  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
+  probe GetStaticFloatField__return();
+  probe GetStaticIntField__entry(void*, void*, uintptr_t);
+  probe GetStaticIntField__return(uint32_t);
+  probe GetStaticLongField__entry(void*, void*, uintptr_t);
+  probe GetStaticLongField__return(uintptr_t);
+  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
+  probe GetStaticMethodID__return(uintptr_t);
+  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
+  probe GetStaticObjectField__return(void*);
+  probe GetStaticShortField__entry(void*, void*, uintptr_t);
+  probe GetStaticShortField__return(uint16_t);
+  probe GetStringChars__entry(void*, void*, uintptr_t*);
+  probe GetStringChars__return(const uint16_t*);
+  probe GetStringCritical__entry(void*, void*, uintptr_t*);
+  probe GetStringCritical__return(const uint16_t*);
+  probe GetStringLength__entry(void*, void*);
+  probe GetStringLength__return(uintptr_t);
+  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
+  probe GetStringRegion__return();
+  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
+  probe GetStringUTFChars__return(const char*);
+  probe GetStringUTFLength__entry(void*, void*);
+  probe GetStringUTFLength__return(uintptr_t);
+  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
+  probe GetStringUTFRegion__return();
+  probe GetSuperclass__entry(void*, void*);
+  probe GetSuperclass__return(void*);
+  probe GetVersion__entry(void*);
+  probe GetVersion__return(uint32_t);
+  probe IsAssignableFrom__entry(void*, void*, void*);
+  probe IsAssignableFrom__return(uintptr_t);
+  probe IsInstanceOf__entry(void*, void*, void*);
+  probe IsInstanceOf__return(uintptr_t);
+  probe IsSameObject__entry(void*, void*, void*);
+  probe IsSameObject__return(uintptr_t);
+  probe MonitorEnter__entry(void*, void*);
+  probe MonitorEnter__return(uint32_t);
+  probe MonitorExit__entry(void*, void*);
+  probe MonitorExit__return(uint32_t);
+  probe NewBooleanArray__entry(void*, uintptr_t);
+  probe NewBooleanArray__return(void*);
+  probe NewByteArray__entry(void*, uintptr_t);
+  probe NewByteArray__return(void*);
+  probe NewCharArray__entry(void*, uintptr_t);
+  probe NewCharArray__return(void*);
+  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
+  probe NewDirectByteBuffer__return(void*);
+  probe NewDoubleArray__entry(void*, uintptr_t);
+  probe NewDoubleArray__return(void*);
+  probe NewFloatArray__entry(void*, uintptr_t);
+  probe NewFloatArray__return(void*);
+  probe NewGlobalRef__entry(void*, void*);
+  probe NewGlobalRef__return(void*);
+  probe NewIntArray__entry(void*, uintptr_t);
+  probe NewIntArray__return(void*);
+  probe NewLocalRef__entry(void*, void*);
+  probe NewLocalRef__return(void*);
+  probe NewLongArray__entry(void*, uintptr_t);
+  probe NewLongArray__return(void*);
+  probe NewObjectA__entry(void*, void*, uintptr_t);
+  probe NewObjectA__return(void*);
+  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
+  probe NewObjectArray__return(void*);
+  probe NewObject__entry(void*, void*, uintptr_t);
+  probe NewObject__return(void*);
+  probe NewObjectV__entry(void*, void*, uintptr_t);
+  probe NewObjectV__return(void*);
+  probe NewShortArray__entry(void*, uintptr_t);
+  probe NewShortArray__return(void*);
+  probe NewString__entry(void*, const uint16_t*, uintptr_t);
+  probe NewString__return(void*);
+  probe NewStringUTF__entry(void*, const char*);
+  probe NewStringUTF__return(void*);
+  probe NewWeakGlobalRef__entry(void*, void*);
+  probe NewWeakGlobalRef__return(void*);
+  probe PopLocalFrame__entry(void*, void*);
+  probe PopLocalFrame__return(void*);
+  probe PushLocalFrame__entry(void*, uint32_t);
+  probe PushLocalFrame__return(uint32_t);
+  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);
+  probe RegisterNatives__return(uint32_t);
+  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
+  probe ReleaseBooleanArrayElements__return();
+  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
+  probe ReleaseByteArrayElements__return();
+  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
+  probe ReleaseCharArrayElements__return();
+  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
+  probe ReleaseDoubleArrayElements__return();
+  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
+  probe ReleaseFloatArrayElements__return();
+  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
+  probe ReleaseIntArrayElements__return();
+  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
+  probe ReleaseLongArrayElements__return();
+  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
+  probe ReleasePrimitiveArrayCritical__return();
+  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
+  probe ReleaseShortArrayElements__return();
+  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
+  probe ReleaseStringChars__return();
+  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
+  probe ReleaseStringCritical__return();
+  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
+  probe ReleaseStringUTFChars__return();
+  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
+  probe SetBooleanArrayRegion__return();
+  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetBooleanField__return();
+  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
+  probe SetByteArrayRegion__return();
+  probe SetByteField__entry(void*, void*, uintptr_t, char);
+  probe SetByteField__return();
+  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
+  probe SetCharArrayRegion__return();
+  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetCharField__return();
+  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
+  probe SetDoubleArrayRegion__return();
+  probe SetDoubleField__entry(void*, void*, uintptr_t);
+  probe SetDoubleField__return();
+  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
+  probe SetFloatArrayRegion__return();
+  probe SetFloatField__entry(void*, void*, uintptr_t);
+  probe SetFloatField__return();
+  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
+  probe SetIntArrayRegion__return();
+  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
+  probe SetIntField__return();
+  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
+  probe SetLongArrayRegion__return();
+  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetLongField__return();
+  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
+  probe SetObjectArrayElement__return();
+  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
+  probe SetObjectField__return();
+  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
+  probe SetShortArrayRegion__return();
+  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetShortField__return();
+  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetStaticBooleanField__return();
+  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
+  probe SetStaticByteField__return();
+  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetStaticCharField__return();
+  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
+  probe SetStaticDoubleField__return();
+  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
+  probe SetStaticFloatField__return();
+  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
+  probe SetStaticIntField__return();
+  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe SetStaticLongField__return();
+  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
+  probe SetStaticObjectField__return();
+  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
+  probe SetStaticShortField__return();
+  probe Throw__entry(void*, void*);
+  probe Throw__return(intptr_t);
+  probe ThrowNew__entry(void*, void*, const char*);
+  probe ThrowNew__return(intptr_t);
+  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
+  probe ToReflectedField__return(void*);
+  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
+  probe ToReflectedMethod__return(void*);
+  probe UnregisterNatives__entry(void*, void*);
+  probe UnregisterNatives__return(uint32_t);
+};
+
+#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
+#pragma D attributes Private/Private/Unknown provider hotspot_jni module
+#pragma D attributes Private/Private/Unknown provider hotspot_jni function
+#pragma D attributes Standard/Standard/Common provider hotspot_jni name
+#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
+
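Each JNI function above is bracketed by an __entry/__return probe pair, so a
native caller can be traced without modification. A minimal sketch of JNI code
whose execution would fire the FindClass, GetStaticMethodID and
CallStaticVoidMethod probes (the class Demo and its static run() method are
hypothetical):

#include <jni.h>

// While this runs under dtrace, the hotspot_jni provider fires
// FindClass__entry/__return, GetStaticMethodID__entry/__return and
// CallStaticVoidMethod__entry/__return, in that order.
static void call_demo_run(JNIEnv* env) {
  jclass cls = env->FindClass("Demo");                        // FindClass probes
  if (cls == NULL) return;                                    // pending ClassNotFoundException
  jmethodID mid = env->GetStaticMethodID(cls, "run", "()V");  // GetStaticMethodID probes
  if (mid == NULL) return;                                    // pending NoSuchMethodError
  env->CallStaticVoidMethod(cls, mid);                        // CallStaticVoidMethod probes
}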
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/posix/dtrace/hs_private.d	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+provider hs_private {
+  probe safepoint__begin();
+  probe safepoint__end();
+  probe cms__initmark__begin();
+  probe cms__initmark__end();
+  probe cms__remark__begin();
+  probe cms__remark__end();
+};
+
+#pragma D attributes Private/Private/Common provider hs_private provider
+#pragma D attributes Private/Private/Unknown provider hs_private module
+#pragma D attributes Private/Private/Unknown provider hs_private function
+#pragma D attributes Private/Private/Common provider hs_private name
+#pragma D attributes Private/Private/Common provider hs_private args
+
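For context: dtrace -h turns each probe declaration above into a C macro
(provider and probe names upper-cased, "__" collapsed to "_"), and the VM fires
the begin/end pair around the corresponding phase. A sketch, assuming a header
generated from this provider file:

// Sketch only: hs_private.h is assumed to be generated via
// "dtrace -h -s hs_private.d"; macro names follow the standard USDT mapping.
#include "hs_private.h"

void safepoint_phase_sketch() {
  HS_PRIVATE_SAFEPOINT_BEGIN();  // fires hs_private:::safepoint-begin
  // ... bring all Java threads to a stop ...
  HS_PRIVATE_SAFEPOINT_END();    // fires hs_private:::safepoint-end
}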
--- a/src/os/posix/vm/os_posix.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/posix/vm/os_posix.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,38 +1,47 @@
 /*
-* Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*
-*/
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
 
+#include "utilities/globalDefinitions.hpp"
 #include "prims/jvm.h"
 #include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 #include "utilities/vmError.hpp"
 
 #include <unistd.h>
 #include <sys/resource.h>
 #include <sys/utsname.h>
 #include <pthread.h>
 #include <signal.h>
 
+// TODO: provide an os::get_max_process_id() or similar. The maximum number of
+// processes may have been configured and can be read more accurately from the
+// proc file system, etc.
+#ifndef MAX_PID
+#define MAX_PID INT_MAX
+#endif
+#define IS_VALID_PID(p) ((p) > 0 && (p) < MAX_PID)
 
 // Check core dump limit and report possible place where core can be found
 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
@@ -158,8 +167,8 @@
   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
   else st->print("%uk", rlim.rlim_cur >> 10);
 
-  //Isn't there on solaris
-#ifndef TARGET_OS_FAMILY_solaris
+  // Isn't there on solaris
+#if !defined(TARGET_OS_FAMILY_solaris) && !defined(TARGET_OS_FAMILY_aix)
   st->print(", NPROC ");
   getrlimit(RLIMIT_NPROC, &rlim);
   if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
@@ -262,10 +271,6 @@
   return ::fdopen(fd, mode);
 }
 
-void* os::get_default_process_handle() {
-  return (void*)::dlopen(NULL, RTLD_LAZY);
-}
-
 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
 // which is used to find statically linked in agents.
 // Parameters:
@@ -311,6 +316,612 @@
   return agent_entry_name;
 }
 
+int os::sleep(Thread* thread, jlong millis, bool interruptible) {
+  assert(thread == Thread::current(), "thread consistency check");
+
+  ParkEvent* const slp = thread->_SleepEvent;
+  slp->reset();
+  OrderAccess::fence();
+
+  if (interruptible) {
+    jlong prevtime = javaTimeNanos();
+
+    for (;;) {
+      if (os::is_interrupted(thread, true)) {
+        return OS_INTRPT;
+      }
+
+      jlong newtime = javaTimeNanos();
+
+      if (newtime - prevtime < 0) {
+        // time moving backwards, should only happen if no monotonic clock
+        // not a guarantee() because JVM should not abort on kernel/glibc bugs
+        assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected in os::sleep(interruptible)");
+      } else {
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+      }
+
+      if (millis <= 0) {
+        return OS_OK;
+      }
+
+      prevtime = newtime;
+
+      {
+        assert(thread->is_Java_thread(), "sanity check");
+        JavaThread *jt = (JavaThread *) thread;
+        ThreadBlockInVM tbivm(jt);
+        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
+
+        jt->set_suspend_equivalent();
+        // cleared by handle_special_suspend_equivalent_condition() or
+        // java_suspend_self() via check_and_wait_while_suspended()
+
+        slp->park(millis);
+
+        // were we externally suspended while we were waiting?
+        jt->check_and_wait_while_suspended();
+      }
+    }
+  } else {
+    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+    jlong prevtime = javaTimeNanos();
+
+    for (;;) {
+      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
+      // the 1st iteration ...
+      jlong newtime = javaTimeNanos();
+
+      if (newtime - prevtime < 0) {
+        // time moving backwards, should only happen if no monotonic clock
+        // not a guarantee() because JVM should not abort on kernel/glibc bugs
+        assert(!os::supports_monotonic_clock(), "unexpected time moving backwards detected on os::sleep(!interruptible)");
+      } else {
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+      }
+
+      if (millis <= 0) break;
+
+      prevtime = newtime;
+      slp->park(millis);
+    }
+    return OS_OK;
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// interrupt support
+
+void os::interrupt(Thread* thread) {
+  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
+    "possibility of dangling Thread pointer");
+
+  OSThread* osthread = thread->osthread();
+
+  if (!osthread->interrupted()) {
+    osthread->set_interrupted(true);
+    // More than one thread can get here with the same value of osthread,
+    // resulting in multiple notifications.  We do, however, want the store
+    // to interrupted() to be visible to other threads before we execute unpark().
+    OrderAccess::fence();
+    ParkEvent* const slp = thread->_SleepEvent;
+    if (slp != NULL) slp->unpark();
+  }
+
+  // For JSR166: unpark even if the interrupt status was already set.
+  if (thread->is_Java_thread()) {
+    ((JavaThread*)thread)->parker()->unpark();
+  }
+
+  ParkEvent* ev = thread->_ParkEvent;
+  if (ev != NULL) ev->unpark();
+}
+
+bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
+  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
+    "possibility of dangling Thread pointer");
+
+  OSThread* osthread = thread->osthread();
+
+  bool interrupted = osthread->interrupted();
+
+  // NOTE that since there is no "lock" around the interrupt and
+  // is_interrupted operations, there is the possibility that the
+  // interrupted flag (in osThread) will be "false" but that the
+  // low-level events will be in the signaled state. This is
+  // intentional. The effect of this is that Object.wait() and
+  // LockSupport.park() will appear to have a spurious wakeup, which
+  // is allowed and not harmful, and the possibility is so rare that
+  // it is not worth the added complexity to add yet another lock.
+  // For the sleep event an explicit reset is performed on entry
+  // to os::sleep, so there is no early return. It has also been
+  // recommended not to put the interrupted flag into the "event"
+  // structure because it hides the issue.
+  if (interrupted && clear_interrupted) {
+    osthread->set_interrupted(false);
+    // consider thread->_SleepEvent->reset() ... optional optimization
+  }
+
+  return interrupted;
+}
+
+// Writes the name of the given signal (e.g. "SIGILL") into the user-provided
+// buffer and returns that buffer. For unknown signals "UNKNOWN" is returned.
+const char* os::Posix::get_signal_name(int sig, char* out, size_t outlen) {
+
+  static const struct {
+    int sig; const char* name;
+  } info[] = {
+    {  SIGABRT,     "SIGABRT" },
+#ifdef SIGAIO
+    {  SIGAIO,      "SIGAIO" },
+#endif
+    {  SIGALRM,     "SIGALRM" },
+#ifdef SIGALRM1
+    {  SIGALRM1,    "SIGALRM1" },
+#endif
+    {  SIGBUS,      "SIGBUS" },
+#ifdef SIGCANCEL
+    {  SIGCANCEL,   "SIGCANCEL" },
+#endif
+    {  SIGCHLD,     "SIGCHLD" },
+#ifdef SIGCLD
+    {  SIGCLD,      "SIGCLD" },
+#endif
+    {  SIGCONT,     "SIGCONT" },
+#ifdef SIGCPUFAIL
+    {  SIGCPUFAIL,  "SIGCPUFAIL" },
+#endif
+#ifdef SIGDANGER
+    {  SIGDANGER,   "SIGDANGER" },
+#endif
+#ifdef SIGDIL
+    {  SIGDIL,      "SIGDIL" },
+#endif
+#ifdef SIGEMT
+    {  SIGEMT,      "SIGEMT" },
+#endif
+    {  SIGFPE,      "SIGFPE" },
+#ifdef SIGFREEZE
+    {  SIGFREEZE,   "SIGFREEZE" },
+#endif
+#ifdef SIGGFAULT
+    {  SIGGFAULT,   "SIGGFAULT" },
+#endif
+#ifdef SIGGRANT
+    {  SIGGRANT,    "SIGGRANT" },
+#endif
+    {  SIGHUP,      "SIGHUP" },
+    {  SIGILL,      "SIGILL" },
+    {  SIGINT,      "SIGINT" },
+#ifdef SIGIO
+    {  SIGIO,       "SIGIO" },
+#endif
+#ifdef SIGIOINT
+    {  SIGIOINT,    "SIGIOINT" },
+#endif
+#ifdef SIGIOT
+  // SIGIOT is there for BSD compatibility, but on most Unices it is just
+  // a synonym for SIGABRT. The result should be "SIGABRT", not "SIGIOT".
+  #if (SIGIOT != SIGABRT)
+    {  SIGIOT,      "SIGIOT" },
+  #endif
+#endif
+#ifdef SIGKAP
+    {  SIGKAP,      "SIGKAP" },
+#endif
+    {  SIGKILL,     "SIGKILL" },
+#ifdef SIGLOST
+    {  SIGLOST,     "SIGLOST" },
+#endif
+#ifdef SIGLWP
+    {  SIGLWP,      "SIGLWP" },
+#endif
+#ifdef SIGLWPTIMER
+    {  SIGLWPTIMER, "SIGLWPTIMER" },
+#endif
+#ifdef SIGMIGRATE
+    {  SIGMIGRATE,  "SIGMIGRATE" },
+#endif
+#ifdef SIGMSG
+    {  SIGMSG,      "SIGMSG" },
+#endif
+    {  SIGPIPE,     "SIGPIPE" },
+#ifdef SIGPOLL
+    {  SIGPOLL,     "SIGPOLL" },
+#endif
+#ifdef SIGPRE
+    {  SIGPRE,      "SIGPRE" },
+#endif
+    {  SIGPROF,     "SIGPROF" },
+#ifdef SIGPTY
+    {  SIGPTY,      "SIGPTY" },
+#endif
+#ifdef SIGPWR
+    {  SIGPWR,      "SIGPWR" },
+#endif
+    {  SIGQUIT,     "SIGQUIT" },
+#ifdef SIGRECONFIG
+    {  SIGRECONFIG, "SIGRECONFIG" },
+#endif
+#ifdef SIGRECOVERY
+    {  SIGRECOVERY, "SIGRECOVERY" },
+#endif
+#ifdef SIGRESERVE
+    {  SIGRESERVE,  "SIGRESERVE" },
+#endif
+#ifdef SIGRETRACT
+    {  SIGRETRACT,  "SIGRETRACT" },
+#endif
+#ifdef SIGSAK
+    {  SIGSAK,      "SIGSAK" },
+#endif
+    {  SIGSEGV,     "SIGSEGV" },
+#ifdef SIGSOUND
+    {  SIGSOUND,    "SIGSOUND" },
+#endif
+    {  SIGSTOP,     "SIGSTOP" },
+    {  SIGSYS,      "SIGSYS" },
+#ifdef SIGSYSERROR
+    {  SIGSYSERROR, "SIGSYSERROR" },
+#endif
+#ifdef SIGTALRM
+    {  SIGTALRM,    "SIGTALRM" },
+#endif
+    {  SIGTERM,     "SIGTERM" },
+#ifdef SIGTHAW
+    {  SIGTHAW,     "SIGTHAW" },
+#endif
+    {  SIGTRAP,     "SIGTRAP" },
+#ifdef SIGTSTP
+    {  SIGTSTP,     "SIGTSTP" },
+#endif
+    {  SIGTTIN,     "SIGTTIN" },
+    {  SIGTTOU,     "SIGTTOU" },
+#ifdef SIGURG
+    {  SIGURG,      "SIGURG" },
+#endif
+    {  SIGUSR1,     "SIGUSR1" },
+    {  SIGUSR2,     "SIGUSR2" },
+#ifdef SIGVIRT
+    {  SIGVIRT,     "SIGVIRT" },
+#endif
+    {  SIGVTALRM,   "SIGVTALRM" },
+#ifdef SIGWAITING
+    {  SIGWAITING,  "SIGWAITING" },
+#endif
+#ifdef SIGWINCH
+    {  SIGWINCH,    "SIGWINCH" },
+#endif
+#ifdef SIGWINDOW
+    {  SIGWINDOW,   "SIGWINDOW" },
+#endif
+    {  SIGXCPU,     "SIGXCPU" },
+    {  SIGXFSZ,     "SIGXFSZ" },
+#ifdef SIGXRES
+    {  SIGXRES,     "SIGXRES" },
+#endif
+    { -1, NULL }
+  };
+
+  const char* ret = NULL;
+
+#ifdef SIGRTMIN
+  if (sig >= SIGRTMIN && sig <= SIGRTMAX) {
+    if (sig == SIGRTMIN) {
+      ret = "SIGRTMIN";
+    } else if (sig == SIGRTMAX) {
+      ret = "SIGRTMAX";
+    } else {
+      jio_snprintf(out, outlen, "SIGRTMIN+%d", sig - SIGRTMIN);
+      return out;
+    }
+  }
+#endif
+
+  if (sig > 0) {
+    for (int idx = 0; info[idx].sig != -1; idx ++) {
+      if (info[idx].sig == sig) {
+        ret = info[idx].name;
+        break;
+      }
+    }
+  }
+
+  if (!ret) {
+    if (!is_valid_signal(sig)) {
+      ret = "INVALID";
+    } else {
+      ret = "UNKNOWN";
+    }
+  }
+
+  jio_snprintf(out, outlen, "%s", ret);  // never pass a variable as the format string
+  return out;
+}
+
+// Returns true if the signal number is valid.
+bool os::Posix::is_valid_signal(int sig) {
+  // Mac OS X is not really POSIX-compliant: sigaddset does not return
+  // an error for invalid signal numbers. However, Mac OS X does not
+  // support real-time signals and simply seems to have just 33
+  // signals with no holes in the signal range.
+#ifdef __APPLE__
+  return sig >= 1 && sig < NSIG;
+#else
+  // Use sigaddset to check for signal validity; clear the set first so we
+  // do not operate on an uninitialized sigset_t.
+  sigset_t set;
+  sigemptyset(&set);
+  if (sigaddset(&set, sig) == -1 && errno == EINVAL) {
+    return false;
+  }
+  return true;
+#endif
+}
+
+#define NUM_IMPORTANT_SIGS 32
+// Returns a short one-line description of a signal set in a user-provided buffer.
+const char* os::Posix::describe_signal_set_short(const sigset_t* set, char* buffer, size_t buf_size) {
+  assert(buf_size == (NUM_IMPORTANT_SIGS + 1), "wrong buffer size");
+  // Note: for brevity, just print the first 32 signals. That should
+  // cover most of the useful ones, apart from realtime signals.
+  for (int sig = 1; sig <= NUM_IMPORTANT_SIGS; sig++) {
+    const int rc = sigismember(set, sig);
+    if (rc == -1 && errno == EINVAL) {
+      buffer[sig-1] = '?';
+    } else {
+      buffer[sig-1] = rc == 0 ? '0' : '1';
+    }
+  }
+  buffer[NUM_IMPORTANT_SIGS] = 0;
+  return buffer;
+}
+
+// Prints one-line description of a signal set.
+void os::Posix::print_signal_set_short(outputStream* st, const sigset_t* set) {
+  char buf[NUM_IMPORTANT_SIGS + 1];
+  os::Posix::describe_signal_set_short(set, buf, sizeof(buf));
+  st->print("%s", buf);
+}
+
+// Writes a one-line description of a combination of sigaction.sa_flags into
+// a user-provided buffer. Returns that buffer.
+const char* os::Posix::describe_sa_flags(int flags, char* buffer, size_t size) {
+  char* p = buffer;
+  size_t remaining = size;
+  bool first = true;
+  int idx = 0;
+
+  assert(buffer, "invalid argument");
+
+  if (size == 0) {
+    return buffer;
+  }
+
+  strncpy(buffer, "none", size);
+
+  const struct {
+    int i;
+    const char* s;
+  } flaginfo [] = {
+    { SA_NOCLDSTOP, "SA_NOCLDSTOP" },
+    { SA_ONSTACK,   "SA_ONSTACK"   },
+    { SA_RESETHAND, "SA_RESETHAND" },
+    { SA_RESTART,   "SA_RESTART"   },
+    { SA_SIGINFO,   "SA_SIGINFO"   },
+    { SA_NOCLDWAIT, "SA_NOCLDWAIT" },
+    { SA_NODEFER,   "SA_NODEFER"   },
+#ifdef AIX
+    // SA_ONSTACK is already listed above; only SA_OLDSTYLE is AIX-specific.
+    { SA_OLDSTYLE,  "SA_OLDSTYLE"  },
+#endif
+    { 0, NULL }
+  };
+
+  for (idx = 0; flaginfo[idx].s && remaining > 1; idx++) {
+    if (flags & flaginfo[idx].i) {
+      if (first) {
+        jio_snprintf(p, remaining, "%s", flaginfo[idx].s);
+        first = false;
+      } else {
+        jio_snprintf(p, remaining, "|%s", flaginfo[idx].s);
+      }
+      const size_t len = strlen(p);
+      p += len;
+      remaining -= len;
+    }
+  }
+
+  buffer[size - 1] = '\0';
+
+  return buffer;
+}
+
+// Prints one-line description of a combination of sigaction.sa_flags.
+void os::Posix::print_sa_flags(outputStream* st, int flags) {
+  char buffer[0x100];
+  os::Posix::describe_sa_flags(flags, buffer, sizeof(buffer));
+  st->print("%s", buffer);
+}
+
+// Helper function for os::Posix::print_siginfo_...():
+// return a textual description for signal code.
+struct enum_sigcode_desc_t {
+  const char* s_name;
+  const char* s_desc;
+};
+
+static bool get_signal_code_description(const siginfo_t* si, enum_sigcode_desc_t* out) {
+
+  const struct {
+    int sig; int code; const char* s_code; const char* s_desc;
+  } t1 [] = {
+    { SIGILL,  ILL_ILLOPC,   "ILL_ILLOPC",   "Illegal opcode." },
+    { SIGILL,  ILL_ILLOPN,   "ILL_ILLOPN",   "Illegal operand." },
+    { SIGILL,  ILL_ILLADR,   "ILL_ILLADR",   "Illegal addressing mode." },
+    { SIGILL,  ILL_ILLTRP,   "ILL_ILLTRP",   "Illegal trap." },
+    { SIGILL,  ILL_PRVOPC,   "ILL_PRVOPC",   "Privileged opcode." },
+    { SIGILL,  ILL_PRVREG,   "ILL_PRVREG",   "Privileged register." },
+    { SIGILL,  ILL_COPROC,   "ILL_COPROC",   "Coprocessor error." },
+    { SIGILL,  ILL_BADSTK,   "ILL_BADSTK",   "Internal stack error." },
+#if defined(IA64) && defined(LINUX)
+    { SIGILL,  ILL_BADIADDR, "ILL_BADIADDR", "Unimplemented instruction address" },
+    { SIGILL,  ILL_BREAK,    "ILL_BREAK",    "Application Break instruction" },
+#endif
+    { SIGFPE,  FPE_INTDIV,   "FPE_INTDIV",   "Integer divide by zero." },
+    { SIGFPE,  FPE_INTOVF,   "FPE_INTOVF",   "Integer overflow." },
+    { SIGFPE,  FPE_FLTDIV,   "FPE_FLTDIV",   "Floating-point divide by zero." },
+    { SIGFPE,  FPE_FLTOVF,   "FPE_FLTOVF",   "Floating-point overflow." },
+    { SIGFPE,  FPE_FLTUND,   "FPE_FLTUND",   "Floating-point underflow." },
+    { SIGFPE,  FPE_FLTRES,   "FPE_FLTRES",   "Floating-point inexact result." },
+    { SIGFPE,  FPE_FLTINV,   "FPE_FLTINV",   "Invalid floating-point operation." },
+    { SIGFPE,  FPE_FLTSUB,   "FPE_FLTSUB",   "Subscript out of range." },
+    { SIGSEGV, SEGV_MAPERR,  "SEGV_MAPERR",  "Address not mapped to object." },
+    { SIGSEGV, SEGV_ACCERR,  "SEGV_ACCERR",  "Invalid permissions for mapped object." },
+#ifdef AIX
+    // No explanation found for what SEGV_KEYERR would mean.
+    { SIGSEGV, SEGV_KEYERR,  "SEGV_KEYERR",  "key error" },
+#endif
+#if defined(IA64) && !defined(AIX)
+    { SIGSEGV, SEGV_PSTKOVF, "SEGV_PSTKOVF", "Paragraph stack overflow" },
+#endif
+    { SIGBUS,  BUS_ADRALN,   "BUS_ADRALN",   "Invalid address alignment." },
+    { SIGBUS,  BUS_ADRERR,   "BUS_ADRERR",   "Nonexistent physical address." },
+    { SIGBUS,  BUS_OBJERR,   "BUS_OBJERR",   "Object-specific hardware error." },
+    { SIGTRAP, TRAP_BRKPT,   "TRAP_BRKPT",   "Process breakpoint." },
+    { SIGTRAP, TRAP_TRACE,   "TRAP_TRACE",   "Process trace trap." },
+    { SIGCHLD, CLD_EXITED,   "CLD_EXITED",   "Child has exited." },
+    { SIGCHLD, CLD_KILLED,   "CLD_KILLED",   "Child has terminated abnormally and did not create a core file." },
+    { SIGCHLD, CLD_DUMPED,   "CLD_DUMPED",   "Child has terminated abnormally and created a core file." },
+    { SIGCHLD, CLD_TRAPPED,  "CLD_TRAPPED",  "Traced child has trapped." },
+    { SIGCHLD, CLD_STOPPED,  "CLD_STOPPED",  "Child has stopped." },
+    { SIGCHLD, CLD_CONTINUED,"CLD_CONTINUED","Stopped child has continued." },
+#ifdef SIGPOLL
+    { SIGPOLL, POLL_OUT,     "POLL_OUT",     "Output buffers available." },
+    { SIGPOLL, POLL_MSG,     "POLL_MSG",     "Input message available." },
+    { SIGPOLL, POLL_ERR,     "POLL_ERR",     "I/O error." },
+    { SIGPOLL, POLL_PRI,     "POLL_PRI",     "High priority input available." },
+    { SIGPOLL, POLL_HUP,     "POLL_HUP",     "Device disconnected." },
+#endif
+    { -1, -1, NULL, NULL }
+  };
+
+  // Codes valid in any signal context.
+  const struct {
+    int code; const char* s_code; const char* s_desc;
+  } t2 [] = {
+    { SI_USER,      "SI_USER",     "Signal sent by kill()." },
+    { SI_QUEUE,     "SI_QUEUE",    "Signal sent by sigqueue()." },
+    { SI_TIMER,     "SI_TIMER",    "Signal generated by expiration of a timer set by timer_settime()." },
+    { SI_ASYNCIO,   "SI_ASYNCIO",  "Signal generated by completion of an asynchronous I/O request." },
+    { SI_MESGQ,     "SI_MESGQ",    "Signal generated by arrival of a message on an empty message queue." },
+    // Linux specific
+#ifdef SI_TKILL
+    { SI_TKILL,     "SI_TKILL",    "Signal sent by tkill (pthread_kill)" },
+#endif
+#ifdef SI_DETHREAD
+    { SI_DETHREAD,  "SI_DETHREAD", "Signal sent by execve() killing subsidiary threads" },
+#endif
+#ifdef SI_KERNEL
+    { SI_KERNEL,    "SI_KERNEL",   "Signal sent by kernel." },
+#endif
+#ifdef SI_SIGIO
+    { SI_SIGIO,     "SI_SIGIO",    "Signal sent by queued SIGIO" },
+#endif
+
+#ifdef AIX
+    { SI_UNDEFINED, "SI_UNDEFINED","siginfo contains partial information" },
+    { SI_EMPTY,     "SI_EMPTY",    "siginfo contains no useful information" },
+#endif
+
+#ifdef __sun
+    { SI_NOINFO,    "SI_NOINFO",   "No signal information" },
+    { SI_RCTL,      "SI_RCTL",     "kernel generated signal via rctl action" },
+    { SI_LWP,       "SI_LWP",      "Signal sent via lwp_kill" },
+#endif
+
+    { -1, NULL, NULL }
+  };
+
+  const char* s_code = NULL;
+  const char* s_desc = NULL;
+
+  for (int i = 0; t1[i].sig != -1; i ++) {
+    if (t1[i].sig == si->si_signo && t1[i].code == si->si_code) {
+      s_code = t1[i].s_code;
+      s_desc = t1[i].s_desc;
+      break;
+    }
+  }
+
+  if (s_code == NULL) {
+    for (int i = 0; t2[i].s_code != NULL; i ++) {
+      if (t2[i].code == si->si_code) {
+        s_code = t2[i].s_code;
+        s_desc = t2[i].s_desc;
+      }
+    }
+  }
+
+  if (s_code == NULL) {
+    out->s_name = "unknown";
+    out->s_desc = "unknown";
+    return false;
+  }
+
+  out->s_name = s_code;
+  out->s_desc = s_desc;
+
+  return true;
+}
+
+// A POSIX-conforming, platform-independent siginfo print routine.
+// Prints a short summary on one line.
+void os::Posix::print_siginfo_brief(outputStream* os, const siginfo_t* si) {
+  char buf[20];
+  os->print("siginfo: ");
+
+  if (!si) {
+    os->print("<null>");
+    return;
+  }
+
+  // See print_siginfo_full() for details.
+  const int sig = si->si_signo;
+
+  os->print("si_signo: %d (%s)", sig, os::Posix::get_signal_name(sig, buf, sizeof(buf)));
+
+  enum_sigcode_desc_t ed;
+  if (get_signal_code_description(si, &ed)) {
+    os->print(", si_code: %d (%s)", si->si_code, ed.s_name);
+  } else {
+    os->print(", si_code: %d (unknown)", si->si_code);
+  }
+
+  if (si->si_errno) {
+    os->print(", si_errno: %d", si->si_errno);
+  }
+
+  const int me = (int) ::getpid();
+  const int pid = (int) si->si_pid;
+
+  if (si->si_code == SI_USER || si->si_code == SI_QUEUE) {
+    if (IS_VALID_PID(pid) && pid != me) {
+      os->print(", sent from pid: %d (uid: %d)", pid, (int) si->si_uid);
+    }
+  } else if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+             sig == SIGTRAP || sig == SIGFPE) {
+    os->print(", si_addr: " PTR_FORMAT, si->si_addr);
+#ifdef SIGPOLL
+  } else if (sig == SIGPOLL) {
+    os->print(", si_band: " PTR64_FORMAT, (uint64_t)si->si_band);
+#endif
+  } else if (sig == SIGCHLD) {
+    os->print_cr(", si_pid: %d, si_uid: %d, si_status: %d", (int) si->si_pid, si->si_uid, si->si_status);
+  }
+}
+
 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
   assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
 }
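The helpers added above compose naturally. A sketch of VM-internal code that
dumps the current SIGSEGV disposition (assuming the usual HotSpot "tty"
outputStream; the function name is hypothetical):

static void print_segv_disposition_sketch() {
  char namebuf[20];
  tty->print("Handler for %s: ",
             os::Posix::get_signal_name(SIGSEGV, namebuf, sizeof(namebuf)));

  struct sigaction sa;
  if (sigaction(SIGSEGV, NULL, &sa) == 0) {  // query only, do not modify
    os::Posix::print_sa_flags(tty, sa.sa_flags);
  }
  tty->cr();
}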
--- a/src/os/posix/vm/os_posix.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/posix/vm/os_posix.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,30 @@
   static void print_libversion_info(outputStream* st);
   static void print_load_average(outputStream* st);
 
+public:
+
+  // Returns true if signal is valid.
+  static bool is_valid_signal(int sig);
+
+  // Helper function: writes the name of a signal (e.g. "SIGILL") into the
+  // user-provided buffer and returns that buffer. For unknown signals
+  // "UNKNOWN" is returned.
+  static const char* get_signal_name(int sig, char* out, size_t outlen);
+
+  // Returns a short one-line description of a signal set in a user-provided buffer.
+  static const char* describe_signal_set_short(const sigset_t* set, char* buffer, size_t size);
+
+  // Prints a short one-line description of a signal set.
+  static void print_signal_set_short(outputStream* st, const sigset_t* set);
+
+  // Writes a one-line description of a combination of sigaction.sa_flags
+  // into a user-provided buffer. Returns that buffer.
+  static const char* describe_sa_flags(int flags, char* buffer, size_t size);
+
+  // Prints a one-line description of a combination of sigaction.sa_flags.
+  static void print_sa_flags(outputStream* st, int flags);
+
+  // A POSIX-conforming, platform-independent siginfo print routine.
+  static void print_siginfo_brief(outputStream* os, const siginfo_t* si);
 
 };
 
@@ -57,4 +81,4 @@
   sigjmp_buf _jmpbuf;
 };
 
-#endif
+#endif // OS_POSIX_VM_OS_POSIX_HPP
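These declarations make the helpers available to the platform-specific error
reporters. A sketch of an SA_SIGINFO-style handler summarizing the siginfo it
receives (hypothetical handler; HotSpot's real handlers are far more careful
about what is async-signal-safe):

static void crash_summary_handler_sketch(int sig, siginfo_t* info, void* ucontext) {
  // One-line summary, e.g.
  // "siginfo: si_signo: 11 (SIGSEGV), si_code: 1 (SEGV_MAPERR), si_addr: ..."
  os::Posix::print_siginfo_brief(tty, info);
  tty->cr();
}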
--- a/src/os/solaris/dtrace/hotspot.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot {
-  probe class__loaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__unloaded(char*, uintptr_t, void*, uintptr_t);
-  probe class__initialization__required(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__recursive(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__concurrent(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__erroneous(char*, uintptr_t, void*, intptr_t, int);
-  probe class__initialization__super__failed(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__clinit(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__error(char*, uintptr_t, void*, intptr_t,int);
-  probe class__initialization__end(char*, uintptr_t, void*, intptr_t,int);
-  probe vm__init__begin();
-  probe vm__init__end();
-  probe vm__shutdown();
-  probe vmops__request(char*, uintptr_t, int);
-  probe vmops__begin(char*, uintptr_t, int);
-  probe vmops__end(char*, uintptr_t, int);
-  probe gc__begin(uintptr_t);
-  probe gc__end();
-  probe mem__pool__gc__begin(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe mem__pool__gc__end(
-    char*, uintptr_t, char*, uintptr_t, 
-    uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__start(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__stop(char*, uintptr_t, uintptr_t, uintptr_t, uintptr_t);
-  probe thread__sleep__begin(long long);
-  probe thread__sleep__end(int);
-  probe thread__yield();
-  probe thread__park__begin(uintptr_t, int, long long);
-  probe thread__park__end(uintptr_t);
-  probe thread__unpark(uintptr_t);
-  probe method__compile__begin(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe method__compile__end(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, 
-    char*, uintptr_t, uintptr_t); 
-  probe compiled__method__load(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t, void*, uintptr_t);
-  probe compiled__method__unload(
-    char*, uintptr_t, char*, uintptr_t, char*, uintptr_t); 
-  probe monitor__contended__enter(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__entered(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__contended__exit(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__wait(uintptr_t, uintptr_t, char*, uintptr_t, uintptr_t);
-  probe monitor__waited(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notify(uintptr_t, uintptr_t, char*, uintptr_t);
-  probe monitor__notifyAll(uintptr_t, uintptr_t, char*, uintptr_t);
-
-  probe object__alloc(int, char*, uintptr_t, uintptr_t);
-  probe method__entry(
-    int, char*, int, char*, int, char*, int);
-  probe method__return(
-    int, char*, int, char*, int, char*, int);
-};
-
-#pragma D attributes Evolving/Evolving/Common provider hotspot provider
-#pragma D attributes Private/Private/Unknown provider hotspot module
-#pragma D attributes Private/Private/Unknown provider hotspot function
-#pragma D attributes Evolving/Evolving/Common provider hotspot name
-#pragma D attributes Evolving/Evolving/Common provider hotspot args
--- a/src/os/solaris/dtrace/hotspot_jni.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,506 +0,0 @@
-/*
- * Copyright (c) 2005, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hotspot_jni {
-  probe AllocObject__entry(void*, void*);
-  probe AllocObject__return(void*);
-  probe AttachCurrentThreadAsDaemon__entry(void*, void**, void*);
-  probe AttachCurrentThreadAsDaemon__return(uint32_t);
-  probe AttachCurrentThread__entry(void*, void**, void*);
-  probe AttachCurrentThread__return(uint32_t);
-  probe CallBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodA__return(uintptr_t);
-  probe CallBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethod__return(uintptr_t);
-  probe CallBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallBooleanMethodV__return(uintptr_t);
-  probe CallByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallByteMethodA__return(char);
-  probe CallByteMethod__entry(void*, void*, uintptr_t);
-  probe CallByteMethod__return(char);
-  probe CallByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallByteMethodV__return(char);
-  probe CallCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallCharMethodA__return(uint16_t);
-  probe CallCharMethod__entry(void*, void*, uintptr_t);
-  probe CallCharMethod__return(uint16_t);
-  probe CallCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallCharMethodV__return(uint16_t);
-  probe CallDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodA__return();
-  probe CallDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethod__return();
-  probe CallDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallDoubleMethodV__return();
-  probe CallFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodA__return();
-  probe CallFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallFloatMethod__return();
-  probe CallFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallFloatMethodV__return();
-  probe CallIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallIntMethodA__return(uint32_t);
-  probe CallIntMethod__entry(void*, void*, uintptr_t);
-  probe CallIntMethod__return(uint32_t);
-  probe CallIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallIntMethodV__return(uint32_t);
-  probe CallLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallLongMethodA__return(uintptr_t);
-  probe CallLongMethod__entry(void*, void*, uintptr_t);
-  probe CallLongMethod__return(uintptr_t);
-  probe CallLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallLongMethodV__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodA__return(uintptr_t);
-  probe CallNonvirtualBooleanMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethod__return(uintptr_t);
-  probe CallNonvirtualBooleanMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualBooleanMethodV__return(uintptr_t);
-  probe CallNonvirtualByteMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodA__return(char);
-  probe CallNonvirtualByteMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethod__return(char);
-  probe CallNonvirtualByteMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualByteMethodV__return(char);
-  probe CallNonvirtualCharMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodA__return(uint16_t);
-  probe CallNonvirtualCharMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethod__return(uint16_t);
-  probe CallNonvirtualCharMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualCharMethodV__return(uint16_t);
-  probe CallNonvirtualDoubleMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodA__return();
-  probe CallNonvirtualDoubleMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethod__return();
-  probe CallNonvirtualDoubleMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualDoubleMethodV__return();
-  probe CallNonvirtualFloatMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodA__return();
-  probe CallNonvirtualFloatMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethod__return();
-  probe CallNonvirtualFloatMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualFloatMethodV__return();
-  probe CallNonvirtualIntMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodA__return(uint32_t);
-  probe CallNonvirtualIntMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethod__return(uint32_t);
-  probe CallNonvirtualIntMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualIntMethodV__return(uint32_t);
-  probe CallNonvirtualLongMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodA__return(uintptr_t);
-  probe CallNonvirtualLongMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethod__return(uintptr_t);
-  probe CallNonvirtualLongMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualLongMethodV__return(uintptr_t);
-  probe CallNonvirtualObjectMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodA__return(void*);
-  probe CallNonvirtualObjectMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethod__return(void*);
-  probe CallNonvirtualObjectMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualObjectMethodV__return(void*);
-  probe CallNonvirtualShortMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodA__return(uint16_t);
-  probe CallNonvirtualShortMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethod__return(uint16_t);
-  probe CallNonvirtualShortMethodV__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualShortMethodV__return(uint16_t);
-  probe CallNonvirtualVoidMethodA__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethodA__return();
-  probe CallNonvirtualVoidMethod__entry(void*, void*, void*, uintptr_t);
-  probe CallNonvirtualVoidMethod__return();
-  probe CallNonvirtualVoidMethodV__entry(void*, void*, void*, uintptr_t);  
-  probe CallNonvirtualVoidMethodV__return();
-  probe CallObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodA__return(void*);
-  probe CallObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallObjectMethod__return(void*);
-  probe CallObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallObjectMethodV__return(void*);
-  probe CallShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallShortMethodA__return(uint16_t);
-  probe CallShortMethod__entry(void*, void*, uintptr_t);
-  probe CallShortMethod__return(uint16_t);
-  probe CallShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallShortMethodV__return(uint16_t);
-  probe CallStaticBooleanMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodA__return(uintptr_t);
-  probe CallStaticBooleanMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethod__return(uintptr_t);
-  probe CallStaticBooleanMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticBooleanMethodV__return(uintptr_t);
-  probe CallStaticByteMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodA__return(char);
-  probe CallStaticByteMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethod__return(char);
-  probe CallStaticByteMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticByteMethodV__return(char);
-  probe CallStaticCharMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodA__return(uint16_t);
-  probe CallStaticCharMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethod__return(uint16_t);
-  probe CallStaticCharMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticCharMethodV__return(uint16_t);
-  probe CallStaticDoubleMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodA__return();
-  probe CallStaticDoubleMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethod__return();
-  probe CallStaticDoubleMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticDoubleMethodV__return();
-  probe CallStaticFloatMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodA__return();
-  probe CallStaticFloatMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethod__return();
-  probe CallStaticFloatMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticFloatMethodV__return();
-  probe CallStaticIntMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodA__return(uint32_t);
-  probe CallStaticIntMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethod__return(uint32_t);
-  probe CallStaticIntMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticIntMethodV__return(uint32_t);
-  probe CallStaticLongMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodA__return(uintptr_t);
-  probe CallStaticLongMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethod__return(uintptr_t);
-  probe CallStaticLongMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticLongMethodV__return(uintptr_t);
-  probe CallStaticObjectMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodA__return(void*);
-  probe CallStaticObjectMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethod__return(void*);
-  probe CallStaticObjectMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticObjectMethodV__return(void*);
-  probe CallStaticShortMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodA__return(uint16_t);
-  probe CallStaticShortMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethod__return(uint16_t);
-  probe CallStaticShortMethodV__entry(void*, void*, uintptr_t);
-  probe CallStaticShortMethodV__return(uint16_t);
-  probe CallStaticVoidMethodA__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethodA__return();
-  probe CallStaticVoidMethod__entry(void*, void*, uintptr_t);
-  probe CallStaticVoidMethod__return(); 
-  probe CallStaticVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallStaticVoidMethodV__return();
-  probe CallVoidMethodA__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodA__return();
-  probe CallVoidMethod__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethod__return(); 
-  probe CallVoidMethodV__entry(void*, void*, uintptr_t);  
-  probe CallVoidMethodV__return();
-  probe CreateJavaVM__entry(void**, void**, void*);
-  probe CreateJavaVM__return(uint32_t);
-  probe DefineClass__entry(void*, const char*, void*, char, uintptr_t);
-  probe DefineClass__return(void*);
-  probe DeleteGlobalRef__entry(void*, void*);
-  probe DeleteGlobalRef__return();
-  probe DeleteLocalRef__entry(void*, void*);
-  probe DeleteLocalRef__return();
-  probe DeleteWeakGlobalRef__entry(void*, void*);
-  probe DeleteWeakGlobalRef__return();
-  probe DestroyJavaVM__entry(void*);
-  probe DestroyJavaVM__return(uint32_t);
-  probe DetachCurrentThread__entry(void*);
-  probe DetachCurrentThread__return(uint32_t);
-  probe EnsureLocalCapacity__entry(void*, uint32_t);
-  probe EnsureLocalCapacity__return(uint32_t);
-  probe ExceptionCheck__entry(void*);
-  probe ExceptionCheck__return(uintptr_t);
-  probe ExceptionClear__entry(void*);
-  probe ExceptionClear__return();
-  probe ExceptionDescribe__entry(void*);  
-  probe ExceptionDescribe__return();
-  probe ExceptionOccurred__entry(void*);
-  probe ExceptionOccurred__return(void*);
-  probe FatalError__entry(void* env, const char*);
-  probe FindClass__entry(void*, const char*);
-  probe FindClass__return(void*);
-  probe FromReflectedField__entry(void*, void*);
-  probe FromReflectedField__return(uintptr_t);
-  probe FromReflectedMethod__entry(void*, void*);
-  probe FromReflectedMethod__return(uintptr_t);
-  probe GetArrayLength__entry(void*, void*);
-  probe GetArrayLength__return(uintptr_t);
-  probe GetBooleanArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetBooleanArrayElements__return(uintptr_t*);
-  probe GetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetBooleanArrayRegion__return();
-  probe GetBooleanField__entry(void*, void*, uintptr_t);
-  probe GetBooleanField__return(uintptr_t);
-  probe GetByteArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetByteArrayElements__return(char*);
-  probe GetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetByteArrayRegion__return();
-  probe GetByteField__entry(void*, void*, uintptr_t);
-  probe GetByteField__return(char);
-  probe GetCharArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetCharArrayElements__return(uint16_t*);
-  probe GetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetCharArrayRegion__return();
-  probe GetCharField__entry(void*, void*, uintptr_t);
-  probe GetCharField__return(uint16_t);
-  probe GetCreatedJavaVMs__entry(void**, uintptr_t, uintptr_t*);
-  probe GetCreatedJavaVMs__return(uintptr_t);
-  probe GetDefaultJavaVMInitArgs__entry(void*);
-  probe GetDefaultJavaVMInitArgs__return(uint32_t);
-  probe GetDirectBufferAddress__entry(void*, void*);
-  probe GetDirectBufferAddress__return(void*);
-  probe GetDirectBufferCapacity__entry(void*, void*);
-  probe GetDirectBufferCapacity__return(uintptr_t);
-  probe GetDoubleArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetDoubleArrayElements__return(double*);
-  probe GetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, double*);
-  probe GetDoubleArrayRegion__return();
-  probe GetDoubleField__entry(void*, void*, uintptr_t);
-  probe GetDoubleField__return();
-  probe GetEnv__entry(void*, void*, uint32_t);
-  probe GetEnv__return(uint32_t);
-  probe GetFieldID__entry(void*, void*, const char*, const char*);
-  probe GetFieldID__return(uintptr_t);
-  probe GetFloatArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetFloatArrayElements__return(float*);
-  probe GetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, float*);
-  probe GetFloatArrayRegion__return();
-  probe GetFloatField__entry(void*, void*, uintptr_t);
-  probe GetFloatField__return();
-  probe GetIntArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetIntArrayElements__return(uint32_t*);
-  probe GetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint32_t*);
-  probe GetIntArrayRegion__return();
-  probe GetIntField__entry(void*, void*, uintptr_t);
-  probe GetIntField__return(uint32_t);
-  probe GetJavaVM__entry(void*, void**);
-  probe GetJavaVM__return(uint32_t);
-  probe GetLongArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetLongArrayElements__return(uintptr_t*);
-  probe GetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uintptr_t*);
-  probe GetLongArrayRegion__return();
-  probe GetLongField__entry(void*, void*, uintptr_t);
-  probe GetLongField__return(uintptr_t);
-  probe GetMethodID__entry(void*, void*, const char*, const char*);
-  probe GetMethodID__return(uintptr_t);
-  probe GetObjectArrayElement__entry(void*, void*, uintptr_t);
-  probe GetObjectArrayElement__return(void*);
-  probe GetObjectClass__entry(void*, void*);
-  probe GetObjectClass__return(void*);
-  probe GetObjectField__entry(void*, void*, uintptr_t);
-  probe GetObjectField__return(void*);
-  probe GetObjectRefType__entry(void*, void*);
-  probe GetObjectRefType__return(void*);
-  probe GetPrimitiveArrayCritical__entry(void*, void*, uintptr_t*);
-  probe GetPrimitiveArrayCritical__return(void*);
-  probe GetShortArrayElements__entry(void*, void*, uintptr_t*);
-  probe GetShortArrayElements__return(uint16_t*);
-  probe GetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetShortArrayRegion__return();
-  probe GetShortField__entry(void*, void*, uintptr_t);
-  probe GetShortField__return(uint16_t);
-  probe GetStaticBooleanField__entry(void*, void*, uintptr_t);
-  probe GetStaticBooleanField__return(uintptr_t);
-  probe GetStaticByteField__entry(void*, void*, uintptr_t);
-  probe GetStaticByteField__return(char);
-  probe GetStaticCharField__entry(void*, void*, uintptr_t);
-  probe GetStaticCharField__return(uint16_t);
-  probe GetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe GetStaticDoubleField__return();
-  probe GetStaticFieldID__entry(void*, void*, const char*, const char*);
-  probe GetStaticFieldID__return(uintptr_t);
-  probe GetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe GetStaticFloatField__return();
-  probe GetStaticIntField__entry(void*, void*, uintptr_t);
-  probe GetStaticIntField__return(uint32_t);
-  probe GetStaticLongField__entry(void*, void*, uintptr_t);
-  probe GetStaticLongField__return(uintptr_t);
-  probe GetStaticMethodID__entry(void*, void*, const char*, const char*);
-  probe GetStaticMethodID__return(uintptr_t);
-  probe GetStaticObjectField__entry(void*, void*, uintptr_t);
-  probe GetStaticObjectField__return(void*);
-  probe GetStaticShortField__entry(void*, void*, uintptr_t);
-  probe GetStaticShortField__return(uint16_t);
-  probe GetStringChars__entry(void*, void*, uintptr_t*);
-  probe GetStringChars__return(const uint16_t*);
-  probe GetStringCritical__entry(void*, void*, uintptr_t*);
-  probe GetStringCritical__return(const uint16_t*);
-  probe GetStringLength__entry(void*, void*);
-  probe GetStringLength__return(uintptr_t);
-  probe GetStringRegion__entry(void*, void*, uintptr_t, uintptr_t, uint16_t*);
-  probe GetStringRegion__return();
-  probe GetStringUTFChars__entry(void*, void*, uintptr_t*);
-  probe GetStringUTFChars__return(const char*);
-  probe GetStringUTFLength__entry(void*, void*);
-  probe GetStringUTFLength__return(uintptr_t);
-  probe GetStringUTFRegion__entry(void*, void*, uintptr_t, uintptr_t, char*);
-  probe GetStringUTFRegion__return();
-  probe GetSuperclass__entry(void*, void*);
-  probe GetSuperclass__return(void*);
-  probe GetVersion__entry(void*);
-  probe GetVersion__return(uint32_t);
-  probe IsAssignableFrom__entry(void*, void*, void*);
-  probe IsAssignableFrom__return(uintptr_t);
-  probe IsInstanceOf__entry(void*, void*, void*);
-  probe IsInstanceOf__return(uintptr_t);
-  probe IsSameObject__entry(void*, void*, void*);
-  probe IsSameObject__return(uintptr_t);
-  probe MonitorEnter__entry(void*, void*);
-  probe MonitorEnter__return(uint32_t);
-  probe MonitorExit__entry(void*, void*);
-  probe MonitorExit__return(uint32_t);
-  probe NewBooleanArray__entry(void*, uintptr_t);
-  probe NewBooleanArray__return(void*);
-  probe NewByteArray__entry(void*, uintptr_t);
-  probe NewByteArray__return(void*);
-  probe NewCharArray__entry(void*, uintptr_t);
-  probe NewCharArray__return(void*);
-  probe NewDirectByteBuffer__entry(void*, void*, uintptr_t);
-  probe NewDirectByteBuffer__return(void*);
-  probe NewDoubleArray__entry(void*, uintptr_t);
-  probe NewDoubleArray__return(void*);
-  probe NewFloatArray__entry(void*, uintptr_t);
-  probe NewFloatArray__return(void*);
-  probe NewGlobalRef__entry(void*, void*);
-  probe NewGlobalRef__return(void*);
-  probe NewIntArray__entry(void*, uintptr_t);
-  probe NewIntArray__return(void*);
-  probe NewLocalRef__entry(void*, void*);
-  probe NewLocalRef__return(void*);
-  probe NewLongArray__entry(void*, uintptr_t);
-  probe NewLongArray__return(void*);
-  probe NewObjectA__entry(void*, void*, uintptr_t);  
-  probe NewObjectA__return(void*);
-  probe NewObjectArray__entry(void*, uintptr_t, void*, void*);
-  probe NewObjectArray__return(void*);
-  probe NewObject__entry(void*, void*, uintptr_t); 
-  probe NewObject__return(void*);
-  probe NewObjectV__entry(void*, void*, uintptr_t);  
-  probe NewObjectV__return(void*);
-  probe NewShortArray__entry(void*, uintptr_t);
-  probe NewShortArray__return(void*);
-  probe NewString__entry(void*, const uint16_t*, uintptr_t);
-  probe NewString__return(void*);
-  probe NewStringUTF__entry(void*, const char*);
-  probe NewStringUTF__return(void*);
-  probe NewWeakGlobalRef__entry(void*, void*);
-  probe NewWeakGlobalRef__return(void*);
-  probe PopLocalFrame__entry(void*, void*);
-  probe PopLocalFrame__return(void*);
-  probe PushLocalFrame__entry(void*, uint32_t);
-  probe PushLocalFrame__return(uint32_t);
-  probe RegisterNatives__entry(void*, void*, const void*, uint32_t);  
-  probe RegisterNatives__return(uint32_t);
-  probe ReleaseBooleanArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseBooleanArrayElements__return();
-  probe ReleaseByteArrayElements__entry(void*, void*, char*, uint32_t);
-  probe ReleaseByteArrayElements__return();
-  probe ReleaseCharArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseCharArrayElements__return();
-  probe ReleaseDoubleArrayElements__entry(void*, void*, double*, uint32_t);
-  probe ReleaseDoubleArrayElements__return();
-  probe ReleaseFloatArrayElements__entry(void*, void*, float*, uint32_t);
-  probe ReleaseFloatArrayElements__return();
-  probe ReleaseIntArrayElements__entry(void*, void*, uint32_t*, uint32_t);
-  probe ReleaseIntArrayElements__return();
-  probe ReleaseLongArrayElements__entry(void*, void*, uintptr_t*, uint32_t);
-  probe ReleaseLongArrayElements__return();
-  probe ReleasePrimitiveArrayCritical__entry(void*, void*, void*, uint32_t);
-  probe ReleasePrimitiveArrayCritical__return();
-  probe ReleaseShortArrayElements__entry(void*, void*, uint16_t*, uint32_t);
-  probe ReleaseShortArrayElements__return();
-  probe ReleaseStringChars__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringChars__return();
-  probe ReleaseStringCritical__entry(void*, void*, const uint16_t*);
-  probe ReleaseStringCritical__return();
-  probe ReleaseStringUTFChars__entry(void*, void*, const char*);
-  probe ReleaseStringUTFChars__return();
-  probe SetBooleanArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetBooleanArrayRegion__return();
-  probe SetBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetBooleanField__return();
-  probe SetByteArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const char*);
-  probe SetByteArrayRegion__return();
-  probe SetByteField__entry(void*, void*, uintptr_t, char);
-  probe SetByteField__return();
-  probe SetCharArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetCharArrayRegion__return();
-  probe SetCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetCharField__return();
-  probe SetDoubleArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const double*);
-  probe SetDoubleArrayRegion__return();
-  probe SetDoubleField__entry(void*, void*, uintptr_t);
-  probe SetDoubleField__return();
-  probe SetFloatArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const float*);
-  probe SetFloatArrayRegion__return();
-  probe SetFloatField__entry(void*, void*, uintptr_t);
-  probe SetFloatField__return();
-  probe SetIntArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint32_t*);
-  probe SetIntArrayRegion__return();
-  probe SetIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetIntField__return();
-  probe SetLongArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uintptr_t*);
-  probe SetLongArrayRegion__return();
-  probe SetLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetLongField__return();
-  probe SetObjectArrayElement__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectArrayElement__return();
-  probe SetObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetObjectField__return();
-  probe SetShortArrayRegion__entry(void*, void*, uintptr_t, uintptr_t, const uint16_t*);
-  probe SetShortArrayRegion__return();
-  probe SetShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetShortField__return();
-  probe SetStaticBooleanField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticBooleanField__return();
-  probe SetStaticByteField__entry(void*, void*, uintptr_t, char);
-  probe SetStaticByteField__return();
-  probe SetStaticCharField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticCharField__return();
-  probe SetStaticDoubleField__entry(void*, void*, uintptr_t);
-  probe SetStaticDoubleField__return();
-  probe SetStaticFloatField__entry(void*, void*, uintptr_t);
-  probe SetStaticFloatField__return();
-  probe SetStaticIntField__entry(void*, void*, uintptr_t, uint32_t);
-  probe SetStaticIntField__return();
-  probe SetStaticLongField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe SetStaticLongField__return();
-  probe SetStaticObjectField__entry(void*, void*, uintptr_t, void*);
-  probe SetStaticObjectField__return();
-  probe SetStaticShortField__entry(void*, void*, uintptr_t, uint16_t);
-  probe SetStaticShortField__return();
-  probe Throw__entry(void*, void*);
-  probe Throw__return(intptr_t);
-  probe ThrowNew__entry(void*, void*, const char*);  
-  probe ThrowNew__return(intptr_t);  
-  probe ToReflectedField__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedField__return(void*);
-  probe ToReflectedMethod__entry(void*, void*, uintptr_t, uintptr_t);
-  probe ToReflectedMethod__return(void*);
-  probe UnregisterNatives__entry(void*, void*);  
-  probe UnregisterNatives__return(uint32_t);
-};
-
-#pragma D attributes Standard/Standard/Common provider hotspot_jni provider
-#pragma D attributes Private/Private/Unknown provider hotspot_jni module
-#pragma D attributes Private/Private/Unknown provider hotspot_jni function
-#pragma D attributes Standard/Standard/Common provider hotspot_jni name
-#pragma D attributes Evolving/Evolving/Common provider hotspot_jni args
-
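
Note: the hotspot_jni provider removed above declared a paired __entry/__return probe for every JNI function. As orientation only, a minimal sketch of how such a USDT pair is typically fired from C++ via the <sys/sdt.h> macros on Solaris/BSD (the wrapper below is hypothetical; HotSpot emitted its probe sites through its own dtrace macros):

    #include <jni.h>
    #include <sys/sdt.h>   // DTRACE_PROBE1/DTRACE_PROBE2 USDT macros

    // Hypothetical wrapper, not part of this patch: fires the same
    // entry/return pair declared for AllocObject in the provider above.
    jobject traced_AllocObject(JNIEnv* env, jclass clazz) {
      DTRACE_PROBE2(hotspot_jni, AllocObject__entry, env, clazz);
      jobject obj = env->AllocObject(clazz);
      DTRACE_PROBE1(hotspot_jni, AllocObject__return, obj);
      return obj;
    }
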
--- a/src/os/solaris/dtrace/hs_private.d	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *  
- */
-
-provider hs_private {
-  probe safepoint__begin();
-  probe safepoint__end();
-  probe cms__initmark__begin();
-  probe cms__initmark__end();
-  probe cms__remark__begin();
-  probe cms__remark__end();
-};
-
-#pragma D attributes Private/Private/Common provider hs_private provider
-#pragma D attributes Private/Private/Unknown provider hs_private module
-#pragma D attributes Private/Private/Unknown provider hs_private function
-#pragma D attributes Private/Private/Common provider hs_private name
-#pragma D attributes Private/Private/Common provider hs_private args
-
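
Note: the hs_private probes deleted above bracket stop-the-world phases. A hedged sketch of the firing pattern, assuming the same <sys/sdt.h> macros (the function below is hypothetical, not HotSpot's actual safepoint code):

    #include <sys/sdt.h>

    // Hypothetical shape: bracketing a stop-the-world operation with the
    // safepoint__begin/safepoint__end pair declared above.
    static void stop_the_world_traced() {
      DTRACE_PROBE(hs_private, safepoint__begin);
      // ... bring all Java threads to a safepoint, run the VM operation ...
      DTRACE_PROBE(hs_private, safepoint__end);
    }
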
--- a/src/os/solaris/dtrace/jvm_dtrace.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/dtrace/jvm_dtrace.c	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/vm/globals_solaris.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/vm/globals_solaris.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os/solaris/vm/os_solaris.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/vm/os_solaris.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -332,12 +332,6 @@
   ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
 }
 
-// Version of setup_interruptible() for threads that are already in
-// _thread_blocked. Used by os_sleep().
-void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
-  thread->frame_anchor()->make_walkable(thread);
-}
-
 JavaThread* os::Solaris::setup_interruptible() {
   JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
   setup_interruptible(thread);
@@ -2146,6 +2140,10 @@
   return dlsym(handle, name);
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
 int os::stat(const char *path, struct stat *sbuf) {
   char pathbuf[MAX_PATH];
   if (strlen(path) > MAX_PATH - 1) {
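
Note: dlopen(NULL, RTLD_LAZY) hands back a handle for the main program, so the new os::get_default_process_handle() lets dlsym() search the executable and everything it has loaded. A standalone illustration (the symbol name is an arbitrary example; resolution depends on what the process actually exports):

    #include <dlfcn.h>
    #include <stdio.h>

    int main() {
      void* self = dlopen(NULL, RTLD_LAZY);   // handle for the whole process
      if (self != NULL) {
        // Arbitrary example symbol; succeeds only if the process exports it.
        void* sym = dlsym(self, "JNI_CreateJavaVM");
        printf("JNI_CreateJavaVM resolved at %p\n", sym);
      }
      return 0;
    }
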
@@ -2228,8 +2226,8 @@
         st->cr();
         status = true;
       }
-      ::close(fd);
     }
+    ::close(fd);
   }
   return status;
 }
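
Note: the hunk above fixes a descriptor leak — ::close(fd) used to run only on the branch where the read succeeded. The corrected pattern in miniature (hypothetical helper, for illustration only):

    #include <fcntl.h>
    #include <unistd.h>

    static bool read_some(const char* path, char* buf, size_t len) {
      bool ok = false;
      int fd = open(path, O_RDONLY);
      if (fd != -1) {
        ok = read(fd, buf, len) > 0;
        close(fd);   // now reached on every path once open() succeeded
      }
      return ok;
    }
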
@@ -2247,58 +2245,12 @@
   (void) check_addr0(st);
 }
 
-// Taken from /usr/include/sys/machsig.h  Supposed to be architecture specific
-// but they're the same for all the solaris architectures that we support.
-const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
-                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
-                          "ILL_COPROC", "ILL_BADSTK" };
-
-const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
-                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
-                          "FPE_FLTINV", "FPE_FLTSUB" };
-
-const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
-
-const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
-
 void os::print_siginfo(outputStream* st, void* siginfo) {
-  st->print("siginfo:");
-
-  const int buflen = 100;
-  char buf[buflen];
-  siginfo_t *si = (siginfo_t*)siginfo;
-  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
-  char *err = strerror(si->si_errno);
-  if (si->si_errno != 0 && err != NULL) {
-    st->print("si_errno=%s", err);
-  } else {
-    st->print("si_errno=%d", si->si_errno);
-  }
-  const int c = si->si_code;
-  assert(c > 0, "unexpected si_code");
-  switch (si->si_signo) {
-  case SIGILL:
-    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGFPE:
-    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGSEGV:
-    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  case SIGBUS:
-    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
-    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
-    break;
-  default:
-    st->print(", si_code=%d", si->si_code);
-    // no si_addr
-  }
-
-  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
+  const siginfo_t* si = (const siginfo_t*)siginfo;
+
+  os::Posix::print_siginfo_brief(st, si);
+
+  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
       UseSharedSpaces) {
     FileMapInfo* mapinfo = FileMapInfo::current_info();
     if (mapinfo->is_in_shared_space(si->si_addr)) {
@@ -2368,7 +2320,8 @@
     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
   }
 
-  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);
+  st->print(", sa_mask[0]=");
+  os::Posix::print_signal_set_short(st, &sa.sa_mask);
 
   address rh = VMError::get_resetted_sighandler(sig);
   // Maybe the handler was reset by VMError?
@@ -2377,7 +2330,8 @@
     sa.sa_flags = VMError::get_resetted_sigflags(sig);
   }
 
-  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);
+  st->print(", sa_flags=");
+  os::Posix::print_sa_flags(st, sa.sa_flags);
 
   // Check: is it our handler?
   if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
@@ -2437,13 +2391,14 @@
     return;
   }
 
-  if (Arguments::created_by_gamma_launcher()) {
-    // Support for the gamma launcher.  Typical value for buf is
-    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
-    // the right place in the string, then assume we are installed in a JDK and
-    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
-    // up the path so it looks like libjvm.so is installed there (append a
-    // fake suffix hotspot/libjvm.so).
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
+    // value for buf is "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".
+    // If "/jre/lib/" appears at the right place in the string, then
+    // assume we are installed in a JDK and we're done.  Otherwise, check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like libjvm.so is installed there (append a fake suffix
+    // hotspot/libjvm.so).
     const char *p = buf + strlen(buf) - 1;
     for (int count = 0; p > buf && count < 5; ++count) {
       for (--p; p > buf && *p != '/'; --p)
@@ -3007,7 +2962,7 @@
 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
   const size_t types = sizeof(info_types) / sizeof(info_types[0]);
-  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
+  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
   uint_t validity[MAX_MEMINFO_CNT];
 
   size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
@@ -3046,7 +3001,7 @@
       }
     }
 
-    if (i != addrs_count) {
+    if (i < addrs_count) {
       if ((validity[i] & 2) != 0) {
         page_found->lgrp_id = outdata[types * i];
       } else {
@@ -3409,61 +3364,6 @@
   return true;
 }
 
-static int os_sleep(jlong millis, bool interruptible) {
-  const jlong limit = INT_MAX;
-  jlong prevtime;
-  int res;
-
-  while (millis > limit) {
-    if ((res = os_sleep(limit, interruptible)) != OS_OK)
-      return res;
-    millis -= limit;
-  }
-
-  // Restart interrupted polls with new parameters until the proper delay
-  // has been completed.
-
-  prevtime = getTimeMillis();
-
-  while (millis > 0) {
-    jlong newtime;
-
-    if (!interruptible) {
-      // Following assert fails for os::yield_all:
-      // assert(!thread->is_Java_thread(), "must not be java thread");
-      res = poll(NULL, 0, millis);
-    } else {
-      JavaThread *jt = JavaThread::current();
-
-      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
-        os::Solaris::clear_interrupted);
-    }
-
-    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
-    // thread.Interrupt.
-
-    // See c/r 6751923. Poll can return 0 before time
-    // has elapsed if time is set via clock_settime (as NTP does).
-    // res == 0 if poll timed out (see man poll RETURN VALUES)
-    // using the logic below checks that we really did
-    // sleep at least "millis" if not we'll sleep again.
-    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
-      newtime = getTimeMillis();
-      assert(newtime >= prevtime, "time moving backwards");
-    /* Doing prevtime and newtime in microseconds doesn't help precision,
-       and trying to round up to avoid lost milliseconds can result in a
-       too-short delay. */
-      millis -= newtime - prevtime;
-      if(millis <= 0)
-        return OS_OK;
-      prevtime = newtime;
-    } else
-      return res;
-  }
-
-  return OS_OK;
-}
-
 // Read calls from inside the vm need to perform state transitions
 size_t os::read(int fd, void *buf, unsigned int nBytes) {
   INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
@@ -3473,72 +3373,14 @@
   INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
 }
 
-int os::sleep(Thread* thread, jlong millis, bool interruptible) {
-  assert(thread == Thread::current(),  "thread consistency check");
-
-  // TODO-FIXME: this should be removed.
-  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
-  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
-  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
-  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
-  // is fooled into believing that the system is making progress. In the code below we block the
-  // the watcher thread while safepoint is in progress so that it would not appear as though the
-  // system is making progress.
-  if (!Solaris::T2_libthread() &&
-      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
-    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
-    // the entire safepoint, the watcher thread will  line up here during the safepoint.
-    Threads_lock->lock_without_safepoint_check();
-    Threads_lock->unlock();
-  }
-
-  if (thread->is_Java_thread()) {
-    // This is a JavaThread so we honor the _thread_blocked protocol
-    // even for sleeps of 0 milliseconds. This was originally done
-    // as a workaround for bug 4338139. However, now we also do it
-    // to honor the suspend-equivalent protocol.
-
-    JavaThread *jt = (JavaThread *) thread;
-    ThreadBlockInVM tbivm(jt);
-
-    jt->set_suspend_equivalent();
-    // cleared by handle_special_suspend_equivalent_condition() or
-    // java_suspend_self() via check_and_wait_while_suspended()
-
-    int ret_code;
-    if (millis <= 0) {
-      thr_yield();
-      ret_code = 0;
-    } else {
-      // The original sleep() implementation did not create an
-      // OSThreadWaitState helper for sleeps of 0 milliseconds.
-      // I'm preserving that decision for now.
-      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
-
-      ret_code = os_sleep(millis, interruptible);
-    }
-
-    // were we externally suspended while we were waiting?
-    jt->check_and_wait_while_suspended();
-
-    return ret_code;
-  }
-
-  // non-JavaThread from this point on:
-
-  if (millis <= 0) {
-    thr_yield();
-    return 0;
-  }
-
-  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
-
-  return os_sleep(millis, interruptible);
-}
-
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os_sleep(1, false);
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+
+  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
+  // Solaris requires -lrt for this.
+  usleep(ms * 1000);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
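
Note: the replacement naked_short_sleep() leans on usleep(); an equivalent hedged sketch with nanosleep(2), which POSIX recommends over the obsolescent usleep() (and which, as the comment in the patch notes, pulls in -lrt on Solaris):

    #include <time.h>

    static void short_sleep_ms(long ms) {
      struct timespec ts;
      ts.tv_sec  = ms / 1000;
      ts.tv_nsec = (ms % 1000) * 1000000L;
      nanosleep(&ts, NULL);   // best effort; EINTR deliberately ignored
    }
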
@@ -4173,68 +4015,6 @@
   errno = old_errno;
 }
 
-
-void os::interrupt(Thread* thread) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  int isInterrupted = osthread->interrupted();
-  if (!isInterrupted) {
-      osthread->set_interrupted(true);
-      OrderAccess::fence();
-      // os::sleep() is implemented with either poll (NULL,0,timeout) or
-      // by parking on _SleepEvent.  If the former, thr_kill will unwedge
-      // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
-      ParkEvent * const slp = thread->_SleepEvent ;
-      if (slp != NULL) slp->unpark() ;
-  }
-
-  // For JSR166:  unpark after setting status but before thr_kill -dl
-  if (thread->is_Java_thread()) {
-    ((JavaThread*)thread)->parker()->unpark();
-  }
-
-  // Handle interruptible wait() ...
-  ParkEvent * const ev = thread->_ParkEvent ;
-  if (ev != NULL) ev->unpark() ;
-
-  // When events are used everywhere for os::sleep, then this thr_kill
-  // will only be needed if UseVMInterruptibleIO is true.
-
-  if (!isInterrupted) {
-    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
-    assert_status(status == 0, status, "thr_kill");
-
-    // Bump thread interruption counter
-    RuntimeService::record_thread_interrupt_signaled_count();
-  }
-}
-
-
-bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
-  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
-
-  OSThread* osthread = thread->osthread();
-
-  bool res = osthread->interrupted();
-
-  // NOTE that since there is no "lock" around these two operations,
-  // there is the possibility that the interrupted flag will be
-  // "false" but that the interrupt event will be set. This is
-  // intentional. The effect of this is that Object.wait() will appear
-  // to have a spurious wakeup, which is not harmful, and the
-  // possibility is so rare that it is not worth the added complexity
-  // to add yet another lock. It has also been recommended not to put
-  // the interrupted flag into the os::Solaris::Event structure,
-  // because it hides the issue.
-  if (res && clear_interrupted) {
-    osthread->set_interrupted(false);
-  }
-  return res;
-}
-
-
 void os::print_statistics() {
 }
 
--- a/src/os/solaris/vm/os_solaris.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/vm/os_solaris.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -27,6 +27,9 @@
 
 // Solaris_OS defines the interface to Solaris operating systems
 
+// Information about the protection of the page at address '0' on this os.
+static bool zero_page_read_protected() { return true; }
+
 class Solaris {
   friend class os;
 
--- a/src/os/solaris/vm/os_solaris.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/vm/os_solaris.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -260,4 +260,10 @@
                             const char *optval, socklen_t optlen) {
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
+
+inline bool os::supports_monotonic_clock() {
+  // javaTimeNanos() is monotonic on Solaris, see getTimeNanos() comments
+  return true;
+}
+
 #endif // OS_SOLARIS_VM_OS_SOLARIS_INLINE_HPP
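
Note: with supports_monotonic_clock() in place, timing code can branch on the query instead of hard-coding platform knowledge. A caller-side sketch (the helper itself is hypothetical; os::javaTimeNanos()/os::javaTimeMillis() and NANOSECS_PER_MILLISEC are HotSpot's existing API):

    #include "runtime/os.hpp"   // within the HotSpot sources

    static jlong monotonic_or_wallclock_nanos() {
      if (os::supports_monotonic_clock()) {
        return os::javaTimeNanos();                          // monotonic
      }
      return os::javaTimeMillis() * NANOSECS_PER_MILLISEC;   // wall clock
    }
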
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -431,10 +431,12 @@
 
       RESTARTABLE(::read(fd, addr, remaining), result);
       if (result == OS_ERR) {
+        ::close(fd);
         THROW_MSG_0(vmSymbols::java_io_IOException(), "Read error");
+      } else {
+        remaining-=result;
+        addr+=result;
       }
-      remaining-=result;
-      addr+=result;
     }
 
     ::close(fd);
@@ -906,8 +908,16 @@
   FREE_C_HEAP_ARRAY(char, filename, mtInternal);
 
   // open the shared memory file for the give vmid
-  fd = open_sharedmem_file(rfilename, file_flags, CHECK);
-  assert(fd != OS_ERR, "unexpected value");
+  fd = open_sharedmem_file(rfilename, file_flags, THREAD);
+
+  if (fd == OS_ERR) {
+    return;
+  }
+
+  if (HAS_PENDING_EXCEPTION) {
+    ::close(fd);
+    return;
+  }
 
   if (*sizep == 0) {
     size = sharedmem_filesize(fd, CHECK);
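
Note: the change above swaps CHECK for THREAD in the TRAPS idiom — CHECK would have returned from the caller as soon as open_sharedmem_file() left a pending exception, leaking fd. A hedged sketch of the corrected shape (macro names per HotSpot's utilities/exceptions.hpp; the function itself is hypothetical):

    static void attach_shared_file(const char* name, int flags, TRAPS) {
      int fd = open_sharedmem_file(name, flags, THREAD);
      if (fd == OS_ERR) {
        return;                  // open failed outright; nothing to release
      }
      if (HAS_PENDING_EXCEPTION) {
        ::close(fd);             // release the descriptor, then propagate
        return;
      }
      // ... map and use the shared-memory file ...
    }
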
--- a/src/os/windows/vm/decoder_windows.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/windows/vm/decoder_windows.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os/windows/vm/os_windows.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/windows/vm/os_windows.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -166,12 +166,10 @@
   return;
 }
 
-#ifndef _WIN64
 // previous UnhandledExceptionFilter, if there is one
 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
-#endif
 void os::init_system_properties_values() {
   /* sysclasspath, java_home, dll_dir */
   {
@@ -630,8 +628,6 @@
   delete osthread;
 }
 
-
-static int    has_performance_count = 0;
 static jlong first_filetime;
 static jlong initial_performance_count;
 static jlong performance_frequency;
@@ -647,7 +643,7 @@
 
 jlong os::elapsed_counter() {
   LARGE_INTEGER count;
-  if (has_performance_count) {
+  if (win32::_has_performance_count) {
     QueryPerformanceCounter(&count);
     return as_long(count) - initial_performance_count;
   } else {
@@ -659,7 +655,7 @@
 
 
 jlong os::elapsed_frequency() {
-  if (has_performance_count) {
+  if (win32::_has_performance_count) {
     return performance_frequency;
   } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
@@ -738,15 +734,15 @@
   return false;
 }
 
-static void initialize_performance_counter() {
+void os::win32::initialize_performance_counter() {
   LARGE_INTEGER count;
   if (QueryPerformanceFrequency(&count)) {
-    has_performance_count = 1;
+    win32::_has_performance_count = 1;
     performance_frequency = as_long(count);
     QueryPerformanceCounter(&count);
     initial_performance_count = as_long(count);
   } else {
-    has_performance_count = 0;
+    win32::_has_performance_count = 0;
     FILETIME wt;
     GetSystemTimeAsFileTime(&wt);
     first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
@@ -841,7 +837,7 @@
 }
 
 jlong os::javaTimeNanos() {
-  if (!has_performance_count) {
+  if (!win32::_has_performance_count) {
     return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
   } else {
     LARGE_INTEGER current_count;
@@ -854,7 +850,7 @@
 }
 
 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
-  if (!has_performance_count) {
+  if (!win32::_has_performance_count) {
     // javaTimeMillis() doesn't have much precision,
     // but it is not going to wrap -- so all 64 bits
     info_ptr->max_value = ALL_64_BITS;
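
Note: everything above funnels the high-resolution timer decision through the new os::win32::_has_performance_count flag. The underlying Win32 pattern, as a standalone sketch:

    #include <windows.h>
    #include <stdio.h>

    int main() {
      LARGE_INTEGER freq, t0, t1;
      if (!QueryPerformanceFrequency(&freq)) {
        return 1;   // no high-resolution counter: fall back to FILETIME time
      }
      QueryPerformanceCounter(&t0);
      Sleep(10);
      QueryPerformanceCounter(&t1);
      printf("elapsed ns: %lld\n",
             (t1.QuadPart - t0.QuadPart) * 1000000000LL / freq.QuadPart);
      return 0;
    }
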
@@ -1812,32 +1808,30 @@
   }
 
   buf[0] = '\0';
-  if (Arguments::created_by_gamma_launcher()) {
-     // Support for the gamma launcher. Check for an
-     // JAVA_HOME environment variable
-     // and fix up the path so it looks like
-     // libjvm.so is installed there (append a fake suffix
-     // hotspot/libjvm.so).
-     char* java_home_var = ::getenv("JAVA_HOME");
-     if (java_home_var != NULL && java_home_var[0] != 0) {
-
-        strncpy(buf, java_home_var, buflen);
-
-        // determine if this is a legacy image or modules image
-        // modules image doesn't have "jre" subdirectory
-        size_t len = strlen(buf);
-        char* jrebin_p = buf + len;
-        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
-        if (0 != _access(buf, 0)) {
-          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
-        }
-        len = strlen(buf);
-        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
-     }
-  }
-
-  if(buf[0] == '\0') {
-  GetModuleFileName(vm_lib_handle, buf, buflen);
+  if (Arguments::sun_java_launcher_is_altjvm()) {
+    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
+    // for a JAVA_HOME environment variable and fix up the path so it
+    // looks like jvm.dll is installed there (append a fake suffix
+    // hotspot/jvm.dll).
+    char* java_home_var = ::getenv("JAVA_HOME");
+    if (java_home_var != NULL && java_home_var[0] != 0) {
+      strncpy(buf, java_home_var, buflen);
+
+      // determine if this is a legacy image or modules image
+      // modules image doesn't have "jre" subdirectory
+      size_t len = strlen(buf);
+      char* jrebin_p = buf + len;
+      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
+      if (0 != _access(buf, 0)) {
+        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
+      }
+      len = strlen(buf);
+      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
+    }
+  }
+
+  if (buf[0] == '\0') {
+    GetModuleFileName(vm_lib_handle, buf, buflen);
   }
   strcpy(saved_jvm_path, buf);
 }
@@ -2253,11 +2247,11 @@
   return EXCEPTION_CONTINUE_EXECUTION;
 }
 
-#ifndef  _WIN64
 //-----------------------------------------------------------------------------
 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
+  PCONTEXT ctx = exceptionInfo->ContextRecord;
+#ifndef  _WIN64
   // handle exception caused by native method modifying control word
-  PCONTEXT ctx = exceptionInfo->ContextRecord;
   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
 
   switch (exception_code) {
@@ -2283,17 +2277,11 @@
     // UnhandledExceptionFilter.
     return (prev_uef_handler)(exceptionInfo);
   }
-
-  return EXCEPTION_CONTINUE_SEARCH;
-}
-#else //_WIN64
+#else // !_WIN64
 /*
   On Windows, the mxcsr control bits are non-volatile across calls
   See also CR 6192333
-  If EXCEPTION_FLT_* happened after some native method modified
-  mxcsr - it is not a jvm fault.
-  However should we decide to restore of mxcsr after a faulty
-  native method we can uncomment following code
+  */
       jint MxCsr = INITIAL_MXCSR;
         // we can't use StubRoutines::addr_mxcsr_std()
         // because in Win64 mxcsr is not saved there
@@ -2301,10 +2289,10 @@
         ctx->MxCsr = MxCsr;
         return EXCEPTION_CONTINUE_EXECUTION;
       }
-
-*/
-#endif //_WIN64
-
+#endif // !_WIN64
+
+  return EXCEPTION_CONTINUE_SEARCH;
+}
 
 // Fatal error reporting is single threaded so we can make this a
 // static and preallocated.  If it's more than MAX_PATH silently ignore
@@ -2653,7 +2641,6 @@
 
       } // switch
     }
-#ifndef _WIN64
     if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
@@ -2661,7 +2648,6 @@
       LONG result=Handle_FLT_Exception(exceptionInfo);
       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
     }
-#endif //_WIN64
   }
 
   if (exception_code != EXCEPTION_BREAKPOINT) {
@@ -3509,6 +3495,16 @@
   return result;
 }
 
+//
+// Short sleep, direct OS call.
+//
+// ms = 0, means allow others (if any) to run.
+//
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptible sleep, short time use only");
+  Sleep(ms);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
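
Note on the Windows naked_short_sleep() above: Sleep(0) does not idle at all — it relinquishes the remainder of the current time slice to any ready thread of equal priority, which is why the comment treats ms == 0 as "allow others to run". Illustrated:

    #include <windows.h>

    int main() {
      Sleep(0);   // yield the rest of this time slice, if anyone is ready
      Sleep(1);   // shortest real sleep, subject to timer granularity
      return 0;
    }
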
@@ -3697,6 +3693,8 @@
 bool   os::win32::_is_windows_2003    = false;
 bool   os::win32::_is_windows_server  = false;
 
+bool   os::win32::_has_performance_count = 0;
+
 void os::win32::initialize_system_info() {
   SYSTEM_INFO si;
   GetSystemInfo(&si);
--- a/src/os/windows/vm/os_windows.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/windows/vm/os_windows.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,9 @@
 #define OS_WINDOWS_VM_OS_WINDOWS_HPP
 // Win32_OS defines the interface to windows operating systems
 
+// Information about the protection of the page at address '0' on this os.
+static bool zero_page_read_protected() { return true; }
+
 class win32 {
   friend class os;
 
@@ -39,6 +42,7 @@
   static bool   _is_nt;
   static bool   _is_windows_2003;
   static bool   _is_windows_server;
+  static bool   _has_performance_count;
 
   static void print_windows_version(outputStream* st);
 
@@ -60,6 +64,9 @@
   // load dll from Windows system directory or Windows directory
   static HINSTANCE load_Windows_dll(const char* name, char *ebuf, int ebuflen);
 
+  private:
+    static void initialize_performance_counter();
+
  public:
   // Generic interface:
 
--- a/src/os/windows/vm/os_windows.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os/windows/vm/os_windows.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,6 +107,10 @@
   return ::close(fd);
 }
 
+inline bool os::supports_monotonic_clock() {
+  return win32::_has_performance_count;
+}
+
 #ifndef PRODUCT
   #define CALL_TEST_FUNC_WITH_WRAPPER_IF_NEEDED(f) \
             os::win32::call_test_func_with_wrapper(f)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
+
+#include "orderAccess_aix_ppc.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "vm_version_ppc.hpp"
+
+#ifndef _LP64
+#error "Atomic currently only impleneted for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+//
+//   machine barrier instructions:
+//
+//   - ppc_sync            two-way memory barrier, aka fence
+//   - ppc_lwsync          orders  Store|Store,
+//                                  Load|Store,
+//                                  Load|Load,
+//                         but not Store|Load
+//   - ppc_eieio           orders memory accesses for device memory (only)
+//   - ppc_isync           invalidates speculatively executed instructions
+//                         From the POWER ISA 2.06 documentation:
+//                          "[...] an isync instruction prevents the execution of
+//                         instructions following the isync until instructions
+//                         preceding the isync have completed, [...]"
+//                         From IBM's AIX assembler reference:
+//                          "The isync [...] instructions causes the processor to
+//                         refetch any instructions that might have been fetched
+//                         prior to the isync instruction. The instruction isync
+//                         causes the processor to wait for all previous instructions
+//                         to complete. Then any instructions already fetched are
+//                         discarded and instruction processing continues in the
+//                         environment established by the previous instructions."
+//
+//   semantic barrier instructions:
+//   (as defined in orderAccess.hpp)
+//
+//   - ppc_release         orders Store|Store,       (maps to ppc_lwsync)
+//                                 Load|Store
+//   - ppc_acquire         orders  Load|Store,       (maps to ppc_lwsync)
+//                                 Load|Load
+//   - ppc_fence           orders Store|Store,       (maps to ppc_sync)
+//                                 Load|Store,
+//                                 Load|Load,
+//                                Store|Load
+//
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
+
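
Note: read through the barrier comments above, the lwsync ... isync bracket around each lwarx/stwcx. loop below yields release-then-acquire ordering. A rough portable analogue in C++11 atomics, offered for orientation only (not a drop-in for the inline assembly):

    #include <atomic>

    // Approximates the Atomic::add below: returns the new value, with
    // acquire+release ordering around the read-modify-write.
    inline int add_acq_rel(std::atomic<int>& v, int delta) {
      return v.fetch_add(delta, std::memory_order_acq_rel) + delta;
    }
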
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* sync */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
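+// A hedged usage sketch of the cmpxchg defined above (illustration only;
+// 'counter' is a hypothetical volatile jint*): a lock-free increment.
+//
+//   jint old;
+//   do {
+//     old = *counter;
+//   } while (Atomic::cmpxchg(old + 1, counter, old) != old);
+//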
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ATOMIC_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP
+
+// Sets the default values for platform dependent flags used by the runtime system.
+// (see globals.hpp)
+
+define_pd_global(bool, DontYieldALot,            false);
+define_pd_global(intx, ThreadStackSize,          2048); // 0 => use system default
+define_pd_global(intx, VMThreadStackSize,        2048);
+
+// If we set CompilerThreadStackSize to a value different from 0, it will
+// be used in os::create_thread(). Otherwise, due to the strange logic in
+// os::create_thread(), the stack size for compiler threads will default to
+// VMThreadStackSize, although it is defined as 4M in os::Aix::default_stack_size()!
+define_pd_global(intx, CompilerThreadStackSize,  4096);
+
+// Allow extra space in DEBUG builds for asserts.
+define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
+
+define_pd_global(intx, StackYellowPages,         6);
+define_pd_global(intx, StackRedPages,            1);
+define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));
+
+// Only used on 64 bit platforms
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
+// Only used on 64 bit Windows platforms
+define_pd_global(bool, UseVectoredExceptions,    false);
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_GLOBALS_AIX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/orderAccess_aix_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
+
+#include "runtime/orderAccess.hpp"
+#include "vm_version_ppc.hpp"
+
+// Implementation of class OrderAccess.
+
+//
+// Machine barrier instructions:
+//
+// - sync            Two-way memory barrier, aka fence.
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders  Store|Store
+// - isync           Invalidates speculatively executed instructions,
+//                   but isync may complete before storage accesses
+//                   associated with instructions preceding isync have
+//                   been performed.
+//
+// Semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
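+// A hedged illustration only ('flag' and 'payload' are hypothetical fields,
+// not part of this file) of how the pair defined below is intended to be used:
+//
+//   writer:  payload = v;
+//            OrderAccess::release_store(&flag, 1);    // lwsync, then store
+//   reader:  if (OrderAccess::load_acquire(&flag)) {  // load, then acquire
+//              use(payload);
+//            }
+//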
+
+#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
+#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
+#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
+#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
+#define inlasm_release()  inlasm_lwsync();
+#define inlasm_acquire()  inlasm_lwsync();
+// Use twi-isync for load_acquire (faster than lwsync).
+// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
+// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
+#define inlasm_acquire_reg(X) inlasm_lwsync();
+#define inlasm_fence()    inlasm_sync();
+
+inline void     OrderAccess::loadload()   { inlasm_lwsync();  }
+inline void     OrderAccess::storestore() { inlasm_lwsync();  }
+inline void     OrderAccess::loadstore()  { inlasm_lwsync();  }
+inline void     OrderAccess::storeload()  { inlasm_fence();   }
+
+inline void     OrderAccess::acquire()    { inlasm_acquire(); }
+inline void     OrderAccess::release()    { inlasm_release(); }
+inline void     OrderAccess::fence()      { inlasm_fence();   }
+
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte t = *p;   inlasm_acquire_reg(t); return t; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { register jshort t = *p;  inlasm_acquire_reg(t); return t; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { register jint t = *p;    inlasm_acquire_reg(t); return t; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { register jlong t = *p;   inlasm_acquire_reg(t); return t; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte t = *p;  inlasm_acquire_reg(t); return t; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { register juint t = *p;   inlasm_acquire_reg(t); return t; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat t = *p;  inlasm_acquire(); return t; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
+
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*)   load_acquire((volatile jlong*)p); }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)   load_acquire((volatile jlong*)p); }
+
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
+
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }
+
+inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
+
+#undef inlasm_sync
+#undef inlasm_lwsync
+#undef inlasm_eieio
+#undef inlasm_isync
+#undef inlasm_release
+#undef inlasm_acquire
+#undef inlasm_fence
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// no precompiled headers
+#include "assembler_ppc.inline.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "jvm_aix.h"
+#include "memory/allocation.inline.hpp"
+#include "mutex_aix.inline.hpp"
+#include "nativeInst_ppc.hpp"
+#include "os_share_aix.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm.h"
+#include "prims/jvm_misc.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/extendedPC.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/timer.hpp"
+#include "thread_aix.inline.hpp"
+#include "utilities/events.hpp"
+#include "utilities/vmError.hpp"
+#ifdef COMPILER1
+#include "c1/c1_Runtime1.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/runtime.hpp"
+#endif
+
+// put OS-includes here
+# include <ucontext.h>
+
+address os::current_stack_pointer() {
+  address csp;
+
+#if !defined(USE_XLC_BUILTINS)
+  // inline assembly for `mr regno(csp), R1_SP':
+  __asm__ __volatile__ ("mr %0, 1":"=r"(csp):);
+#else
+  csp = (address) __builtin_frame_address(0);
+#endif
+
+  return csp;
+}
+
+char* os::non_memory_address_word() {
+  // Must never look like an address returned by reserve_memory,
+  // even in its subfields (as defined by the CPU immediate fields,
+  // if the CPU splits constants across multiple instructions).
+
+  return (char*) -1;
+}
+
+// OS specific thread initialization
+//
+// Calculate and store the limits of the memory stack.
+void os::initialize_thread(Thread *thread) { }
+
+// Frame information (pc, sp, fp) retrieved via ucontext
+// always looks like a C-frame according to the frame
+// conventions in frame_ppc64.hpp.
+address os::Aix::ucontext_get_pc(ucontext_t * uc) {
+  return (address)uc->uc_mcontext.jmp_context.iar;
+}
+
+intptr_t* os::Aix::ucontext_get_sp(ucontext_t * uc) {
+  // gpr1 holds the stack pointer on AIX
+  return (intptr_t*)uc->uc_mcontext.jmp_context.gpr[1/*REG_SP*/];
+}
+
+intptr_t* os::Aix::ucontext_get_fp(ucontext_t * uc) {
+  return NULL;
+}
+
+void os::Aix::ucontext_set_pc(ucontext_t* uc, address new_pc) {
+  uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
+}
+
+ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+                                        intptr_t** ret_sp, intptr_t** ret_fp) {
+
+  ExtendedPC  epc;
+  ucontext_t* uc = (ucontext_t*)ucVoid;
+
+  if (uc != NULL) {
+    epc = ExtendedPC(os::Aix::ucontext_get_pc(uc));
+    if (ret_sp) *ret_sp = os::Aix::ucontext_get_sp(uc);
+    if (ret_fp) *ret_fp = os::Aix::ucontext_get_fp(uc);
+  } else {
+    // construct empty ExtendedPC for return value checking
+    epc = ExtendedPC(NULL);
+    if (ret_sp) *ret_sp = (intptr_t *)NULL;
+    if (ret_fp) *ret_fp = (intptr_t *)NULL;
+  }
+
+  return epc;
+}
+
+frame os::fetch_frame_from_context(void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
+  // Avoid crashing during crash handling if the pc is broken.
+  if (epc.pc()) {
+    frame fr(sp, epc.pc());
+    return fr;
+  }
+  frame fr(sp);
+  return fr;
+}
+
+frame os::get_sender_for_C_frame(frame* fr) {
+  if (*fr->sp() == NULL) {
+    // fr is the last C frame
+    return frame(NULL, NULL);
+  }
+  return frame(fr->sender_sp(), fr->sender_pc());
+}
+
+
+frame os::current_frame() {
+  intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
+  // hack.
+  frame topframe(csp, (address)0x8);
+  // return sender of current topframe which hopefully has pc != NULL.
+  return os::get_sender_for_C_frame(&topframe);
+}
+
+// Utility functions
+
+extern "C" JNIEXPORT int
+JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
+
+  ucontext_t* uc = (ucontext_t*) ucVoid;
+
+  Thread* t = ThreadLocalStorage::get_thread_slow();   // slow & steady
+
+  SignalHandlerMark shm(t);
+
+  // Note: it's not uncommon that JNI code uses signal/sigset to install,
+  // then restore, certain signal handlers (e.g. to temporarily block SIGPIPE,
+  // or have a SIGILL handler when detecting CPU type). When that happens,
+  // JVM_handle_aix_signal() might be invoked with junk info/ucVoid. To
+  // avoid an unnecessary crash when libjsig is not preloaded, try to handle
+  // signals that do not require siginfo/ucontext first.
+
+  if (sig == SIGPIPE) {
+    if (os::Aix::chained_handler(sig, info, ucVoid)) {
+      return 1;
+    } else {
+      if (PrintMiscellaneous && (WizardMode || Verbose)) {
+        warning("Ignoring SIGPIPE - see bug 4229104");
+      }
+      return 1;
+    }
+  }
+
+  JavaThread* thread = NULL;
+  VMThread* vmthread = NULL;
+  if (os::Aix::signal_handlers_are_installed) {
+    if (t != NULL) {
+      if (t->is_Java_thread()) {
+        thread = (JavaThread*)t;
+      }
+      else if (t->is_VM_thread()) {
+        vmthread = (VMThread *)t;
+      }
+    }
+  }
+
+  // Decide if this trap can be handled by a stub.
+  address stub = NULL;
+
+  // retrieve program counter
+  address const pc = uc ? os::Aix::ucontext_get_pc(uc) : NULL;
+
+  // retrieve crash address
+  address const addr = info ? (const address) info->si_addr : NULL;
+
+  // SafeFetch 32 handling:
+  // - make it work if _thread is null
+  // - make it use the standard os::...::ucontext_get/set_pc APIs
+  if (uc) {
+    address const pc = os::Aix::ucontext_get_pc(uc);
+    if (pc && StubRoutines::is_safefetch_fault(pc)) {
+      os::Aix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
+      return true;
+    }
+  }
+
+  // Handle SIGDANGER right away. AIX would raise SIGDANGER whenever available swap
+  // space falls below 30%. This is only a chance for the process to gracefully abort.
+  // We can't hope to proceed after SIGDANGER since SIGKILL tailgates.
+  if (sig == SIGDANGER) {
+    goto report_and_die;
+  }
+
+  if (info == NULL || uc == NULL || (thread == NULL && vmthread == NULL)) {
+    goto run_chained_handler;
+  }
+
+  // If we are a java thread...
+  if (thread != NULL) {
+
+    // Handle ALL stack overflow variations here
+    if (sig == SIGSEGV && (addr < thread->stack_base() &&
+                           addr >= thread->stack_base() - thread->stack_size())) {
+      // stack overflow
+      //
+      // If we are in a yellow zone and we are inside java, we disable the yellow zone and
+      // throw a stack overflow exception.
+      // If we are in native code or VM C code, we report-and-die. The original coding tried
+      // to continue with yellow zone disabled, but that doesn't buy us much and prevents
+      // hs_err_pid files.
+      if (thread->in_stack_yellow_zone(addr)) {
+        thread->disable_stack_yellow_zone();
+        if (thread->thread_state() == _thread_in_Java) {
+          // Throw a stack overflow exception.
+          // Guard pages will be reenabled while unwinding the stack.
+          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
+          goto run_stub;
+        } else {
+          // Thread was in the vm or native code. Return and try to finish.
+          return 1;
+        }
+      } else if (thread->in_stack_red_zone(addr)) {
+        // Fatal red zone violation. Disable the guard pages and fall through
+        // to handle_unexpected_exception way down below.
+        thread->disable_stack_red_zone();
+        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+        goto report_and_die;
+      } else {
+        // This means a segv happened inside our stack, but not in
+        // the guarded zone. I'd like to know when this happens,
+        tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
+        goto report_and_die;
+      }
+
+    } // end handle SIGSEGV inside stack boundaries
+
+    if (thread->thread_state() == _thread_in_Java) {
+      // Java thread running in Java code
+
+      // The following signals are used for communicating VM events:
+      //
+      // SIGILL: the compiler generates illegal opcodes
+      //   at places where it wishes to interrupt the VM:
+      //   safepoints, unreachable code, and entry points of zombie methods.
+      //   This results in a SIGILL with (*pc) == the inserted illegal instruction.
+      //
+      //   (So SIGILLs with a pc inside the zero page are real errors.)
+      //
+      // SIGTRAP:
+      //   The ppc trap instruction raises a SIGTRAP and is very efficient if it
+      //   does not trap. It is used for conditional branches that are expected
+      //   never to be taken. These are:
+      //     - zombie methods
+      //     - IC (inline cache) misses
+      //     - null checks leading to uncommon traps
+      //     - range checks leading to uncommon traps
+      //   On AIX these are mostly null checks, because the ImplicitNullCheck
+      //   optimization works only in rare cases: the page at address 0 is merely
+      //   write protected.
+      //   Note: !UseSIGTRAP prevents SIGTRAPs altogether, to facilitate debugging.
+      //
+      // SIGSEGV:
+      //   used for safe point polling:
+      //     To notify all threads that they have to reach a safe point, safe point polling is used:
+      //     All threads poll a certain mapped memory page. Normally, this page has read access.
+      //     If the VM wants to inform the threads about an impending safe point, it
+      //     protects the page ("poisons" it), and the threads then reach a safe point.
+      //   used for null checks:
+      //     If the compiler finds a suitable store, it uses it for a null check. Unfortunately
+      //     this happens rarely. In heap-based and disjoint-base compressed-oop modes,
+      //     loads are also used for null checks.
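+      //
+      //   As an illustration only (register names and 'rPollPage' are
+      //   hypothetical), a safepoint poll boils down to a single load that
+      //   faults with SIGSEGV once the polling page is protected:
+      //
+      //     lwz r0, 0(rPollPage)   // harmless read normally; SIGSEGV at a safepoint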
+
+      // A VM-related SIGILL may only occur if we are not in the zero page.
+      // On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
+      // in the zero page, because it is filled with 0x0. We ignore
+      // explicit SIGILLs in the zero page.
+      if (sig == SIGILL && (pc < (address) 0x200)) {
+        if (TraceTraps) {
+          tty->print_raw_cr("SIGILL happened inside zero page.");
+        }
+        goto report_and_die;
+      }
+
+      // Handle signal from NativeJump::patch_verified_entry().
+      if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
+          (!TrapBasedNotEntrantChecks && sig == SIGILL  && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
+        if (TraceTraps) {
+          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+        }
+        stub = SharedRuntime::get_handle_wrong_method_stub();
+        goto run_stub;
+      }
+
+      else if (sig == SIGSEGV && os::is_poll_address(addr)) {
+        if (TraceTraps) {
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
+        }
+        stub = SharedRuntime::get_poll_stub(pc);
+        goto run_stub;
+      }
+
+      // SIGTRAP-based ic miss check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedICMissChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::get_ic_miss_stub();
+        goto run_stub;
+      }
+
+      // SIGTRAP-based implicit null check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedNullChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_null_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+        goto run_stub;
+      }
+
+      // SIGSEGV-based implicit null check in compiled code.
+      else if (sig == SIGSEGV && ImplicitNullChecks &&
+               CodeCache::contains((void*) pc) &&
+               !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
+        if (TraceTraps) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+      }
+
+#ifdef COMPILER2
+      // SIGTRAP-based implicit range check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedRangeChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_range_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+        goto run_stub;
+      }
+#endif
+
+      else if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
+        if (TraceTraps) {
+          tty->print_raw_cr("Fix SIGFPE handler, trying divide by zero handler.");
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
+        goto run_stub;
+      }
+
+      else if (sig == SIGBUS) {
+        // BugId 4454115: A read from a MappedByteBuffer can fault here if the
+        // underlying file has been truncated. Do not crash the VM in such a case.
+        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
+        if (nm != NULL && nm->has_unsafe_access()) {
+          // We don't really need a stub here! Just set the pending exception and
+          // continue at the next instruction after the faulting read. Returning
+          // garbage from this read is ok.
+          thread->set_pending_unsafe_access_error();
+          uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
+          return 1;
+        }
+      }
+    }
+
+    else { // thread->thread_state() != _thread_in_Java
+      // Detect CPU features. This is only done at the very start of the VM. Later, the
+      // VM_Version::is_determine_features_test_running() flag should be false.
+
+      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
+        // SIGILL must be caused by VM_Version::determine_features().
+        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
+                        // flushing of icache is not necessary.
+        stub = pc + 4;  // continue with next instruction.
+        goto run_stub;
+      }
+      else if (thread->thread_state() == _thread_in_vm &&
+               sig == SIGBUS && thread->doing_unsafe_access()) {
+        // We don't really need a stub here! Just set the pending exception and
+        // continue at the next instruction after the faulting read. Returning
+        // garbage from this read is ok.
+        thread->set_pending_unsafe_access_error();
+        uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
+        return 1;
+      }
+    }
+
+    // Check to see if we caught the safepoint code in the
+    // process of write protecting the memory serialization page.
+    // It write enables the page immediately after protecting it
+    // so we can just return to retry the write.
+    if ((sig == SIGSEGV) &&
+        os::is_memory_serialize_page(thread, addr)) {
+      // Synchronization problem in the pseudo memory barrier code (bug id 6546278)
+      // Block current thread until the memory serialize page permission restored.
+      os::block_on_serialize_page_trap();
+      return true;
+    }
+  }
+
+run_stub:
+
+  // If one of the above code blocks initialized the stub, delegate
+  // control to that stub.
+  if (stub != NULL) {
+    // Save all thread context in case we need to restore it.
+    if (thread != NULL) thread->set_saved_exception_pc(pc);
+    uc->uc_mcontext.jmp_context.iar = (unsigned long)stub;
+    return 1;
+  }
+
+run_chained_handler:
+
+  // signal-chaining
+  if (os::Aix::chained_handler(sig, info, ucVoid)) {
+    return 1;
+  }
+  if (!abort_if_unrecognized) {
+    // caller wants another chance, so give it to him
+    return 0;
+  }
+
+report_and_die:
+
+  // Use sigthreadmask instead of sigprocmask on AIX and unmask current signal.
+  sigset_t newset;
+  sigemptyset(&newset);
+  sigaddset(&newset, sig);
+  sigthreadmask(SIG_UNBLOCK, &newset, NULL);
+
+  VMError err(t, sig, pc, info, ucVoid);
+  err.report_and_die();
+
+  ShouldNotReachHere();
+  return 0;
+}
+
+void os::Aix::init_thread_fpu_state(void) {
+#if !defined(USE_XLC_BUILTINS)
+  // Disable FP exceptions.
+  __asm__ __volatile__ ("mtfsfi 6,0");
+#else
+  __mtfsfi(6, 0);
+#endif
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+size_t os::Aix::min_stack_allowed = 768*K;
+
+// Aix is always in floating stack mode. The stack size for a new
+// thread can be set via pthread_attr_setstacksize().
+bool os::Aix::supports_variable_stack_size() { return true; }
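+// (Illustration only, with standard pthread calls; not code from this file:
+//    pthread_attr_t attr;
+//    pthread_attr_init(&attr);
+//    pthread_attr_setstacksize(&attr, stack_size);
+//  would be done before pthread_create() by the generic os::create_thread().)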
+
+// return default stack size for thr_type
+size_t os::Aix::default_stack_size(os::ThreadType thr_type) {
+  // default stack size (compiler thread needs larger stack)
+  // Notice that the setting for compiler threads here has no impact
+  // because of the strange 'fallback logic' in os::create_thread().
+  // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
+  // specify a different stack size for compiler threads!
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+  return s;
+}
+
+size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
+  return 2 * page_size();
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// helper functions for fatal error handler
+
+void os::print_context(outputStream *st, void *context) {
+  if (context == NULL) return;
+
+  ucontext_t* uc = (ucontext_t*)context;
+
+  st->print_cr("Registers:");
+  st->print("pc =" INTPTR_FORMAT "  ", uc->uc_mcontext.jmp_context.iar);
+  st->print("lr =" INTPTR_FORMAT "  ", uc->uc_mcontext.jmp_context.lr);
+  st->print("ctr=" INTPTR_FORMAT "  ", uc->uc_mcontext.jmp_context.ctr);
+  st->cr();
+  for (int i = 0; i < 32; i++) {
+    st->print("r%-2d=" INTPTR_FORMAT "  ", i, uc->uc_mcontext.jmp_context.gpr[i]);
+    if (i % 3 == 2) st->cr();
+  }
+  st->cr();
+  st->cr();
+
+  intptr_t *sp = (intptr_t *)os::Aix::ucontext_get_sp(uc);
+  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+  print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
+  st->cr();
+
+  // Note: it may be unsafe to inspect memory near pc. For example, pc may
+  // point to garbage if entry point in an nmethod is corrupted. Leave
+  // this at the end, and hope for the best.
+  address pc = os::Aix::ucontext_get_pc(uc);
+  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+  print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
+  st->cr();
+
+  // Try to decode the instructions.
+  st->print_cr("Decoded instructions: (pc=" PTR_FORMAT ")", pc);
+  st->print("<TODO: PPC port - print_context>");
+  // TODO: PPC port Disassembler::decode(pc, 16, 16, st);
+  st->cr();
+}
+
+void os::print_register_info(outputStream *st, void *context) {
+  if (context == NULL) return;
+  st->print("Not ported - print_register_info\n");
+}
+
+extern "C" {
+  int SpinPause() {
+    return 0;
+  }
+}
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+
+  static void setup_fpu() {}
+
+  // Used to register dynamic code cache area with the OS
+  // Note: Currently only used in 64 bit Windows implementations
+  static bool register_code_area(char *low, char *high) { return true; }
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+
+#include "runtime/prefetch.hpp"
+
+
+inline void Prefetch::read(void *loc, intx interval) {
+#if !defined(USE_XLC_BUILTINS)
+  __asm__ __volatile__ (
+    "   dcbt   0, %0       \n"
+    :
+    : /*%0*/"r" ( ((address)loc) +((long)interval) )
+    //:
+    );
+#else
+  __dcbt(((address)loc) +((long)interval));
+#endif
+}
+
+inline void Prefetch::write(void *loc, intx interval) {
+#if !defined(USE_XLC_PREFETCH_WRITE_BUILTIN)
+  __asm__ __volatile__ (
+    "   dcbtst 0, %0       \n"
+    :
+    : /*%0*/"r" ( ((address)loc) +((long)interval) )
+    //:
+    );
+#else
+  __dcbtst( ((address)loc) +((long)interval) );
+#endif
+}
+
+#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadLocalStorage.hpp"
+#include "thread_aix.inline.hpp"
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+    // nothing we can do here for user-level thread
+}
+
+void ThreadLocalStorage::pd_init() {
+  // Nothing to do
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+
+  // Processor dependent parts of ThreadLocalStorage
+
+public:
+  static Thread* thread() {
+    return (Thread *) os::thread_local_storage_at(thread_index());
+  }
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/thread_aix_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/frame.inline.hpp"
+#include "thread_aix.inline.hpp"
+
+// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Aix/PPC.
+bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
+  Unimplemented();
+  return false;
+}
+
+void JavaThread::cache_global_variables() { }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+
+ private:
+  void pd_initialize() {
+    _anchor.clear();
+    _last_interpreter_fp = NULL;
+  }
+
+  // The `last' frame is the youngest Java frame on the thread's stack.
+  frame pd_last_frame() {
+    assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+
+    intptr_t* sp = last_Java_sp();
+    address pc = _anchor.last_Java_pc();
+
+    // last_Java_pc is not set if we come here from compiled code.
+    if (pc == NULL)
+      pc = (address) *(sp + 2);
+
+    return frame(sp, pc);
+  }
+
+ public:
+  void set_base_of_stack_pointer(intptr_t* base_sp) {}
+  intptr_t* base_of_stack_pointer()   { return NULL; }
+  void record_base_of_stack_pointer() {}
+
+  // These routines are only used on cpu architectures that
+  // have separate register stacks (Itanium).
+  static bool register_stack_overflow() { return false; }
+  static void enable_register_stack_guard() {}
+  static void disable_register_stack_guard() {}
+
+  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
+                                           bool isInJava);
+
+  // -Xprof support
+  //
+  // In order to find the last Java fp from an async profile
+  // tick, we store the current interpreter fp in the thread.
+  // This value is only valid while we are in the C++ interpreter
+  // and profiling.
+ protected:
+  intptr_t *_last_interpreter_fp;
+
+ public:
+  static ByteSize last_interpreter_fp_offset() {
+    return byte_offset_of(JavaThread, _last_interpreter_fp);
+  }
+
+  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/aix_ppc/vm/vmStructs_aix_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP
+
+// These are the OS and CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
+                                                                                                                                     \
+  /******************************/                                                                                                   \
+  /* Threads (NOTE: incomplete) */                                                                                                   \
+  /******************************/                                                                                                   \
+  nonstatic_field(OSThread,                      _thread_id,                                      pid_t)                             \
+  nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)
+
+
+#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
+                                                                          \
+  /**********************/                                                \
+  /* Posix Thread IDs   */                                                \
+  /**********************/                                                \
+                                                                          \
+  declare_integer_type(pid_t)                                             \
+  declare_unsigned_integer_type(pthread_t)
+
+#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_VMSTRUCTS_AIX_PPC_HPP
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_32.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/bsd_x86/vm/vmStructs_bsd_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_x86/vm/vmStructs_bsd_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,7 +36,7 @@
 
   // Atomically copy 64 bits of data
   static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
     double tmp;
     asm volatile ("lfd  %0, 0(%1)\n"
                   "stfd %0, 0(%2)\n"
--- a/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_zero/vm/thread_bsd_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/bsd_zero/vm/vmStructs_bsd_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/bsd_zero/vm/vmStructs_bsd_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
+
+#include "orderAccess_linux_ppc.inline.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/os.hpp"
+#include "vm_version_ppc.hpp"
+
+#ifndef PPC64
+#error "Atomic currently only implemented for PPC64"
+#endif
+
+// Implementation of class atomic
+
+inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
+
+inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
+inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
+inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
+inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
+inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
+inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
+
+inline jlong Atomic::load(volatile jlong* src) { return *src; }
+
+/*
+  machine barrier instructions:
+
+  - sync            two-way memory barrier, aka fence
+  - lwsync          orders  Store|Store,
+                             Load|Store,
+                             Load|Load,
+                    but not Store|Load
+  - eieio           orders memory accesses for device memory (only)
+  - isync           invalidates speculatively executed instructions
+                    From the POWER ISA 2.06 documentation:
+                     "[...] an isync instruction prevents the execution of
+                    instructions following the isync until instructions
+                    preceding the isync have completed, [...]"
+                    From IBM's AIX assembler reference:
+                     "The isync [...] instructions causes the processor to
+                    refetch any instructions that might have been fetched
+                    prior to the isync instruction. The instruction isync
+                    causes the processor to wait for all previous instructions
+                    to complete. Then any instructions already fetched are
+                    discarded and instruction processing continues in the
+                    environment established by the previous instructions."
+
+  semantic barrier instructions:
+  (as defined in orderAccess.hpp)
+
+  - release         orders Store|Store,       (maps to lwsync)
+                            Load|Store
+  - acquire         orders  Load|Store,       (maps to lwsync)
+                            Load|Load
+  - fence           orders Store|Store,       (maps to sync)
+                            Load|Store,
+                            Load|Load,
+                           Store|Load
+*/
+
+#define strasm_sync                       "\n  sync    \n"
+#define strasm_lwsync                     "\n  lwsync  \n"
+#define strasm_isync                      "\n  isync   \n"
+#define strasm_release                    strasm_lwsync
+#define strasm_acquire                    strasm_lwsync
+#define strasm_fence                      strasm_sync
+#define strasm_nobarrier                  ""
+#define strasm_nobarrier_clobber_memory   ""
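+
+// Illustrative note (not from the original port): add(), xchg() and
+// cmpxchg() below bracket their load-reserve/store-conditional loops with
+// these macros - a release-style barrier in front, an acquire-style barrier
+// behind - while inc()/dec() deliberately run without barriers, e.g.:
+//   __asm__ __volatile__ ( strasm_lwsync  /* ll/sc loop */  strasm_isync ... );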
+
+inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+
+  unsigned int result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: lwarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (jint) result;
+}
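+
+// Usage sketch (illustrative, not part of the original port): add() returns
+// the updated value, so a shared counter can be bumped and read in one call:
+//   volatile jint counter = 0;
+//   jint now = Atomic::add(1, &counter);   // now == value after the add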
+
+
+inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+
+  long result;
+
+  __asm__ __volatile__ (
+    strasm_lwsync
+    "1: ldarx   %0,  0, %2    \n"
+    "   add     %0, %0, %1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_isync
+    : /*%0*/"=&r" (result)
+    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
+    : "cc", "memory" );
+
+  return (intptr_t) result;
+}
+
+inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
+  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::inc    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0,  1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::inc_ptr(volatile void*     dest) {
+  inc_ptr((volatile intptr_t*)dest);
+}
+
+
+inline void Atomic::dec    (volatile jint*     dest) {
+
+  unsigned int temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: lwarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stwcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile intptr_t* dest) {
+
+  long temp;
+
+  __asm__ __volatile__ (
+    strasm_nobarrier
+    "1: ldarx   %0,  0, %2    \n"
+    "   addic   %0, %0, -1    \n"
+    "   stdcx.  %0,  0, %2    \n"
+    "   bne-    1b            \n"
+    strasm_nobarrier
+    : /*%0*/"=&r" (temp), "=m" (*dest)
+    : /*%2*/"r" (dest), "m" (*dest)
+    : "cc" strasm_nobarrier_clobber_memory);
+
+}
+
+inline void Atomic::dec_ptr(volatile void*     dest) {
+  dec_ptr((volatile intptr_t*)dest);
+}
+
+inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
+
+  // Note that xchg doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
+
+inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
+
+  // Note that xchg_ptr doesn't necessarily do an acquire
+  // (see synchronizer.cpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* lwsync */
+    strasm_lwsync
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (intptr_t) old_value;
+}
+
+inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
+  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
+}
+
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  unsigned int old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   lwz     %[old_value], 0(%[dest])                \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   lwarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpw    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jint) old_value;
+}
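+
+// Usage sketch (illustrative, not part of the original port): cmpxchg()
+// returns the value actually found at *dest, so a retry loop compares it
+// with the value it assumed:
+//   jint observed = *addr;
+//   jint assumed;
+//   do {
+//     assumed  = observed;
+//     observed = Atomic::cmpxchg(assumed + 1, addr, assumed);
+//   } while (observed != assumed);   // lost a race: retry with the new value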
+
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+
+  // Note that cmpxchg guarantees a two-way memory barrier across
+  // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
+  // (see atomic.hpp).
+
+  long old_value;
+  const uint64_t zero = 0;
+
+  __asm__ __volatile__ (
+    /* fence */
+    strasm_sync
+    /* simple guard */
+    "   ld      %[old_value], 0(%[dest])                \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    /* atomic loop */
+    "1:                                                 \n"
+    "   ldarx   %[old_value], %[dest], %[zero]          \n"
+    "   cmpd    %[compare_value], %[old_value]          \n"
+    "   bne-    2f                                      \n"
+    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
+    "   bne-    1b                                      \n"
+    /* acquire */
+    strasm_sync
+    /* exit */
+    "2:                                                 \n"
+    /* out */
+    : [old_value]       "=&r"   (old_value),
+                        "=m"    (*dest)
+    /* in */
+    : [dest]            "b"     (dest),
+      [zero]            "r"     (zero),
+      [compare_value]   "r"     (compare_value),
+      [exchange_value]  "r"     (exchange_value),
+                        "m"     (*dest)
+    /* clobber */
+    : "cc",
+      "memory"
+    );
+
+  return (jlong) old_value;
+}
+
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+}
+
+#undef strasm_sync
+#undef strasm_lwsync
+#undef strasm_isync
+#undef strasm_release
+#undef strasm_acquire
+#undef strasm_fence
+#undef strasm_nobarrier
+#undef strasm_nobarrier_clobber_memory
+
+#endif // OS_CPU_LINUX_PPC_VM_ATOMIC_LINUX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
+
+// Sets the default values for platform dependent flags used by the runtime system.
+// (see globals.hpp)
+
+define_pd_global(bool, DontYieldALot,            false);
+define_pd_global(intx, ThreadStackSize,          2048); // 0 => use system default
+define_pd_global(intx, VMThreadStackSize,        2048);
+
+// If we set CompilerThreadStackSize to a value different from 0, it will
+// be used in os::create_thread(). Otherwise, due to the strange logic in
+// os::create_thread(), the stack size for compiler threads will default to
+// VMThreadStackSize, although it is defined to 4M in os::Linux::default_stack_size()!
+define_pd_global(intx, CompilerThreadStackSize,  4096);
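+// Note (illustrative, not from the original port): these stack-size flags
+// are in KB, so 4096 here is the 4M mentioned above; it can be overridden
+// at launch, e.g. -XX:CompilerThreadStackSize=8192 for an 8M stack.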
+
+// Allow extra space in DEBUG builds for asserts.
+define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
+
+define_pd_global(intx, StackYellowPages,         6);
+define_pd_global(intx, StackRedPages,            1);
+define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));
+
+// Only used on 64 bit platforms
+define_pd_global(uintx,HeapBaseMinAddress,       2*G);
+// Only used on 64 bit Windows platforms
+define_pd_global(bool, UseVectoredExceptions,    false);
+
+#endif // OS_CPU_LINUX_PPC_VM_GLOBALS_LINUX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/orderAccess_linux_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
+
+#include "runtime/orderAccess.hpp"
+#include "vm_version_ppc.hpp"
+
+#ifndef PPC64
+#error "OrderAccess currently only implemented for PPC64"
+#endif
+
+// Implementation of class OrderAccess.
+
+//
+// Machine barrier instructions:
+//
+// - sync            Two-way memory barrier, aka fence.
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders  Store|Store
+// - isync           Invalidates speculatively executed instructions,
+//                   but isync may complete before storage accesses
+//                   associated with instructions preceding isync have
+//                   been performed.
+//
+// Semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
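+
+// Usage sketch (illustrative, not part of the original port): the classic
+// producer/consumer handoff these primitives are meant for:
+//   producer:  data = 42;  OrderAccess::release_store(&flag, 1);
+//   consumer:  if (OrderAccess::load_acquire(&flag) == 1) use(data);
+// The release (lwsync) keeps the data store ahead of the flag store, and
+// the acquire keeps the data load behind the flag load, so a consumer that
+// sees flag == 1 also sees data == 42.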
+
+#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
+#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
+#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
+#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
+#define inlasm_release()  inlasm_lwsync();
+#define inlasm_acquire()  inlasm_lwsync();
+// Use twi-isync for load_acquire (faster than lwsync).
+#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
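+// (Illustrative note, not from the original port: twi 0,X,0 never traps; it
+//  only creates a data dependency on the just-loaded value, so the isync
+//  cannot complete before the load has - which gives acquire ordering
+//  without paying for a full lwsync.)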
+#define inlasm_fence()    inlasm_sync();
+
+inline void     OrderAccess::loadload()   { inlasm_lwsync();  }
+inline void     OrderAccess::storestore() { inlasm_lwsync();  }
+inline void     OrderAccess::loadstore()  { inlasm_lwsync();  }
+inline void     OrderAccess::storeload()  { inlasm_fence();   }
+
+inline void     OrderAccess::acquire()    { inlasm_acquire(); }
+inline void     OrderAccess::release()    { inlasm_release(); }
+inline void     OrderAccess::fence()      { inlasm_fence();   }
+
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte t = *p;   inlasm_acquire_reg(t); return t; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { register jshort t = *p;  inlasm_acquire_reg(t); return t; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { register jint t = *p;    inlasm_acquire_reg(t); return t; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { register jlong t = *p;   inlasm_acquire_reg(t); return t; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte t = *p;  inlasm_acquire_reg(t); return t; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { register juint t = *p;   inlasm_acquire_reg(t); return t; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat t = *p;  inlasm_acquire(); return t; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
+
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*)   load_acquire((volatile jlong*)p); }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)   load_acquire((volatile jlong*)p); }
+
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
+
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }
+
+inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
+inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
+
+inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
+inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
+
+#undef inlasm_sync
+#undef inlasm_lwsync
+#undef inlasm_eieio
+#undef inlasm_isync
+#undef inlasm_release
+#undef inlasm_acquire
+#undef inlasm_fence
+
+#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// no precompiled headers
+#include "assembler_ppc.inline.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/systemDictionary.hpp"
+#include "classfile/vmSymbols.hpp"
+#include "code/icBuffer.hpp"
+#include "code/vtableStubs.hpp"
+#include "interpreter/interpreter.hpp"
+#include "jvm_linux.h"
+#include "memory/allocation.inline.hpp"
+#include "mutex_linux.inline.hpp"
+#include "nativeInst_ppc.hpp"
+#include "os_share_linux.hpp"
+#include "prims/jniFastGetField.hpp"
+#include "prims/jvm.h"
+#include "prims/jvm_misc.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/extendedPC.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.hpp"
+#include "runtime/java.hpp"
+#include "runtime/javaCalls.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/osThread.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/thread.inline.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/events.hpp"
+#include "utilities/vmError.hpp"
+
+// put OS-includes here
+# include <sys/types.h>
+# include <sys/mman.h>
+# include <pthread.h>
+# include <signal.h>
+# include <errno.h>
+# include <dlfcn.h>
+# include <stdlib.h>
+# include <stdio.h>
+# include <unistd.h>
+# include <sys/resource.h>
+# include <pthread.h>
+# include <sys/stat.h>
+# include <sys/time.h>
+# include <sys/utsname.h>
+# include <sys/socket.h>
+# include <sys/wait.h>
+# include <pwd.h>
+# include <poll.h>
+# include <ucontext.h>
+
+
+address os::current_stack_pointer() {
+  intptr_t* csp;
+
+  // inline assembly `mr regno(csp), R1_SP':
+  __asm__ __volatile__ ("mr %0, 1":"=r"(csp):);
+
+  return (address) csp;
+}
+
+char* os::non_memory_address_word() {
+  // Must never look like an address returned by reserve_memory,
+  // even in its subfields (as defined by the CPU immediate fields,
+  // if the CPU splits constants across multiple instructions).
+
+  return (char*) -1;
+}
+
+void os::initialize_thread(Thread *thread) { }
+
+// Frame information (pc, sp, fp) retrieved via ucontext
+// always looks like a C-frame according to the frame
+// conventions in frame_ppc64.hpp.
+address os::Linux::ucontext_get_pc(ucontext_t * uc) {
+  // On powerpc64, ucontext_t is not self-contained but contains
+  // a pointer to an optional substructure (mcontext_t.regs) containing the volatile
+  // registers - NIP, among others.
+  // This substructure may or may not be there depending where uc came from:
+  // - if uc was handed over as the argument to a sigaction handler, a pointer to the
+  //   substructure was provided by the kernel when calling the signal handler, and
+  //   regs->nip can be accessed.
+  // - if uc was filled by getcontext(), it is undefined - getcontext() does not fill
+  //   it because the volatile registers are not needed to make setcontext() work.
+  //   Hopefully it was zero'd out beforehand.
+  guarantee(uc->uc_mcontext.regs != NULL, "only use ucontext_get_pc in sigaction context");
+  return (address)uc->uc_mcontext.regs->nip;
+}
+
+intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
+  return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/];
+}
+
+intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
+  return NULL;
+}
+
+ExtendedPC os::fetch_frame_from_context(void* ucVoid,
+                    intptr_t** ret_sp, intptr_t** ret_fp) {
+
+  ExtendedPC  epc;
+  ucontext_t* uc = (ucontext_t*)ucVoid;
+
+  if (uc != NULL) {
+    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
+    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
+    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
+  } else {
+    // construct empty ExtendedPC for return value checking
+    epc = ExtendedPC(NULL);
+    if (ret_sp) *ret_sp = (intptr_t *)NULL;
+    if (ret_fp) *ret_fp = (intptr_t *)NULL;
+  }
+
+  return epc;
+}
+
+frame os::fetch_frame_from_context(void* ucVoid) {
+  intptr_t* sp;
+  intptr_t* fp;
+  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
+  return frame(sp, epc.pc());
+}
+
+frame os::get_sender_for_C_frame(frame* fr) {
+  if (*fr->sp() == 0) {
+    // fr is the last C frame
+    return frame(NULL, NULL);
+  }
+  return frame(fr->sender_sp(), fr->sender_pc());
+}
+
+
+frame os::current_frame() {
+  intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
+  // hack.
+  frame topframe(csp, (address)0x8);
+  // return sender of current topframe which hopefully has pc != NULL.
+  return os::get_sender_for_C_frame(&topframe);
+}
+
+// Utility functions
+
+extern "C" JNIEXPORT int
+JVM_handle_linux_signal(int sig,
+                        siginfo_t* info,
+                        void* ucVoid,
+                        int abort_if_unrecognized) {
+  ucontext_t* uc = (ucontext_t*) ucVoid;
+
+  Thread* t = ThreadLocalStorage::get_thread_slow();
+
+  SignalHandlerMark shm(t);
+
+  // Note: it's not uncommon that JNI code uses signal/sigset to install
+  // and then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
+  // or have a SIGILL handler when detecting CPU type). When that happens,
+  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
+  // avoid unnecessary crash when libjsig is not preloaded, try handle signals
+  // that do not require siginfo/ucontext first.
+
+  if (sig == SIGPIPE) {
+    if (os::Linux::chained_handler(sig, info, ucVoid)) {
+      return true;
+    } else {
+      if (PrintMiscellaneous && (WizardMode || Verbose)) {
+        warning("Ignoring SIGPIPE - see bug 4229104");
+      }
+      return true;
+    }
+  }
+
+  JavaThread* thread = NULL;
+  VMThread* vmthread = NULL;
+  if (os::Linux::signal_handlers_are_installed) {
+    if (t != NULL) {
+      if(t->is_Java_thread()) {
+        thread = (JavaThread*)t;
+      } else if(t->is_VM_thread()) {
+        vmthread = (VMThread *)t;
+      }
+    }
+  }
+
+  // Moved SafeFetch32 handling outside thread!=NULL conditional block to make
+  // it work if no associated JavaThread object exists.
+  if (uc) {
+    address const pc = os::Linux::ucontext_get_pc(uc);
+    if (pc && StubRoutines::is_safefetch_fault(pc)) {
+      uc->uc_mcontext.regs->nip = (unsigned long)StubRoutines::continuation_for_safefetch_fault(pc);
+      return true;
+    }
+  }
+
+  // decide if this trap can be handled by a stub
+  address stub = NULL;
+  address pc   = NULL;
+
+  //%note os_trap_1
+  if (info != NULL && uc != NULL && thread != NULL) {
+    pc = (address) os::Linux::ucontext_get_pc(uc);
+
+    // Handle ALL stack overflow variations here
+    if (sig == SIGSEGV) {
+      // si_addr may not be valid due to a bug in the linux-ppc64 kernel (see
+      // comment below). Use get_stack_bang_address instead of si_addr.
+      address addr = ((NativeInstruction*)pc)->get_stack_bang_address(uc);
+
+      // Check if fault address is within thread stack.
+      if (addr < thread->stack_base() &&
+          addr >= thread->stack_base() - thread->stack_size()) {
+        // stack overflow
+        if (thread->in_stack_yellow_zone(addr)) {
+          thread->disable_stack_yellow_zone();
+          if (thread->thread_state() == _thread_in_Java) {
+            // Throw a stack overflow exception.
+            // Guard pages will be reenabled while unwinding the stack.
+            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
+          } else {
+            // Thread was in the vm or native code. Return and try to finish.
+            return 1;
+          }
+        } else if (thread->in_stack_red_zone(addr)) {
+          // Fatal red zone violation.  Disable the guard pages and fall through
+          // to handle_unexpected_exception way down below.
+          thread->disable_stack_red_zone();
+          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+
+          // This is a likely cause, but hard to verify. Let's just print
+          // it as a hint.
+          tty->print_raw_cr("Please check if any of your loaded .so files has "
+                            "enabled executable stack (see man page execstack(8))");
+        } else {
+          // Accessing stack address below sp may cause SEGV if current
+          // thread has MAP_GROWSDOWN stack. This should only happen when
+          // current thread was created by user code with MAP_GROWSDOWN flag
+          // and then attached to VM. See notes in os_linux.cpp.
+          if (thread->osthread()->expanding_stack() == 0) {
+             thread->osthread()->set_expanding_stack();
+             if (os::Linux::manually_expand_stack(thread, addr)) {
+               thread->osthread()->clear_expanding_stack();
+               return 1;
+             }
+             thread->osthread()->clear_expanding_stack();
+          } else {
+             fatal("recursive segv. expanding stack.");
+          }
+        }
+      }
+    }
+
+    if (thread->thread_state() == _thread_in_Java) {
+      // Java thread running in Java code => find exception handler if any
+      // a fault inside compiled code, the interpreter, or a stub
+
+      // A VM-related SIGILL may only occur if we are not in the zero page.
+      // On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
+      // in the zero page, because it is filled with 0x0. We ignore
+      // explicit SIGILLs in the zero page.
+      if (sig == SIGILL && (pc < (address) 0x200)) {
+        if (TraceTraps) {
+          tty->print_raw_cr("SIGILL happened inside zero page.");
+        }
+        goto report_and_die;
+      }
+
+      // Handle signal from NativeJump::patch_verified_entry().
+      if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
+          (!TrapBasedNotEntrantChecks && sig == SIGILL  && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
+        if (TraceTraps) {
+          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+        }
+        stub = SharedRuntime::get_handle_wrong_method_stub();
+      }
+
+      else if (sig == SIGSEGV &&
+               // A linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults
+               // in 64bit mode (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6),
+               // especially when we try to read from the safepoint polling page. So the check
+               //   (address)info->si_addr == os::get_standard_polling_page()
+               // doesn't work for us. We use:
+               ((NativeInstruction*)pc)->is_safepoint_poll()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
+        }
+        stub = SharedRuntime::get_poll_stub(pc);
+      }
+
+      // SIGTRAP-based ic miss check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedICMissChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::get_ic_miss_stub();
+      }
+
+      // SIGTRAP-based implicit null check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedNullChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_null_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+      }
+
+      // SIGSEGV-based implicit null check in compiled code.
+      else if (sig == SIGSEGV && ImplicitNullChecks &&
+               CodeCache::contains((void*) pc) &&
+               !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
+        if (TraceTraps) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+      }
+
+#ifdef COMPILER2
+      // SIGTRAP-based implicit range check in compiled code.
+      else if (sig == SIGTRAP && TrapBasedRangeChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_range_check()) {
+        if (TraceTraps) {
+          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+      }
+#endif
+      else if (sig == SIGBUS) {
+        // BugId 4454115: A read from a MappedByteBuffer can fault here if the
+        // underlying file has been truncated. Do not crash the VM in such a case.
+        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
+        if (nm != NULL && nm->has_unsafe_access()) {
+          // We don't really need a stub here! Just set the pending exception and
+          // continue at the next instruction after the faulting read. Returning
+          // garbage from this read is ok.
+          thread->set_pending_unsafe_access_error();
+          uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
+          return true;
+        }
+      }
+    }
+
+    else { // thread->thread_state() != _thread_in_Java
+      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
+        // SIGILL must be caused by VM_Version::determine_features().
+        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
+                        // flushing of icache is not necessary.
+        stub = pc + 4;  // continue with next instruction.
+      }
+      else if (thread->thread_state() == _thread_in_vm &&
+               sig == SIGBUS && thread->doing_unsafe_access()) {
+        // We don't really need a stub here! Just set the pending exception and
+        // continue at the next instruction after the faulting read. Returning
+        // garbage from this read is ok.
+        thread->set_pending_unsafe_access_error();
+        uc->uc_mcontext.regs->nip = ((unsigned long)pc) + 4;
+        return true;
+      }
+    }
+
+    // Check to see if we caught the safepoint code in the
+    // process of write protecting the memory serialization page.
+    // It write enables the page immediately after protecting it
+    // so we can just return to retry the write.
+    if ((sig == SIGSEGV) &&
+        // si_addr may not be valid due to a bug in the linux-ppc64 kernel (see comment above).
+        // Use is_memory_serialization instead of si_addr.
+        ((NativeInstruction*)pc)->is_memory_serialization(thread, ucVoid)) {
+      // Synchronization problem in the pseudo memory barrier code (bug id 6546278)
+      // Block current thread until the memory serialize page permission restored.
+      os::block_on_serialize_page_trap();
+      return true;
+    }
+  }
+
+  if (stub != NULL) {
+    // Save all thread context in case we need to restore it.
+    if (thread != NULL) thread->set_saved_exception_pc(pc);
+    uc->uc_mcontext.regs->nip = (unsigned long)stub;
+    return true;
+  }
+
+  // signal-chaining
+  if (os::Linux::chained_handler(sig, info, ucVoid)) {
+    return true;
+  }
+
+  if (!abort_if_unrecognized) {
+    // caller wants another chance, so give it one
+    return false;
+  }
+
+  if (pc == NULL && uc != NULL) {
+    pc = os::Linux::ucontext_get_pc(uc);
+  }
+
+report_and_die:
+  // unmask current signal
+  sigset_t newset;
+  sigemptyset(&newset);
+  sigaddset(&newset, sig);
+  sigprocmask(SIG_UNBLOCK, &newset, NULL);
+
+  VMError err(t, sig, pc, info, ucVoid);
+  err.report_and_die();
+
+  ShouldNotReachHere();
+  return false;
+}
+
+void os::Linux::init_thread_fpu_state(void) {
+  // Disable FP exceptions.
+  __asm__ __volatile__ ("mtfsfi 6,0");
+}
+
+int os::Linux::get_fpu_control_word(void) {
+  // x86 has problems with FPU precision after pthread_cond_timedwait().
+  // nothing to do on ppc64.
+  return 0;
+}
+
+void os::Linux::set_fpu_control_word(int fpu_control) {
+  // x86 has problems with FPU precision after pthread_cond_timedwait().
+  // nothing to do on ppc64.
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+size_t os::Linux::min_stack_allowed = 768*K;
+
+bool os::Linux::supports_variable_stack_size() { return true; }
+
+// return default stack size for thr_type
+size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
+  // Default stack size (compiler threads need a larger stack).
+  // Notice that the setting for compiler threads here has no impact
+  // because of the strange 'fallback logic' in os::create_thread().
+  // Set CompilerThreadStackSize in globals_<os_cpu>.hpp instead if you want
+  // to specify a different stack size for compiler threads!
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+  return s;
+}
+
+size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
+  return 2 * page_size();
+}
+
+// Java thread:
+//
+//   Low memory addresses
+//    +------------------------+
+//    |                        |\  JavaThread created by VM does not have glibc
+//    |    glibc guard page    | - guard, attached Java thread usually has
+//    |                        |/  1 page glibc guard.
+// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
+//    |                        |\
+//    |  HotSpot Guard Pages   | - red and yellow pages
+//    |                        |/
+//    +------------------------+ JavaThread::stack_yellow_zone_base()
+//    |                        |\
+//    |      Normal Stack      | -
+//    |                        |/
+// P2 +------------------------+ Thread::stack_base()
+//
+// Non-Java thread:
+//
+//   Low memory addresses
+//    +------------------------+
+//    |                        |\
+//    |  glibc guard page      | - usually 1 page
+//    |                        |/
+// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
+//    |                        |\
+//    |      Normal Stack      | -
+//    |                        |/
+// P2 +------------------------+ Thread::stack_base()
+//
+// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
+//    pthread_attr_getstack()
+
+static void current_stack_region(address * bottom, size_t * size) {
+  if (os::Linux::is_initial_thread()) {
+     // initial thread needs special handling because pthread_getattr_np()
+     // may return bogus value.
+    *bottom = os::Linux::initial_thread_stack_bottom();
+    *size   = os::Linux::initial_thread_stack_size();
+  } else {
+    pthread_attr_t attr;
+
+    int rslt = pthread_getattr_np(pthread_self(), &attr);
+
+    // JVM needs to know exact stack location, abort if it fails
+    if (rslt != 0) {
+      if (rslt == ENOMEM) {
+        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
+      } else {
+        fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
+      }
+    }
+
+    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
+      fatal("Can not locate current stack attributes!");
+    }
+
+    pthread_attr_destroy(&attr);
+
+  }
+  assert(os::current_stack_pointer() >= *bottom &&
+         os::current_stack_pointer() < *bottom + *size, "just checking");
+}
+
+address os::current_stack_base() {
+  address bottom;
+  size_t size;
+  current_stack_region(&bottom, &size);
+  return (bottom + size);
+}
+
+size_t os::current_stack_size() {
+  // stack size includes normal stack and HotSpot guard pages
+  address bottom;
+  size_t size;
+  current_stack_region(&bottom, &size);
+  return size;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// helper functions for fatal error handler
+
+void os::print_context(outputStream *st, void *context) {
+  if (context == NULL) return;
+
+  ucontext_t* uc = (ucontext_t*)context;
+
+  st->print_cr("Registers:");
+  st->print("pc =" INTPTR_FORMAT "  ", uc->uc_mcontext.regs->nip);
+  st->print("lr =" INTPTR_FORMAT "  ", uc->uc_mcontext.regs->link);
+  st->print("ctr=" INTPTR_FORMAT "  ", uc->uc_mcontext.regs->ctr);
+  st->cr();
+  for (int i = 0; i < 32; i++) {
+    st->print("r%-2d=" INTPTR_FORMAT "  ", i, uc->uc_mcontext.regs->gpr[i]);
+    if (i % 3 == 2) st->cr();
+  }
+  st->cr();
+  st->cr();
+
+  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
+  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
+  print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
+  st->cr();
+
+  // Note: it may be unsafe to inspect memory near pc. For example, pc may
+  // point to garbage if entry point in an nmethod is corrupted. Leave
+  // this at the end, and hope for the best.
+  address pc = os::Linux::ucontext_get_pc(uc);
+  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
+  print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
+  st->cr();
+}
+
+void os::print_register_info(outputStream *st, void *context) {
+  if (context == NULL) return;
+
+  ucontext_t *uc = (ucontext_t*)context;
+
+  st->print_cr("Register to memory mapping:");
+  st->cr();
+
+  // this is only for the "general purpose" registers
+  for (int i = 0; i < 32; i++) {
+    st->print("r%-2d=", i);
+    print_location(st, uc->uc_mcontext.regs->gpr[i]);
+  }
+  st->cr();
+}
+
+extern "C" {
+  int SpinPause() {
+    return 0;
+  }
+}
+
+#ifndef PRODUCT
+void os::verify_stack_alignment() {
+  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
+
+  static void setup_fpu() {}
+
+  // Used to register dynamic code cache area with the OS
+  // Note: Currently only used in 64 bit Windows implementations
+  static bool register_code_area(char *low, char *high) { return true; }
+
+#endif // OS_CPU_LINUX_PPC_VM_OS_LINUX_PPC_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/prefetch_linux_ppc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
+#define OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
+
+#include "runtime/prefetch.hpp"
+
+
+inline void Prefetch::read(void *loc, intx interval) {
+  __asm__ __volatile__ (
+    "   dcbt   0, %0       \n"
+    :
+    : /*%0*/"r" ( ((address)loc) + ((long)interval) )
+    // no clobber list needed
+    );
+}
+
+inline void Prefetch::write(void *loc, intx interval) {
+  __asm__ __volatile__ (
+    "   dcbtst 0, %0       \n"
+    :
+    : /*%0*/"r" ( ((address)loc) + ((long)interval) )
+    // no clobber list needed
+    );
+}
+
+#endif // OS_CPU_LINUX_PPC_VM_PREFETCH_LINUX_PPC_INLINE_HPP
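The two helpers above map Prefetch::read/write onto the PowerPC dcbt/dcbtst
cache-touch hints. A minimal sketch of how a caller typically uses them; the
loop, the sum_words name, and the 256-byte prefetch distance are illustrative
assumptions, not HotSpot code:

  // Hypothetical caller: touch the line a fixed distance ahead of the
  // cursor so it is (hopefully) cached by the time it is dereferenced.
  static long sum_words(const long* a, size_t n) {
    const intx kAhead = 256;                   // bytes; assumed tuning value
    long s = 0;
    for (size_t i = 0; i < n; i++) {
      Prefetch::read((void*) &a[i], kAhead);   // emits dcbt
      s += a[i];
    }
    return s;
  }
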
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/threadLocalStorage.hpp"
+
+void ThreadLocalStorage::generate_code_for_get_thread() {
+  // Nothing we can do here for user-level threads.
+}
+
+void ThreadLocalStorage::pd_init() {
+  // Nothing to do
+}
+
+void ThreadLocalStorage::pd_set_thread(Thread* thread) {
+  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/threadLS_linux_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
+
+  // Processor dependent parts of ThreadLocalStorage
+
+public:
+  static Thread* thread() {
+    return (Thread *) os::thread_local_storage_at(thread_index());
+  }
+
+#endif // OS_CPU_LINUX_PPC_VM_THREADLS_LINUX_PPC_HPP
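Taken together, the two threadLS files above implement a simple slot-based
TLS: pd_set_thread() stores the Thread* under a per-process index, and
thread() reads it back. A hedged sketch of that round-trip; the
on_thread_start hook below is invented for illustration and is not a real
HotSpot entry point:

  // Illustrative only -- not a real HotSpot hook.
  void on_thread_start(Thread* t) {
    ThreadLocalStorage::pd_set_thread(t);            // store into the slot
    assert(ThreadLocalStorage::thread() == t, "TLS round-trip failed");
  }
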
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/thread_linux_ppc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/frame.inline.hpp"
+#include "thread_linux.inline.hpp"
+
+// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
+bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
+  Unimplemented();
+  return false;
+}
+
+void JavaThread::cache_global_variables() { }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
+
+ private:
+
+  void pd_initialize() {
+    _anchor.clear();
+    _last_interpreter_fp = NULL;
+  }
+
+  // The `last' frame is the youngest Java frame on the thread's stack.
+  frame pd_last_frame() {
+    assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+
+    intptr_t* sp = last_Java_sp();
+    address pc = _anchor.last_Java_pc();
+
+    // last_Java_pc is not set if we come here from compiled code.
+    if (pc == NULL) {
+      pc = (address) *(sp + 2);
+    }
+
+    return frame(sp, pc);
+  }
+
+ public:
+
+  void set_base_of_stack_pointer(intptr_t* base_sp) {}
+  intptr_t* base_of_stack_pointer() { return NULL; }
+  void record_base_of_stack_pointer() {}
+
+  // These routines are only used on cpu architectures that
+  // have separate register stacks (Itanium).
+  static bool register_stack_overflow() { return false; }
+  static void enable_register_stack_guard() {}
+  static void disable_register_stack_guard() {}
+
+  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
+
+ protected:
+
+  // -Xprof support
+  //
+  // In order to find the last Java fp from an async profile
+  // tick, we store the current interpreter fp in the thread.
+  // This value is only valid while we are in the C++ interpreter
+  // and profiling.
+  intptr_t *_last_interpreter_fp;
+
+ public:
+
+  static ByteSize last_interpreter_fp_offset() {
+    return byte_offset_of(JavaThread, _last_interpreter_fp);
+  }
+
+  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
+
+#endif // OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
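pd_last_frame() above falls back to *(sp + 2) when no last_Java_pc was
recorded. That arithmetic assumes the 64-bit PowerPC ELF ABI frame layout
(back chain at SP+0, CR save at SP+8, LR save at SP+16); a standalone sketch
with an invented helper name:

  #include <cstdint>

  // Illustrative only: the caller's return PC lives in the LR save
  // slot, two 8-byte words above the stack pointer.
  static inline uintptr_t saved_return_pc(const intptr_t* sp) {
    return (uintptr_t) sp[2];   // sp[0] = back chain, sp[1] = CR save
  }
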
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os_cpu/linux_ppc/vm/vmStructs_linux_ppc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
+
+// These are the OS and CPU-specific fields, types and integer
+// constants required by the Serviceability Agent. This file is
+// referenced by vmStructs.cpp.
+
+#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
+                                                                                                                                     \
+  /******************************/                                                                                                   \
+  /* Threads (NOTE: incomplete) */                                                                                                   \
+  /******************************/                                                                                                   \
+  nonstatic_field(OSThread,                      _thread_id,                                      pid_t)                             \
+  nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)
+
+
+#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
+                                                                          \
+  /**********************/                                                \
+  /* Posix Thread IDs   */                                                \
+  /**********************/                                                \
+                                                                          \
+  declare_integer_type(pid_t)                                             \
+  declare_unsigned_integer_type(pthread_t)
+
+#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
+
+#endif // OS_CPU_LINUX_PPC_VM_VMSTRUCTS_LINUX_PPC_HPP
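For context, vmStructs.cpp instantiates VM_STRUCTS_OS_CPU with generator
macros that emit one table row per field for the Serviceability Agent. A
rough sketch of the shape of such a row; the struct below is illustrative,
not the actual HotSpot VMStructEntry definition:

  #include <stdint.h>

  struct VMStructEntry {
    const char* typeName;     // e.g. "OSThread"
    const char* fieldName;    // e.g. "_thread_id"
    const char* typeString;   // e.g. "pid_t"
    int         isStatic;     // 0 for nonstatic_field entries
    uint64_t    offset;       // byte offset of the field within the type
  };
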
--- a/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_sparc/vm/linux_sparc.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_sparc/vm/linux_sparc.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_sparc/vm/vmStructs_linux_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_sparc/vm/vmStructs_linux_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2004, 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "services/memTracker.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 
@@ -906,6 +907,9 @@
   if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
     return; // No matter, we tried, best effort.
   }
+
+  MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
+
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
      tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
   }
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_x86/vm/vmStructs_linux_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_x86/vm/vmStructs_linux_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,7 +36,7 @@
 
   // Atomically copy 64 bits of data
   static void atomic_copy64(volatile void *src, volatile void *dst) {
-#if defined(PPC) && !defined(_LP64)
+#if defined(PPC32)
     double tmp;
     asm volatile ("lfd  %0, 0(%1)\n"
                   "stfd %0, 0(%2)\n"
--- a/src/os_cpu/linux_zero/vm/vmStructs_linux_zero.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/linux_zero/vm/vmStructs_linux_zero.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 !!
-!! Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
+!! Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 !! DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 !!
 !! This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/vmStructs_solaris_sparc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/vmStructs_solaris_sparc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -75,13 +75,19 @@
     do_sysinfo(SI_ARCHITECTURE_64, "sparcv9", &features, generic_v9_m);
 
     // Extract valid instruction set extensions.
-    uint_t av;
-    uint_t avn = os::Solaris::getisax(&av, 1);
-    assert(avn == 1, "should only return one av");
+    uint_t avs[2];
+    uint_t avn = os::Solaris::getisax(avs, 2);
+    assert(avn <= 2, "should return at most two av's");
+    uint_t av = avs[0];
 
 #ifndef PRODUCT
-    if (PrintMiscellaneous && Verbose)
-      tty->print_cr("getisax(2) returned: " PTR32_FORMAT, av);
+    if (PrintMiscellaneous && Verbose) {
+      tty->print("getisax(2) returned: " PTR32_FORMAT, av);
+      if (avn > 1) {
+        tty->print(", " PTR32_FORMAT, avs[1]);
+      }
+      tty->cr();
+    }
 #endif
 
     if (av & AV_SPARC_MUL32)  features |= hardware_mul32_m;
@@ -91,6 +97,13 @@
     if (av & AV_SPARC_POPC)   features |= hardware_popc_m;
     if (av & AV_SPARC_VIS)    features |= vis1_instructions_m;
     if (av & AV_SPARC_VIS2)   features |= vis2_instructions_m;
+    if (avn > 1) {
+      uint_t av2 = avs[1];
+#ifndef AV2_SPARC_SPARC5
+#define AV2_SPARC_SPARC5 0x00000008 /* The 29 new fp and sub instructions */
+#endif
+      if (av2 & AV2_SPARC_SPARC5)       features |= sparc5_instructions_m;
+    }
 
     // Next values are not defined before Solaris 10
     // but Solaris 8 is used for jdk6 update builds.
@@ -119,6 +132,11 @@
 #endif
     if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
 
+#ifndef AV_SPARC_AES
+#define AV_SPARC_AES 0x00020000  /* aes instrs supported */
+#endif
+    if (av & AV_SPARC_AES)       features |= aes_instructions_m;
+
   } else {
     // getisax(2) failed, use the old legacy code.
 #ifndef PRODUCT
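The change above widens the getisax(2) query from one word to two so that the
second capability vector (the AV2_SPARC_* bits) can be tested as well. A
standalone, Solaris-only sketch of the same pattern; the printout is purely
illustrative:

  #include <sys/auxv.h>
  #include <stdio.h>

  int main(void) {
    uint32_t avs[2] = { 0, 0 };
    uint_t avn = getisax(avs, 2);       // fills up to 2 words, returns count
    printf("av[0] = 0x%08x\n", avs[0]);
    if (avn > 1) {
      printf("av[1] = 0x%08x\n", avs[1]);
    }
    return 0;
  }
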
--- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -475,9 +475,11 @@
         // here if the underlying file has been truncated.
         // Do not crash the VM in such a case.
         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
-        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
-        if (nm != NULL && nm->has_unsafe_access()) {
-          stub = StubRoutines::handler_for_unsafe_access();
+        if (cb != NULL) {
+          nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+          if (nm != NULL && nm->has_unsafe_access()) {
+            stub = StubRoutines::handler_for_unsafe_access();
+          }
         }
       }
       else
@@ -724,6 +726,7 @@
   err.report_and_die();
 
   ShouldNotReachHere();
+  return false;
 }
 
 void os::print_context(outputStream *st, void *context) {
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.s	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.s	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /
-/ Copyright (c) 2004, 2005, Oracle and/or its affiliates. All rights reserved.
+/ Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
 / DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 /
 / This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/solaris_x86/vm/vmStructs_solaris_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/solaris_x86/vm/vmStructs_solaris_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os_cpu/windows_x86/vm/vmStructs_windows_x86.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/os_cpu/windows_x86/vm/vmStructs_windows_x86.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Wed Mar 12 13:30:08 2014 +0100
@@ -161,7 +161,18 @@
         for (BuildConfig cfg : allConfigs) {
             startTag(cfg, "PropertyGroup");
             tagData("LocalDebuggerCommand", cfg.get("JdkTargetRoot") + "\\bin\\java.exe");
-            tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) -Dsun.java.launcher=gamma");
+            // The JVM loads some libraries using a path relative to
+            // itself because it expects to be in a JRE or a JDK. The java
+            // launcher's '-XXaltjvm=' option allows the JVM to be outside
+            // the JRE or JDK so '-Dsun.java.launcher.is_altjvm=true'
+            // forces a fake JAVA_HOME relative path to be used to
+            // find the other libraries. The '-XX:+PauseAtExit' option
+            // causes the VM to wait for a key press before exiting; this
+            // allows any stdout or stderr messages to be seen before
+            // the cmdtool exits.
+            tagData("LocalDebuggerCommandArguments", "-XXaltjvm=$(TargetDir) "
+                    + "-Dsun.java.launcher.is_altjvm=true "
+                    + "-XX:+UnlockDiagnosticVMOptions -XX:+PauseAtExit");
             tagData("LocalDebuggerEnvironment", "JAVA_HOME=" + cfg.get("JdkTargetRoot"));
             endTag();
         }
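For reference, the generated debugger settings amount to launching the target
JDK's java.exe by hand along these lines (paths and the main class are
placeholders):

  java.exe -XXaltjvm=<TargetDir> -Dsun.java.launcher.is_altjvm=true
           -XX:+UnlockDiagnosticVMOptions -XX:+PauseAtExit <main-class>

with JAVA_HOME set to JdkTargetRoot, mirroring the LocalDebuggerEnvironment
tag emitted just below.
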
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/tools/hsdis/Makefile	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/hsdis/Makefile	Wed Mar 12 13:30:08 2014 +0100
@@ -27,6 +27,7 @@
 # Default arch; it is changed below as needed.
 ARCH		= i386
 OS		= $(shell uname)
+AR		= ar
 
 ## OS = SunOS ##
 ifeq		($(OS),SunOS)
@@ -73,6 +74,7 @@
 ifdef LP64
 CFLAGS/sparcv9	+= -m64
 CFLAGS/amd64	+= -m64
+CFLAGS/ppc64	+= -m64
 else
 ARCH=$(ARCH1:amd64=i386)
 CFLAGS/i386	+= -m32
@@ -88,8 +90,20 @@
 DLDFLAGS	+= -shared
 LDFLAGS         += -ldl
 OUTFLAGS	+= -o $@
-## OS = Windows ##
-else   # !SunOS, !Linux => Darwin or Windows
+else
+## OS = AIX ##
+ifeq		($(OS),AIX)
+OS              = aix
+ARCH            = ppc64
+CC              = xlc_r
+CFLAGS          += -DAIX -g -qpic=large -q64
+CFLAGS/ppc64    += -q64
+AR              = ar -X64
+DLDFLAGS        += -qmkshrobj -lz
+OUTFLAGS        += -o $@
+LIB_EXT		= .so
+else
+## OS = Darwin ##
 ifeq ($(OS),Darwin)
 CPU             = $(shell uname -m)
 ARCH1=$(CPU:x86_64=amd64)
@@ -113,7 +127,8 @@
 DLDFLAGS        += -lz
 LDFLAGS         += -ldl
 OUTFLAGS        += -o $@
-else #Windows
+else
+## OS = Windows ##
 OS		= windows
 CC		= gcc
 CFLAGS		+=  /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi-
@@ -123,6 +138,7 @@
 OUTFLAGS	+= /link /out:$@
 LIB_EXT		= .dll
 endif   # Darwin
+endif   # AIX
 endif	# Linux
 endif	# SunOS
 
@@ -176,7 +192,7 @@
 	if [ ! -f $@ ]; then cd $(TARGET_DIR); make all-opcodes; fi
 
 $(TARGET_DIR)/Makefile:
-	(cd $(TARGET_DIR); CC=$(CC) CFLAGS="$(CFLAGS)" $(BINUTILSDIR)/configure --disable-nls $(CONFIGURE_ARGS))
+	(cd $(TARGET_DIR); CC=$(CC) CFLAGS="$(CFLAGS)" AR="$(AR)" $(BINUTILSDIR)/configure --disable-nls $(CONFIGURE_ARGS))
 
 $(TARGET): $(SOURCE) $(LIBS) $(LIBRARIES) $(TARGET_DIR)
 	$(CC) $(OUTFLAGS) $(CPPFLAGS) $(CFLAGS) $(SOURCE) $(DLDFLAGS) $(LIBRARIES)
--- a/src/share/tools/hsdis/README	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/hsdis/README	Wed Mar 12 13:30:08 2014 +0100
@@ -1,4 +1,4 @@
-Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
 DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   
 This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,17 @@
 disassembler library only.  If you build demo it will build a demo
 program that attempts to exercise the library.
 
+With recent versions of binutils (e.g. binutils-2.23.2) you may get the
+following build error:
+
+WARNING: `makeinfo' is missing on your system.  You should only need it if
+         you modified a `.texi' or `.texinfo' file, or any other file
+         ...
+
+This is because of "Bug 15345 - binutils-2.23.2 tarball doesn't build
+without makeinfo" [2]. The easiest way to work around this problem is
+to run "touch $BINUTILS/bfd/doc/bfd.info".
+
 Windows
 
 In theory this should be buildable on Windows but getting a working
@@ -101,3 +112,13 @@
 
 If the product mode of the JVM does not accept -XX:+PrintAssembly,
 you do not have a version new enough to use the hsdis plugin.
+
+* Wiki
+
+More information can be found in the OpenJDK HotSpot Wiki [1].
+
+
+Resources:
+
+[1] https://wiki.openjdk.java.net/display/HotSpot/PrintAssembly
+[2] http://sourceware.org/bugzilla/show_bug.cgi?id=15345
--- a/src/share/tools/hsdis/hsdis.c	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/tools/hsdis/hsdis.c	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -307,7 +307,8 @@
                                  app_data->printf_stream,
                                  app_data->printf_callback,
                                  native_bfd,
-                                 app_data->insn_options);
+                                 /* On PowerPC we get warnings if we pass empty options */
+                                 (caller_options == NULL) ? NULL : app_data->insn_options);
 
   /* Finish linking together the various callback blocks. */
   app_data->dinfo.application_data = (void*) app_data;
@@ -459,6 +460,9 @@
 #ifdef LIBARCH_sparcv9
   res = "sparc:v9b";
 #endif
+#ifdef LIBARCH_ppc64
+  res = "powerpc:common64";
+#endif
   if (res == NULL)
     res = "architecture not set in Makefile!";
   return res;
--- a/src/share/vm/adlc/adlc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/adlc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,6 +38,9 @@
 #include "stdarg.h"
 #include <sys/types.h>
 
+/* Make sure that we have the intptr_t and uintptr_t definitions */
+#ifdef _WIN32
+
 #if _MSC_VER >= 1300
 using namespace std;
 #endif
@@ -46,8 +49,6 @@
 #define strdup _strdup
 #endif
 
-/* Make sure that we have the intptr_t and uintptr_t definitions */
-#ifdef _WIN32
 #ifndef _INTPTR_T_DEFINED
 #ifdef _WIN64
 typedef __int64 intptr_t;
@@ -65,6 +66,7 @@
 #endif
 #define _UINTPTR_T_DEFINED
 #endif
+
 #endif // _WIN32
 
 #if defined(LINUX) || defined(_ALLBSD_SOURCE)
--- a/src/share/vm/adlc/adlparse.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/adlparse.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,19 +219,21 @@
     else if (!strcmp(ident, "encode"))  {
       parse_err(SYNERR, "Instructions specify ins_encode, not encode\n");
     }
-    else if (!strcmp(ident, "ins_encode"))     ins_encode_parse(*instr);
-    else if (!strcmp(ident, "opcode"))         instr->_opcode    = opcode_parse(instr);
-    else if (!strcmp(ident, "size"))           instr->_size      = size_parse(instr);
-    else if (!strcmp(ident, "effect"))         effect_parse(instr);
-    else if (!strcmp(ident, "expand"))         instr->_exprule   = expand_parse(instr);
-    else if (!strcmp(ident, "rewrite"))        instr->_rewrule   = rewrite_parse();
+    else if (!strcmp(ident, "ins_encode"))       ins_encode_parse(*instr);
+    // Parse late expand keyword.
+    else if (!strcmp(ident, "postalloc_expand")) postalloc_expand_parse(*instr);
+    else if (!strcmp(ident, "opcode"))           instr->_opcode    = opcode_parse(instr);
+    else if (!strcmp(ident, "size"))             instr->_size      = size_parse(instr);
+    else if (!strcmp(ident, "effect"))           effect_parse(instr);
+    else if (!strcmp(ident, "expand"))           instr->_exprule   = expand_parse(instr);
+    else if (!strcmp(ident, "rewrite"))          instr->_rewrule   = rewrite_parse();
     else if (!strcmp(ident, "constraint")) {
       parse_err(SYNERR, "Instructions do not specify a constraint\n");
     }
     else if (!strcmp(ident, "construct")) {
       parse_err(SYNERR, "Instructions do not specify a construct\n");
     }
-    else if (!strcmp(ident, "format"))         instr->_format    = format_parse();
+    else if (!strcmp(ident, "format"))           instr->_format    = format_parse();
     else if (!strcmp(ident, "interface")) {
       parse_err(SYNERR, "Instructions do not specify an interface\n");
     }
@@ -240,13 +242,14 @@
       // Check identifier to see if it is the name of an attribute
       const Form    *form = _globalNames[ident];
       AttributeForm *attr = form ? form->is_attribute() : NULL;
-      if( attr && (attr->_atype == INS_ATTR) ) {
+      if (attr && (attr->_atype == INS_ATTR)) {
         // Insert the new attribute into the linked list.
         Attribute *temp = attr_parse(ident);
         temp->_next = instr->_attribs;
         instr->_attribs = temp;
       } else {
-        parse_err(SYNERR, "expected one of:\n predicate, match, encode, or the name of an instruction attribute at %s\n", ident);
+        parse_err(SYNERR, "expected one of:\n predicate, match, encode, or the name of"
+                  " an instruction attribute at %s\n", ident);
       }
     }
     skipws();
@@ -258,13 +261,17 @@
   }
   // Check for "Set" form of chain rule
   adjust_set_rule(instr);
-  if (_AD._pipeline ) {
-    if( instr->expands() ) {
-      if( instr->_ins_pipe )
-        parse_err(WARN, "ins_pipe and expand rule both specified for instruction \"%s\"; ins_pipe will be unused\n", instr->_ident);
+  if (_AD._pipeline) {
+    // No pipe required for late expand.
+    if (instr->expands() || instr->postalloc_expands()) {
+      if (instr->_ins_pipe) {
+        parse_err(WARN, "ins_pipe and expand rule both specified for instruction \"%s\";"
+                  " ins_pipe will be unused\n", instr->_ident);
+      }
     } else {
-      if( !instr->_ins_pipe )
+      if (!instr->_ins_pipe) {
         parse_err(WARN, "No ins_pipe specified for instruction \"%s\"\n", instr->_ident);
+      }
     }
   }
   // Add instruction to tail of instruction list
@@ -2779,11 +2786,13 @@
     encoding->add_parameter(opForm->_ident, param);
   }
 
-  // Define a MacroAssembler instance for use by the encoding.  The
-  // name is chosen to match the __ idiom used for assembly in other
-  // parts of hotspot and assumes the existence of the standard
-  // #define __ _masm.
-  encoding->add_code("    MacroAssembler _masm(&cbuf);\n");
+  if (!inst._is_postalloc_expand) {
+    // Define a MacroAssembler instance for use by the encoding.  The
+    // name is chosen to match the __ idiom used for assembly in other
+    // parts of hotspot and assumes the existence of the standard
+    // #define __ _masm.
+    encoding->add_code("    MacroAssembler _masm(&cbuf);\n");
+  }
 
   // Parse the following %{ }% block
   ins_encode_parse_block_impl(inst, encoding, ec_name);
@@ -2854,10 +2863,14 @@
       // Check if this instruct is a MachConstantNode.
       if (strcmp(rep_var, "constanttablebase") == 0) {
         // This instruct is a MachConstantNode.
-        inst.set_is_mach_constant(true);
+        inst.set_needs_constant_base(true);
+        if (strncmp("MachCall", inst.mach_base_class(_globalNames), strlen("MachCall")) != 0 ) {
+          inst.set_is_mach_constant(true);
+        }
 
         if (_curchar == '(')  {
-          parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument (only constantaddress and constantoffset)", ec_name);
+          parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument "
+                            "(only constantaddress and constantoffset)", ec_name);
           return;
         }
       }
@@ -2955,18 +2968,34 @@
       while (_curchar != ')') {
         char *param = get_ident_or_literal_constant("encoding operand");
         if ( param != NULL ) {
-          // Found a parameter:
-          // Check it is a local name, add it to the list, then check for more
-          // New: allow hex constants as parameters to an encode method.
-          // New: allow parenthesized expressions as parameters.
-          // New: allow "primary", "secondary", "tertiary" as parameters.
-          // New: allow user-defined register name as parameter
-          if ( (inst._localNames[param] == NULL) &&
-               !ADLParser::is_literal_constant(param) &&
-               (Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) &&
-               ((_AD._register == NULL ) || (_AD._register->getRegDef(param) == NULL)) ) {
-            parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name);
-            return;
+
+          // Check if this instruct is a MachConstantNode.
+          if (strcmp(param, "constanttablebase") == 0) {
+            // This instruct is a MachConstantNode.
+            inst.set_needs_constant_base(true);
+            if (strncmp("MachCall", inst.mach_base_class(_globalNames), strlen("MachCall")) != 0 ) {
+              inst.set_is_mach_constant(true);
+            }
+
+            if (_curchar == '(')  {
+              parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument "
+                        "(only constantaddress and constantoffset)", ec_name);
+              return;
+            }
+          } else {
+            // Found a parameter:
+            // Check it is a local name, add it to the list, then check for more
+            // New: allow hex constants as parameters to an encode method.
+            // New: allow parenthesized expressions as parameters.
+            // New: allow "primary", "secondary", "tertiary" as parameters.
+            // New: allow user-defined register name as parameter
+            if ( (inst._localNames[param] == NULL) &&
+                 !ADLParser::is_literal_constant(param) &&
+                 (Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) &&
+                 ((_AD._register == NULL ) || (_AD._register->getRegDef(param) == NULL)) ) {
+              parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name);
+              return;
+            }
           }
           params->add_entry(param);
 
@@ -3050,6 +3079,160 @@
   inst._insencode = encrule;
 }
 
+//------------------------------postalloc_expand_parse---------------------------
+// Encode rules have the form
+//   postalloc_expand( encode_class_name(parameter_list) );
+//
+// The "encode_class_name" must be defined in the encode section.
+// The parameter list contains $names that are locals.
+//
+// This is just a copy of ins_encode_parse without the loop.
+void ADLParser::postalloc_expand_parse(InstructForm& inst) {
+  inst._is_postalloc_expand = true;
+
+  // Parse encode class name.
+  skipws();                        // Skip whitespace.
+  if (_curchar != '(') {
+    // Check for postalloc_expand %{ form
+    if ((_curchar == '%') && (*(_ptr+1) == '{')) {
+      next_char();                      // Skip '%'
+      next_char();                      // Skip '{'
+
+      // Parse the block form of postalloc_expand
+      ins_encode_parse_block(inst);
+      return;
+    }
+
+    parse_err(SYNERR, "missing '(' in postalloc_expand definition\n");
+    return;
+  }
+  next_char();                     // Move past '('.
+  skipws();
+
+  InsEncode *encrule = new InsEncode(); // Encode class for instruction.
+  encrule->_linenum = linenum();
+  char      *ec_name = NULL;       // String representation of encode rule.
+  // identifier is optional.
+  if (_curchar != ')') {
+    ec_name = get_ident();
+    if (ec_name == NULL) {
+      parse_err(SYNERR, "Invalid postalloc_expand class name after 'postalloc_expand('.\n");
+      return;
+    }
+    // Check that encoding is defined in the encode section.
+    EncClass *encode_class = _AD._encode->encClass(ec_name);
+
+    // Get list for encode method's parameters
+    NameAndList *params = encrule->add_encode(ec_name);
+
+    // Parse the parameters to this encode method.
+    skipws();
+    if (_curchar == '(') {
+      next_char();                 // Move past '(' for parameters.
+
+      // Parse the encode method's parameters.
+      while (_curchar != ')') {
+        char *param = get_ident_or_literal_constant("encoding operand");
+        if (param != NULL) {
+          // Found a parameter:
+
+          // First check for constant table support.
+
+          // Check if this instruct is a MachConstantNode.
+          if (strcmp(param, "constanttablebase") == 0) {
+            // This instruct is a MachConstantNode.
+            inst.set_needs_constant_base(true);
+            if (strncmp("MachCall", inst.mach_base_class(_globalNames), strlen("MachCall")) != 0 ) {
+              inst.set_is_mach_constant(true);
+            }
+
+            if (_curchar == '(') {
+              parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument "
+                        "(only constantaddress and constantoffset)", ec_name);
+              return;
+            }
+          }
+          else if ((strcmp(param, "constantaddress") == 0) ||
+                   (strcmp(param, "constantoffset")  == 0))  {
+            // This instruct is a MachConstantNode.
+            inst.set_is_mach_constant(true);
+
+            // If the constant keyword has an argument, parse it.
+            if (_curchar == '(') constant_parse(inst);
+          }
+
+          // Else check it is a local name, add it to the list, then check for more.
+          // New: allow hex constants as parameters to an encode method.
+          // New: allow parenthesized expressions as parameters.
+          // New: allow "primary", "secondary", "tertiary" as parameters.
+          // New: allow user-defined register name as parameter.
+          else if ((inst._localNames[param] == NULL) &&
+                   !ADLParser::is_literal_constant(param) &&
+                   (Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) &&
+                   ((_AD._register == NULL) || (_AD._register->getRegDef(param) == NULL))) {
+            parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name);
+            return;
+          }
+          params->add_entry(param);
+
+          skipws();
+          if (_curchar == ',') {
+            // More parameters to come.
+            next_char();           // Move past ',' between parameters.
+            skipws();              // Skip to next parameter.
+          } else if (_curchar == ')') {
+            // Done with parameter list
+          } else {
+            // Only ',' or ')' are valid after a parameter name.
+            parse_err(SYNERR, "expected ',' or ')' after parameter %s.\n", ec_name);
+            return;
+          }
+
+        } else {
+          skipws();
+          // Did not find a parameter.
+          if (_curchar == ',') {
+            parse_err(SYNERR, "Expected encode parameter before ',' in postalloc_expand %s.\n", ec_name);
+            return;
+          }
+          if (_curchar != ')') {
+            parse_err(SYNERR, "Expected ')' after postalloc_expand parameters.\n");
+            return;
+          }
+        }
+      } // WHILE loop collecting parameters.
+      next_char();                 // Move past ')' at end of parameters.
+    } // Done with parameter list for encoding.
+
+    // Check for ',' or ')' after encoding.
+    skipws();                      // Move to character after parameters.
+    if (_curchar != ')') {
+      // Only a ')' is allowed.
+      parse_err(SYNERR, "Expected ')' after postalloc_expand %s.\n", ec_name);
+      return;
+    }
+  } // Done parsing postalloc_expand method and its parameters.
+  if (_curchar != ')') {
+    parse_err(SYNERR, "Missing ')' at end of postalloc_expand description.\n");
+    return;
+  }
+  next_char();                     // Move past ')'.
+  skipws();                        // Skip leading whitespace.
+
+  if (_curchar != ';') {
+    parse_err(SYNERR, "Missing ';' at end of postalloc_expand.\n");
+    return;
+  }
+  next_char();                     // Move past ';'.
+  skipws();                        // Be friendly to oper_parse().
+
+  // Debug Stuff.
+  if (_AD._adl_debug > 1) fprintf(stderr, "Instruction postalloc_expand: %s\n", ec_name);
+
+  // Set encode class of this instruction.
+  inst._insencode = encrule;
+}
+
 
 //------------------------------constant_parse---------------------------------
 // Parse a constant expression.
@@ -3835,13 +4018,11 @@
 //------------------------------expand_parse-----------------------------------
 ExpandRule* ADLParser::expand_parse(InstructForm *instr) {
   char         *ident, *ident2;
-  OperandForm  *oper;
-  InstructForm *ins;
   NameAndList  *instr_and_operands = NULL;
   ExpandRule   *exp = new ExpandRule();
 
-  // Expand is a block containing an ordered list of instructions, each of
-  // which has an ordered list of operands.
+  // Expand is a block containing an ordered list of operands with initializers,
+  // or instructions, each of which has an ordered list of operands.
   // Check for block delimiter
   skipws();                        // Skip leading whitespace
   if ((_curchar != '%')
@@ -3855,12 +4036,30 @@
     if (ident == NULL) {
       parse_err(SYNERR, "identifier expected at %c\n", _curchar);
       continue;
-    }                              // Check that you have a valid instruction
+    }
+
+    // Check whether we should parse an instruction or operand.
     const Form *form = _globalNames[ident];
-    ins = form ? form->is_instruction() : NULL;
-    if (ins == NULL) {
+    bool parse_oper = false;
+    bool parse_ins  = false;
+    if (form == NULL) {
+      skipws();
+      // Check whether this looks like an instruction specification.  If so,
+      // just parse the instruction.  The declaration of the instruction is
+      // not needed here.
+      if (_curchar == '(') parse_ins = true;
+    } else if (form->is_instruction()) {
+      parse_ins = true;
+    } else if (form->is_operand()) {
+      parse_oper = true;
+    } else {
+      parse_err(SYNERR, "instruction/operand name expected at %s\n", ident);
+      continue;
+    }
+
+    if (parse_oper) {
       // This is a new operand
-      oper = form ? form->is_operand() : NULL;
+      OperandForm *oper = form->is_operand();
       if (oper == NULL) {
         parse_err(SYNERR, "instruction/operand name expected at %s\n", ident);
         continue;
@@ -3895,6 +4094,7 @@
       skipws();
     }
     else {
+      assert(parse_ins, "sanity");
       // Add instruction to list
       instr_and_operands = new NameAndList(ident);
       // Grab operands, build nameList of them, and then put into dictionary
@@ -3918,7 +4118,7 @@
           parse_err(SYNERR, "operand name expected at %s\n", ident2);
           continue;
         }
-        oper = form2->is_operand();
+        OperandForm *oper = form2->is_operand();
         if (oper == NULL && !form2->is_opclass()) {
           parse_err(SYNERR, "operand name expected at %s\n", ident2);
           continue;
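To make the new keyword concrete, here is a hypothetical .ad fragment in the
form postalloc_expand_parse() accepts; the instruction, operand, and
encode-class names are all invented for illustration (note that, per the
adlparse.cpp change above, no ins_pipe is required for such instructions):

  instruct loadConL_Ex(iRegLdst dst, immL src) %{
    match(Set dst src);
    format %{ "LD      $dst, $src \t// postalloc expanded" %}
    postalloc_expand( postalloc_expand_load_long(dst, src) );
  %}
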
--- a/src/share/vm/adlc/adlparse.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/adlparse.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -159,6 +159,8 @@
   void           ins_encode_parse(InstructForm &inst);
   void           ins_encode_parse_block(InstructForm &inst);
   void           ins_encode_parse_block_impl(InstructForm& inst, EncClass* encoding, char* ec_name);
+  // Parse instruction postalloc expand rule.
+  void           postalloc_expand_parse(InstructForm &inst);
 
   void           constant_parse(InstructForm& inst);
   void           constant_parse_expression(EncClass* encoding, char* ec_name);
--- a/src/share/vm/adlc/archDesc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/archDesc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -43,32 +43,6 @@
   return result;
 }
 
-// Utilities to characterize effect statements
-static bool is_def(int usedef) {
-  switch(usedef) {
-  case Component::DEF:
-  case Component::USE_DEF: return true; break;
-  }
-  return false;
-}
-
-static bool is_use(int usedef) {
-  switch(usedef) {
-  case Component::USE:
-  case Component::USE_DEF:
-  case Component::USE_KILL: return true; break;
-  }
-  return false;
-}
-
-static bool is_kill(int usedef) {
-  switch(usedef) {
-  case Component::KILL:
-  case Component::USE_KILL: return true; break;
-  }
-  return false;
-}
-
 //---------------------------ChainList Methods-------------------------------
 ChainList::ChainList() {
 }
@@ -172,7 +146,8 @@
     _internalOps(cmpstr,hashstr, Form::arena),
     _internalMatch(cmpstr,hashstr, Form::arena),
     _chainRules(cmpstr,hashstr, Form::arena),
-    _cisc_spill_operand(NULL) {
+    _cisc_spill_operand(NULL),
+    _needs_clone_jvms(false) {
 
       // Initialize the opcode to MatchList table with NULLs
       for( int i=0; i<_last_opcode; ++i ) {
@@ -1192,15 +1167,12 @@
          || strcmp(idealName,"CmpF") == 0
          || strcmp(idealName,"FastLock") == 0
          || strcmp(idealName,"FastUnlock") == 0
-         || strcmp(idealName,"AddExactI") == 0
-         || strcmp(idealName,"AddExactL") == 0
-         || strcmp(idealName,"SubExactI") == 0
-         || strcmp(idealName,"SubExactL") == 0
-         || strcmp(idealName,"MulExactI") == 0
-         || strcmp(idealName,"MulExactL") == 0
-         || strcmp(idealName,"NegExactI") == 0
-         || strcmp(idealName,"NegExactL") == 0
-         || strcmp(idealName,"FlagsProj") == 0
+         || strcmp(idealName,"OverflowAddI") == 0
+         || strcmp(idealName,"OverflowAddL") == 0
+         || strcmp(idealName,"OverflowSubI") == 0
+         || strcmp(idealName,"OverflowSubL") == 0
+         || strcmp(idealName,"OverflowMulI") == 0
+         || strcmp(idealName,"OverflowMulL") == 0
          || strcmp(idealName,"Bool") == 0
          || strcmp(idealName,"Binary") == 0 ) {
       // Removed ConI from the must_clone list.  CPUs that cannot use
--- a/src/share/vm/adlc/archDesc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/archDesc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -121,6 +121,12 @@
   // to access [stack_pointer + offset]
   OperandForm  *_cisc_spill_operand;
 
+  // If a Call node uses $constanttablebase, the matcher adds the
+  // MachConstantBaseNode input and modifies the jvms. If so, jvm states
+  // always have to be cloned when a node is cloned.  Adlc generates
+  // Compile::needs_clone_jvms() accordingly.
+  bool _needs_clone_jvms;
+
   // Methods for outputting the DFA
   void gen_match(FILE *fp, MatchList &mlist, ProductionState &status, Dict &operands_chained_from);
   void chain_rule(FILE *fp, const char *indent, const char *ideal,
@@ -289,6 +295,7 @@
   void addPreHeaderBlocks(FILE *fp_hpp);
   void addHeaderBlocks(FILE *fp_hpp);
   void addSourceBlocks(FILE *fp_cpp);
+  void generate_needs_clone_jvms(FILE *fp_cpp);
   void generate_adlc_verification(FILE *fp_cpp);
 
   // output declaration of class State
@@ -311,6 +318,8 @@
   void defineEvalConstant(FILE *fp, InstructForm &node);
   // Generator for Emit methods for instructions
   void defineEmit        (FILE *fp, InstructForm &node);
+  // Generator for postalloc_expand methods for instructions.
+  void define_postalloc_expand(FILE *fp, InstructForm &node);
 
   // Define a MachOper encode method
   void define_oper_interface(FILE *fp, OperandForm &oper, FormDict &globals,
--- a/src/share/vm/adlc/dfa.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/dfa.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/adlc/dict2.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/dict2.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/adlc/formssel.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/formssel.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,31 +32,33 @@
     _localNames(cmpstr, hashstr, Form::arena),
     _effects(cmpstr, hashstr, Form::arena),
     _is_mach_constant(false),
+    _needs_constant_base(false),
     _has_call(false)
 {
       _ftype = Form::INS;
 
-      _matrule   = NULL;
-      _insencode = NULL;
-      _constant  = NULL;
-      _opcode    = NULL;
-      _size      = NULL;
-      _attribs   = NULL;
-      _predicate = NULL;
-      _exprule   = NULL;
-      _rewrule   = NULL;
-      _format    = NULL;
-      _peephole  = NULL;
-      _ins_pipe  = NULL;
-      _uniq_idx  = NULL;
-      _num_uniq  = 0;
-      _cisc_spill_operand = Not_cisc_spillable;// Which operand may cisc-spill
+      _matrule              = NULL;
+      _insencode            = NULL;
+      _constant             = NULL;
+      _is_postalloc_expand  = false;
+      _opcode               = NULL;
+      _size                 = NULL;
+      _attribs              = NULL;
+      _predicate            = NULL;
+      _exprule              = NULL;
+      _rewrule              = NULL;
+      _format               = NULL;
+      _peephole             = NULL;
+      _ins_pipe             = NULL;
+      _uniq_idx             = NULL;
+      _num_uniq             = 0;
+      _cisc_spill_operand   = Not_cisc_spillable;// Which operand may cisc-spill
       _cisc_spill_alternate = NULL;            // possible cisc replacement
-      _cisc_reg_mask_name = NULL;
-      _is_cisc_alternate = false;
-      _is_short_branch = false;
-      _short_branch_form = NULL;
-      _alignment = 1;
+      _cisc_reg_mask_name   = NULL;
+      _is_cisc_alternate    = false;
+      _is_short_branch      = false;
+      _short_branch_form    = NULL;
+      _alignment            = 1;
 }
 
 InstructForm::InstructForm(const char *id, InstructForm *instr, MatchRule *rule)
@@ -64,31 +66,33 @@
     _localNames(instr->_localNames),
     _effects(instr->_effects),
     _is_mach_constant(false),
+    _needs_constant_base(false),
     _has_call(false)
 {
       _ftype = Form::INS;
 
-      _matrule   = rule;
-      _insencode = instr->_insencode;
-      _constant  = instr->_constant;
-      _opcode    = instr->_opcode;
-      _size      = instr->_size;
-      _attribs   = instr->_attribs;
-      _predicate = instr->_predicate;
-      _exprule   = instr->_exprule;
-      _rewrule   = instr->_rewrule;
-      _format    = instr->_format;
-      _peephole  = instr->_peephole;
-      _ins_pipe  = instr->_ins_pipe;
-      _uniq_idx  = instr->_uniq_idx;
-      _num_uniq  = instr->_num_uniq;
-      _cisc_spill_operand = Not_cisc_spillable;// Which operand may cisc-spill
-      _cisc_spill_alternate = NULL;            // possible cisc replacement
-      _cisc_reg_mask_name = NULL;
-      _is_cisc_alternate = false;
-      _is_short_branch = false;
-      _short_branch_form = NULL;
-      _alignment = 1;
+      _matrule               = rule;
+      _insencode             = instr->_insencode;
+      _constant              = instr->_constant;
+      _is_postalloc_expand   = instr->_is_postalloc_expand;
+      _opcode                = instr->_opcode;
+      _size                  = instr->_size;
+      _attribs               = instr->_attribs;
+      _predicate             = instr->_predicate;
+      _exprule               = instr->_exprule;
+      _rewrule               = instr->_rewrule;
+      _format                = instr->_format;
+      _peephole              = instr->_peephole;
+      _ins_pipe              = instr->_ins_pipe;
+      _uniq_idx              = instr->_uniq_idx;
+      _num_uniq              = instr->_num_uniq;
+      _cisc_spill_operand    = Not_cisc_spillable; // Which operand may cisc-spill
+      _cisc_spill_alternate  = NULL;               // possible cisc replacement
+      _cisc_reg_mask_name    = NULL;
+      _is_cisc_alternate     = false;
+      _is_short_branch       = false;
+      _short_branch_form     = NULL;
+      _alignment             = 1;
      // Copy parameters
      const char *name;
      instr->_parameters.reset();
@@ -157,6 +161,11 @@
   return ( _exprule != NULL );
 }
 
+// This instruction has a late expand rule?
+bool InstructForm::postalloc_expands() const {
+  return _is_postalloc_expand;
+}
+
 // This instruction has a peephole rule?
 Peephole *InstructForm::peepholes() const {
   return _peephole;
@@ -639,6 +648,8 @@
   if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
   if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
   if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"StoreFence") == 0 ) return true;
+  if( strcmp(_matrule->_opType,"LoadFence") == 0 ) return true;
 
   return false;
 }
@@ -1269,11 +1280,11 @@
     return;
   }
   if (strcmp(rep_var, "constantoffset") == 0) {
-    fprintf(fp, "st->print(\"#%%d\", constant_offset());\n");
+    fprintf(fp, "st->print(\"#%%d\", constant_offset_unchecked());\n");
     return;
   }
   if (strcmp(rep_var, "constantaddress") == 0) {
-    fprintf(fp, "st->print(\"constant table base + #%%d\", constant_offset());\n");
+    fprintf(fp, "st->print(\"constant table base + #%%d\", constant_offset_unchecked());\n");
     return;
   }
 
@@ -4045,13 +4056,15 @@
 bool MatchRule::is_ideal_membar() const {
   if( !_opType ) return false;
   return
-    !strcmp(_opType,"MemBarAcquire"  ) ||
-    !strcmp(_opType,"MemBarRelease"  ) ||
+    !strcmp(_opType,"MemBarAcquire") ||
+    !strcmp(_opType,"MemBarRelease") ||
     !strcmp(_opType,"MemBarAcquireLock") ||
     !strcmp(_opType,"MemBarReleaseLock") ||
-    !strcmp(_opType,"MemBarVolatile" ) ||
-    !strcmp(_opType,"MemBarCPUOrder" ) ||
-    !strcmp(_opType,"MemBarStoreStore" );
+    !strcmp(_opType,"LoadFence" ) ||
+    !strcmp(_opType,"StoreFence") ||
+    !strcmp(_opType,"MemBarVolatile") ||
+    !strcmp(_opType,"MemBarCPUOrder") ||
+    !strcmp(_opType,"MemBarStoreStore");
 }
 
 bool MatchRule::is_ideal_loadPC() const {
--- a/src/share/vm/adlc/formssel.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/formssel.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,35 +83,37 @@
   const char    *_cisc_reg_mask_name;
   InstructForm  *_short_branch_form;
   bool           _is_short_branch;
-  bool           _is_mach_constant;   // true if Node is a MachConstantNode
+  bool           _is_mach_constant;    // True if Node is a MachConstantNode.
+  bool           _needs_constant_base; // True if Node needs the mach_constant_base input.
   uint           _alignment;
 
 public:
   // Public Data
-  const char    *_ident;           // Name of this instruction
-  NameList       _parameters;      // Locally defined names
-  FormDict       _localNames;      // Table of operands & their types
-  MatchRule     *_matrule;         // Matching rule for this instruction
-  Opcode        *_opcode;          // Encoding of the opcode for instruction
-  char          *_size;            // Size of instruction
-  InsEncode     *_insencode;       // Encoding class instruction belongs to
-  InsEncode     *_constant;        // Encoding class constant value belongs to
-  Attribute     *_attribs;         // List of Attribute rules
-  Predicate     *_predicate;       // Predicate test for this instruction
-  FormDict       _effects;         // Dictionary of effect rules
-  ExpandRule    *_exprule;         // Expand rule for this instruction
-  RewriteRule   *_rewrule;         // Rewrite rule for this instruction
-  FormatRule    *_format;          // Format for assembly generation
-  Peephole      *_peephole;        // List of peephole rules for instruction
-  const char    *_ins_pipe;        // Instruction Scheduling description class
+  const char    *_ident;               // Name of this instruction
+  NameList       _parameters;          // Locally defined names
+  FormDict       _localNames;          // Table of operands & their types
+  MatchRule     *_matrule;             // Matching rule for this instruction
+  Opcode        *_opcode;              // Encoding of the opcode for instruction
+  char          *_size;                // Size of instruction
+  InsEncode     *_insencode;           // Encoding class instruction belongs to
+  InsEncode     *_constant;            // Encoding class constant value belongs to
+  bool           _is_postalloc_expand; // Indicates that encoding just does a lateExpand.
+  Attribute     *_attribs;             // List of Attribute rules
+  Predicate     *_predicate;           // Predicate test for this instruction
+  FormDict       _effects;             // Dictionary of effect rules
+  ExpandRule    *_exprule;             // Expand rule for this instruction
+  RewriteRule   *_rewrule;             // Rewrite rule for this instruction
+  FormatRule    *_format;              // Format for assembly generation
+  Peephole      *_peephole;            // List of peephole rules for instruction
+  const char    *_ins_pipe;            // Instruction Scheduling description class
 
-  uint          *_uniq_idx;        // Indexes of unique operands
-  uint           _uniq_idx_length; // Length of _uniq_idx array
-  uint           _num_uniq;        // Number  of unique operands
-  ComponentList  _components;      // List of Components matches MachNode's
-                                   // operand structure
+  uint          *_uniq_idx;            // Indexes of unique operands
+  uint           _uniq_idx_length;     // Length of _uniq_idx array
+  uint           _num_uniq;            // Number  of unique operands
+  ComponentList  _components;          // List of Components that match MachNode's
+                                       // operand structure
 
-  bool           _has_call;        // contain a call and caller save registers should be saved?
+  bool           _has_call;            // contains a call? If so, caller-save registers should be saved.
 
   // Public Methods
   InstructForm(const char *id, bool ideal_only = false);
@@ -133,6 +135,8 @@
   virtual uint        num_defs_or_kills();
   // This instruction has an expand rule?
   virtual bool        expands() const ;
+  // This instruction has a late expand rule?
+  virtual bool        postalloc_expands() const;
   // Return this instruction's first peephole rule, or NULL
   virtual Peephole   *peepholes() const;
   // Add a peephole rule to this instruction
@@ -259,6 +263,8 @@
 
   bool                    is_mach_constant() const { return _is_mach_constant;     }
   void                set_is_mach_constant(bool x) {        _is_mach_constant = x; }
+  bool                    needs_constant_base() const { return _needs_constant_base;     }
+  void                set_needs_constant_base(bool x) {        _needs_constant_base = x; }
 
   InstructForm       *short_branch_form() { return _short_branch_form; }
   bool                has_short_branch_form() { return _short_branch_form != NULL; }
--- a/src/share/vm/adlc/main.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/main.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -29,7 +29,6 @@
 static void  usage(ArchDesc& AD);          // Print usage message and exit
 static char *strip_ext(char *fname);       // Strip off name extension
 static char *base_plus_suffix(const char* base, const char *suffix);// New concatenated string
-static char *prefix_plus_base_plus_suffix(const char* prefix, const char* base, const char *suffix);// New concatenated string
 static int get_legal_text(FileBuff &fbuf, char **legal_text); // Get pointer to legal text
 
 ArchDesc* globalAD = NULL;      // global reference to Architecture Description object
@@ -243,6 +242,11 @@
   AD.addInclude(AD._CPP_file, "nativeInst_arm.hpp");
   AD.addInclude(AD._CPP_file, "vmreg_arm.inline.hpp");
 #endif
+#ifdef TARGET_ARCH_ppc
+  AD.addInclude(AD._CPP_file, "assembler_ppc.inline.hpp");
+  AD.addInclude(AD._CPP_file, "nativeInst_ppc.hpp");
+  AD.addInclude(AD._CPP_file, "vmreg_ppc.inline.hpp");
+#endif
   AD.addInclude(AD._HPP_file, "memory/allocation.hpp");
   AD.addInclude(AD._HPP_file, "opto/machnode.hpp");
   AD.addInclude(AD._HPP_file, "opto/node.hpp");
@@ -267,6 +271,7 @@
   AD.addInclude(AD._CPP_PIPELINE_file, "adfiles", get_basename(AD._HPP_file._name));
   AD.addInclude(AD._DFA_file, "precompiled.hpp");
   AD.addInclude(AD._DFA_file, "adfiles", get_basename(AD._HPP_file._name));
+  AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp");  // Use PROB_MAX in predicate.
   AD.addInclude(AD._DFA_file, "opto/matcher.hpp");
   AD.addInclude(AD._DFA_file, "opto/opcodes.hpp");
   // Make sure each .cpp file starts with include lines:
@@ -300,6 +305,7 @@
   AD.buildInstructMatchCheck(AD._CPP_file._fp);  // .cpp
   // define methods for machine dependent frame management
   AD.buildFrameMethods(AD._CPP_file._fp);         // .cpp
+  AD.generate_needs_clone_jvms(AD._CPP_file._fp);
 
   // do this last:
   AD.addPreprocessorChecks(AD._CPP_file._fp);     // .cpp
--- a/src/share/vm/adlc/output_c.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/output_c.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,23 +35,6 @@
   return false;
 }
 
-static bool is_use(int usedef) {
-  switch(usedef) {
-  case Component::USE:
-  case Component::USE_DEF:
-  case Component::USE_KILL: return true; break;
-  }
-  return false;
-}
-
-static bool is_kill(int usedef) {
-  switch(usedef) {
-  case Component::KILL:
-  case Component::USE_KILL: return true; break;
-  }
-  return false;
-}
-
 // Define  an array containing the machine register names, strings.
 static void defineRegNames(FILE *fp, RegisterForm *registers) {
   if (registers) {
@@ -1570,6 +1553,13 @@
       new_id = expand_instr->name();
 
       InstructForm* expand_instruction = (InstructForm*)globalAD->globalNames()[new_id];
+
+      if (!expand_instruction) {
+        globalAD->syntax_err(node->_linenum, "In %s: instruction %s used in expand not declared\n",
+                             node->_ident, new_id);
+        continue;
+      }
+
       if (expand_instruction->has_temps()) {
         globalAD->syntax_err(node->_linenum, "In %s: expand rules using instructs with TEMPs aren't supported: %s",
                              node->_ident, new_id);
@@ -1628,6 +1618,13 @@
         // Use 'parameter' at current position in list of new instruction's formals
         // instead of 'opid' when looking up info internal to new_inst
         const char *parameter = formal_lst->iter();
+        if (!parameter) {
+          globalAD->syntax_err(node->_linenum, "Operand %s of expand instruction %s has"
+                               " no equivalent in new instruction %s.",
+                               opid, node->_ident, new_inst->_ident);
+          assert(0, "Wrong expand");
+        }
+
         // Check for an operand which is created in the expand rule
         if ((exp_pos = node->_exprule->_newopers.index(opid)) != -1) {
           new_pos = new_inst->operand_position(parameter,Component::USE);
@@ -1825,18 +1822,26 @@
 
   // If the node is a MachConstantNode, insert the MachConstantBaseNode edge.
   // NOTE: this edge must be the last input (see MachConstantNode::mach_constant_base_node_input).
-  if (node->is_mach_constant()) {
-    fprintf(fp,"  add_req(C->mach_constant_base_node());\n");
+  // There are nodes that don't use $constanttablebase, but still require that it
+  // is an input to the node. Example: divF_reg_immN, Repl32B_imm on x86_64.
+  if (node->is_mach_constant() || node->needs_constant_base()) {
+    if (node->is_ideal_call() != Form::invalid_type &&
+        node->is_ideal_call() != Form::JAVA_LEAF) {
+      fprintf(fp, "  // MachConstantBaseNode added in matcher.\n");
+      _needs_clone_jvms = true;
+    } else {
+      fprintf(fp, "  add_req(C->mach_constant_base_node());\n");
+    }
   }
 
-  fprintf(fp,"\n");
-  if( node->expands() ) {
-    fprintf(fp,"  return result;\n");
+  fprintf(fp, "\n");
+  if (node->expands()) {
+    fprintf(fp, "  return result;\n");
   } else {
-    fprintf(fp,"  return this;\n");
+    fprintf(fp, "  return this;\n");
   }
-  fprintf(fp,"}\n");
-  fprintf(fp,"\n");
+  fprintf(fp, "}\n");
+  fprintf(fp, "\n");
 }
 
 
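As a sketch of what the fprintf calls above produce (node class name hypothetical), the tail of a generated Expand method for a non-call node that needs the constant table base reads roughly:

    MachNode* loadConLNode::Expand(State* state, Node_List& proj_list, Node* mem) {
      // ... operand and projection handling emitted earlier in defineExpand ...
      add_req(C->mach_constant_base_node());

      return this;
    }

For a call node, only the comment "// MachConstantBaseNode added in matcher." is emitted at that point, and _needs_clone_jvms is set instead.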
@@ -1938,9 +1943,9 @@
       else if ((strcmp(rep_var, "constanttablebase") == 0) ||
                (strcmp(rep_var, "constantoffset")    == 0) ||
                (strcmp(rep_var, "constantaddress")   == 0)) {
-        if (!_inst.is_mach_constant()) {
+        if (!(_inst.is_mach_constant() || _inst.needs_constant_base())) {
           _AD.syntax_err(_encoding._linenum,
-                         "Replacement variable %s not allowed in instruct %s (only in MachConstantNode).\n",
+                         "Replacement variable %s not allowed in instruct %s (only in MachConstantNode or MachCall).\n",
                          rep_var, _encoding._name);
         }
       }
@@ -2103,16 +2108,21 @@
         if (strcmp(rep_var,"$reg") == 0 || reg_conversion(rep_var) != NULL) {
           _reg_status  = LITERAL_ACCESSED;
         } else {
+          _AD.syntax_err(_encoding._linenum,
+                         "Invalid access to literal register parameter '%s' in %s.\n",
+                         rep_var, _encoding._name);
           assert( false, "invalid access to literal register parameter");
         }
       }
       // literal constant parameters must be accessed as a 'constant' field
-      if ( _constant_status != LITERAL_NOT_SEEN ) {
-        assert( _constant_status == LITERAL_SEEN, "Must have seen constant literal before now");
-        if( strcmp(rep_var,"$constant") == 0 ) {
-          _constant_status  = LITERAL_ACCESSED;
+      if (_constant_status != LITERAL_NOT_SEEN) {
+        assert(_constant_status == LITERAL_SEEN, "Must have seen constant literal before now");
+        if (strcmp(rep_var,"$constant") == 0) {
+          _constant_status = LITERAL_ACCESSED;
         } else {
-          assert( false, "invalid access to literal constant parameter");
+          _AD.syntax_err(_encoding._linenum,
+                         "Invalid access to literal constant parameter '%s' in %s.\n",
+                         rep_var, _encoding._name);
         }
       }
     } // end replacement and/or subfield
@@ -2294,6 +2304,7 @@
 #if defined(IA32) || defined(AMD64)
     if (strcmp(rep_var,"$XMMRegister") == 0)   return "as_XMMRegister";
 #endif
+    if (strcmp(rep_var,"$CondRegister") == 0)  return "as_ConditionRegister";
     return NULL;
   }
 
@@ -2488,7 +2499,113 @@
   fprintf(fp, "  return (VerifyOops ? MachNode::size(ra_) : %s);\n", inst._size);
 
   // (3) and (4)
-  fprintf(fp,"}\n");
+  fprintf(fp,"}\n\n");
+}
+
+// Emit postalloc expand function.
+void ArchDesc::define_postalloc_expand(FILE *fp, InstructForm &inst) {
+  InsEncode *ins_encode = inst._insencode;
+
+  // Output instruction's postalloc_expand prototype.
+  fprintf(fp, "void  %sNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {\n",
+          inst._ident);
+
+  assert((_encode != NULL) && (ins_encode != NULL), "You must define an encode section.");
+
+  // Output each operand's offset into the array of registers.
+  inst.index_temps(fp, _globalNames);
+
+  // Output variables "unsigned idx_<par_name>", "Node *n_<par_name>" and "MachOper *op_<par_name>"
+  // for each parameter <par_name> specified in the encoding.
+  ins_encode->reset();
+  const char *ec_name = ins_encode->encode_class_iter();
+  assert(ec_name != NULL, "Postalloc expand must specify an encoding.");
+
+  EncClass *encoding = _encode->encClass(ec_name);
+  if (encoding == NULL) {
+    fprintf(stderr, "User did not define contents of this encode_class: %s\n", ec_name);
+    abort();
+  }
+  if (ins_encode->current_encoding_num_args() != encoding->num_args()) {
+    globalAD->syntax_err(ins_encode->_linenum, "In %s: passing %d arguments to %s but expecting %d",
+                         inst._ident, ins_encode->current_encoding_num_args(),
+                         ec_name, encoding->num_args());
+  }
+
+  fprintf(fp, "  // Access to ins and operands for postalloc expand.\n");
+  const int buflen = 2000;
+  char idxbuf[buflen]; char *ib = idxbuf; idxbuf[0] = '\0';
+  char nbuf  [buflen]; char *nb = nbuf;   nbuf[0]   = '\0';
+  char opbuf [buflen]; char *ob = opbuf;  opbuf[0]  = '\0';
+
+  encoding->_parameter_type.reset();
+  encoding->_parameter_name.reset();
+  const char *type = encoding->_parameter_type.iter();
+  const char *name = encoding->_parameter_name.iter();
+  int param_no = 0;
+  for (; (type != NULL) && (name != NULL);
+       (type = encoding->_parameter_type.iter()), (name = encoding->_parameter_name.iter())) {
+    const char* arg_name = ins_encode->rep_var_name(inst, param_no);
+    int idx = inst.operand_position_format(arg_name);
+    if (strcmp(arg_name, "constanttablebase") == 0) {
+      ib += sprintf(ib, "  unsigned idx_%-5s = mach_constant_base_node_input(); \t// %s, \t%s\n",
+                    name, type, arg_name);
+      nb += sprintf(nb, "  Node    *n_%-7s = lookup(idx_%s);\n", name, name);
+      // There is no operand for the constanttablebase.
+    } else if (inst.is_noninput_operand(idx)) {
+      globalAD->syntax_err(inst._linenum,
+                           "In %s: you cannot pass the non-input %s to a postalloc expand encoding.\n",
+                           inst._ident, arg_name);
+    } else {
+      ib += sprintf(ib, "  unsigned idx_%-5s = idx%d; \t// %s, \t%s\n",
+                    name, idx, type, arg_name);
+      nb += sprintf(nb, "  Node    *n_%-7s = lookup(idx_%s);\n", name, name);
+      ob += sprintf(ob, "  %sOper *op_%s = (%sOper *)opnd_array(%d);\n", type, name, type, idx);
+    }
+    param_no++;
+  }
+  assert(ib < &idxbuf[buflen-1] && nb < &nbuf[buflen-1] && ob < &opbuf[buflen-1], "buffer overflow");
+
+  fprintf(fp, "%s", idxbuf);
+  fprintf(fp, "  Node    *n_region  = lookup(0);\n");
+  fprintf(fp, "%s%s", nbuf, opbuf);
+  fprintf(fp, "  Compile *C = ra_->C;\n");
+
+  // Output this instruction's encodings.
+  fprintf(fp, "  {");
+  const char *ec_code    = NULL;
+  const char *ec_rep_var = NULL;
+  assert(encoding == _encode->encClass(ec_name), "");
+
+  DefineEmitState pending(fp, *this, *encoding, *ins_encode, inst);
+  encoding->_code.reset();
+  encoding->_rep_vars.reset();
+  // Process list of user-defined strings,
+  // and occurrences of replacement variables.
+  // Replacement Vars are pushed into a list and then output.
+  while ((ec_code = encoding->_code.iter()) != NULL) {
+    if (! encoding->_code.is_signal(ec_code)) {
+      // Emit pending code.
+      pending.emit();
+      pending.clear();
+      // Emit this code section.
+      fprintf(fp, "%s", ec_code);
+    } else {
+      // A replacement variable or one of its subfields.
+      // Obtain replacement variable from list.
+      ec_rep_var = encoding->_rep_vars.iter();
+      pending.add_rep_var(ec_rep_var);
+    }
+  }
+  // Emit pending code.
+  pending.emit();
+  pending.clear();
+  fprintf(fp, "  }\n");
+
+  fprintf(fp, "}\n\n");
+
+  ec_name = ins_encode->encode_class_iter();
+  assert(ec_name == NULL, "Postalloc expand may only have one encoding.");
 }
 
 // defineEmit -----------------------------------------------------------------
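Assembled from the fprintf calls in define_postalloc_expand above, a complete generated method looks roughly like the following (instruction, operand and type names hypothetical):

    void loadConLNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
      // Operand index definitions written by index_temps() appear here.
      // Access to ins and operands for postalloc expand.
      unsigned idx_dst   = idx1;  // iRegLdst,  dst
      Node    *n_region  = lookup(0);
      Node    *n_dst     = lookup(idx_dst);
      iRegLdstOper *op_dst = (iRegLdstOper *)opnd_array(1);
      Compile *C = ra_->C;
      {
        // Body of the user-defined encode_class, with replacement
        // variables expanded via DefineEmitState.
      }
    }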
@@ -2841,7 +2958,7 @@
   } else if ( (strcmp(name,"disp") == 0) ) {
     fprintf(fp,"(PhaseRegAlloc *ra_, const Node *node, int idx) const { \n");
   } else {
-    fprintf(fp,"() const { \n");
+    fprintf(fp, "() const {\n");
   }
 
   // Check for hexadecimal value OR replacement variable
@@ -2891,6 +3008,8 @@
     // Hex value
     fprintf(fp,"    return %s;\n", encoding);
   } else {
+    globalAD->syntax_err(oper._linenum, "In operand %s: unsupported encode constant '%s' for %s.",
+                         oper._ident, encoding, name);
     assert( false, "Do not support octal or decimal encode constants");
   }
   fprintf(fp,"  }\n");
@@ -3055,6 +3174,7 @@
     if( instr->expands() || instr->needs_projections() ||
         instr->has_temps() ||
         instr->is_mach_constant() ||
+        instr->needs_constant_base() ||
         instr->_matrule != NULL &&
         instr->num_opnds() != instr->num_unique_opnds() )
       defineExpand(_CPP_EXPAND_file._fp, instr);
@@ -3142,7 +3262,15 @@
     // Ensure this is a machine-world instruction
     if ( instr->ideal_only() ) continue;
 
-    if (instr->_insencode)         defineEmit        (fp, *instr);
+    if (instr->_insencode) {
+      if (instr->postalloc_expands()) {
+        // Don't write this to _CPP_EXPAND_file: the generated code calls C code
+        // from the ad file's code sections, which are dumped to fp.
+        define_postalloc_expand(fp, *instr);
+      } else {
+        defineEmit(fp, *instr);
+      }
+    }
     if (instr->is_mach_constant()) defineEvalConstant(fp, *instr);
     if (instr->_size)              defineSize        (fp, *instr);
 
@@ -3503,6 +3631,11 @@
   return callconv;
 }
 
+void ArchDesc::generate_needs_clone_jvms(FILE *fp_cpp) {
+  fprintf(fp_cpp, "bool Compile::needs_clone_jvms() { return %s; }\n\n",
+          _needs_clone_jvms ? "true" : "false");
+}
+
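The emitted definition is a single line; with _needs_clone_jvms set it reads:

    bool Compile::needs_clone_jvms() { return true; }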
 //---------------------------generate_assertion_checks-------------------
 void ArchDesc::generate_adlc_verification(FILE *fp_cpp) {
   fprintf(fp_cpp, "\n");
@@ -3819,8 +3952,10 @@
   }
 
   // Fill in the bottom_type where requested
-  if ( inst->captures_bottom_type(_globalNames) ) {
-    fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
+  if (inst->captures_bottom_type(_globalNames)) {
+    if (strncmp("MachCall", inst->mach_base_class(_globalNames), strlen("MachCall"))) {
+      fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
+    }
   }
   if( inst->is_ideal_if() ) {
     fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent);
--- a/src/share/vm/adlc/output_h.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/adlc/output_h.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,11 @@
 #include "adlc.hpp"
 
 // The comment delimiter used in format statements after assembler instructions.
+#if defined(PPC64)
+#define commentSeperator "\t//"
+#else
 #define commentSeperator "!"
+#endif
 
 // Generate the #define that describes the number of registers.
 static void defineRegCount(FILE *fp, RegisterForm *registers) {
@@ -1551,7 +1555,20 @@
     if ( instr->is_ideal_jump() ) {
       fprintf(fp, "  GrowableArray<Label*> _index2label;\n");
     }
-    fprintf(fp,"public:\n");
+
+    fprintf(fp, "public:\n");
+
+    Attribute *att = instr->_attribs;
+    // Fields of the node specified in the ad file.
+    while (att != NULL) {
+      if (strncmp(att->_ident, "ins_field_", 10) == 0) {
+        const char *field_name = att->_ident+10;
+        const char *field_type = att->_val;
+        fprintf(fp, "  %s _%s;\n", field_type, field_name);
+      }
+      att = (Attribute *)att->_next;
+    }
+
     fprintf(fp,"  MachOper *opnd_array(uint operand_index) const {\n");
     fprintf(fp,"    assert(operand_index < _num_opnds, \"invalid _opnd_array index\");\n");
     fprintf(fp,"    return _opnd_array[operand_index];\n");
@@ -1598,14 +1615,19 @@
     Attribute *attr = instr->_attribs;
     bool avoid_back_to_back = false;
     while (attr != NULL) {
-      if (strcmp(attr->_ident,"ins_cost") &&
-          strcmp(attr->_ident,"ins_short_branch")) {
-        fprintf(fp,"          int            %s() const { return %s; }\n",
-                attr->_ident, attr->_val);
+      if (strcmp (attr->_ident, "ins_cost") != 0 &&
+          strncmp(attr->_ident, "ins_field_", 10) != 0 &&
+          // Must match function in node.hpp: return type bool, no prefix "ins_".
+          strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") != 0 &&
+          strcmp (attr->_ident, "ins_short_branch") != 0) {
+        fprintf(fp, "  virtual int            %s() const { return %s; }\n", attr->_ident, attr->_val);
       }
       // Check value for ins_avoid_back_to_back, and if it is true (1), set the flag
-      if (!strcmp(attr->_ident,"ins_avoid_back_to_back") && attr->int_val(*this) != 0)
+      if (strcmp(attr->_ident, "ins_avoid_back_to_back") == 0 && attr->int_val(*this) != 0)
         avoid_back_to_back = true;
+      if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0)
+        fprintf(fp, "  virtual bool           is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
+
       attr = (Attribute *)attr->_next;
     }
 
@@ -1619,7 +1641,12 @@
     // Output the opcode function and the encode function here using the
     // encoding class information in the _insencode slot.
     if ( instr->_insencode ) {
-      fprintf(fp,"  virtual void           emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;\n");
+      if (instr->postalloc_expands()) {
+        fprintf(fp,"  virtual bool           requires_postalloc_expand() const { return true; }\n");
+        fprintf(fp,"  virtual void           postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);\n");
+      } else {
+        fprintf(fp,"  virtual void           emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;\n");
+      }
     }
 
     // virtual function for getting the size of an instruction
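For a postalloc-expanding instruction, the generated header therefore declares, in place of the usual emit():

      virtual bool           requires_postalloc_expand() const { return true; }
      virtual void           postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);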
@@ -1636,6 +1663,19 @@
             instr->ideal_Opcode(_globalNames) );
     }
 
+    if (instr->needs_constant_base() &&
+        !instr->is_mach_constant()) {  // These inherit the function from MachConstantNode.
+      fprintf(fp,"  virtual uint           mach_constant_base_node_input() const { ");
+      if (instr->is_ideal_call() != Form::invalid_type &&
+          instr->is_ideal_call() != Form::JAVA_LEAF) {
+        // MachConstantBase goes behind arguments, but before jvms.
+        fprintf(fp,"assert(tf() && tf()->domain(), \"\"); return tf()->domain()->cnt();");
+      } else {
+        fprintf(fp,"return req()-1;");
+      }
+      fprintf(fp," }\n");
+    }
+
     // Allow machine-independent optimization, invert the sense of the IF test
     if( instr->is_ideal_if() ) {
       fprintf(fp,"  virtual void           negate() { \n");
@@ -1804,6 +1844,7 @@
     if( instr->expands() || instr->needs_projections() ||
         instr->has_temps() ||
         instr->is_mach_constant() ||
+        instr->needs_constant_base() ||
         instr->_matrule != NULL &&
         instr->num_opnds() != instr->num_unique_opnds() ) {
       fprintf(fp,"  virtual MachNode      *Expand(State *state, Node_List &proj_list, Node* mem);\n");
--- a/src/share/vm/asm/assembler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/asm/assembler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/assembler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/asm/assembler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -204,10 +204,11 @@
   CodeSection* _code_section;          // section within the code buffer
   OopRecorder* _oop_recorder;          // support for relocInfo::oop_type
 
+ public:
   // Code emission & accessing
   address addr_at(int pos) const { return code_section()->start() + pos; }
 
-
+ protected:
   // This routine is called when a label is used for an address.
   // Labels and displacements truck in offsets, but target must return a PC.
   address target(Label& L)             { return code_section()->target(L, pc()); }
--- a/src/share/vm/asm/codeBuffer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/asm/codeBuffer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/macroAssembler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/asm/macroAssembler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/asm/macroAssembler.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/asm/macroAssembler.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Canonicalizer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_CodeStubs.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_CodeStubs.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Compilation.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Compilation.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Compilation.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Compilation.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -259,6 +259,9 @@
   }
 
   ciKlass* cha_exact_type(ciType* type);
+
+  // Dump inlining replay data to the stream.
+  void dump_inline_data(outputStream* out) { /* do nothing now */ }
 };
 
 
--- a/src/share/vm/c1/c1_Compiler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Compiler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Compiler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Compiler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,6 @@
   // Name of this compiler
   virtual const char* name()                     { return "C1"; }
 
-  virtual bool is_c1()                           { return true; };
-
   // Missing feature tests
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
--- a/src/share/vm/c1/c1_FrameMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_FrameMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -133,7 +133,7 @@
     }
   }
 
-  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, sizeargs);
+  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
   LIR_OprList* args = new LIR_OprList(signature->length());
   for (i = 0; i < sizeargs;) {
     BasicType t = sig_bt[i];
--- a/src/share/vm/c1/c1_FrameMap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_FrameMap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2276,7 +2276,7 @@
   if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) {
     assert(instruction->exception_state() == NULL
            || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState
-           || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->jvmti_can_access_local_variables()),
+           || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()),
            "exception_state should be of exception kind");
     return new XHandlers();
   }
@@ -2367,7 +2367,7 @@
       // This scope and all callees do not handle exceptions, so the local
       // variables of this scope are not needed. However, the scope itself is
       // required for a correct exception stack trace -> clear out the locals.
-      if (_compilation->env()->jvmti_can_access_local_variables()) {
+      if (_compilation->env()->should_retain_local_variables()) {
         cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci());
       } else {
         cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci());
@@ -3251,7 +3251,7 @@
 ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) {
   ValueStack* s = copy_state_exhandling_with_bci(bci);
   if (s == NULL) {
-    if (_compilation->env()->jvmti_can_access_local_variables()) {
+    if (_compilation->env()->should_retain_local_variables()) {
       s = state()->copy(ValueStack::ExceptionState, bci);
     } else {
       s = state()->copy(ValueStack::EmptyExceptionState, bci);
--- a/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_IR.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_IR.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_IR.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_IR.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Instruction.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Instruction.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@
 
 void Instruction::update_exception_state(ValueStack* state) {
   if (state != NULL && (state->kind() == ValueStack::EmptyExceptionState || state->kind() == ValueStack::ExceptionState)) {
-    assert(state->kind() == ValueStack::EmptyExceptionState || Compilation::current()->env()->jvmti_can_access_local_variables(), "unexpected state kind");
+    assert(state->kind() == ValueStack::EmptyExceptionState || Compilation::current()->env()->should_retain_local_variables(), "unexpected state kind");
     _exception_state = state;
   } else {
     _exception_state = NULL;
--- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -858,9 +858,7 @@
 
 void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
 #ifndef PRODUCT
-  if (VerifyOopMaps || VerifyOops) {
-    bool v = VerifyOops;
-    VerifyOops = true;
+  if (VerifyOops) {
     OopMapStream s(info->oop_map());
     while (!s.is_done()) {
       OopMapValue v = s.current();
@@ -883,7 +881,6 @@
 
       s.next();
     }
-    VerifyOops = v;
   }
 #endif
 }
--- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -3288,7 +3288,10 @@
   ciSignature* signature_at_call = NULL;
   x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
 
-  ciKlass* exact = profile_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
+  // The offset within the MDO of the entry to update may be too large
+  // to be used in load/store instructions on some platforms. So have
+  // profile_type() compute the address of the profile in a register.
+  ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
                                 ret->type(), x->ret(), mdp,
                                 !x->needs_null_check(),
                                 signature_at_call->return_type()->as_klass(),
--- a/src/share/vm/c1/c1_LinearScan.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Optimizer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Optimizer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_RangeCheckElimination.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_RangeCheckElimination.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_Runtime1.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_Runtime1.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_ValueMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_ValueMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_ValueMap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_ValueMap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_ValueStack.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_ValueStack.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -52,7 +52,7 @@
   , _stack()
   , _locks(copy_from->locks_size())
 {
-  assert(kind != EmptyExceptionState || !Compilation::current()->env()->jvmti_can_access_local_variables(), "need locals");
+  assert(kind != EmptyExceptionState || !Compilation::current()->env()->should_retain_local_variables(), "need locals");
   if (kind != EmptyExceptionState) {
     // only allocate space if we need to copy the locals-array
     _locals = Values(copy_from->locals_size());
--- a/src/share/vm/c1/c1_ValueStack.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_ValueStack.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -75,7 +75,7 @@
 
   void set_caller_state(ValueStack* s)           {
     assert(kind() == EmptyExceptionState ||
-           (Compilation::current()->env()->jvmti_can_access_local_variables() && kind() == ExceptionState),
+           (Compilation::current()->env()->should_retain_local_variables() && kind() == ExceptionState),
            "only EmptyExceptionStates can be modified");
     _caller_state = s;
   }
--- a/src/share/vm/c1/c1_globals.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_globals.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/c1/c1_globals.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/c1/c1_globals.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "c1_globals_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "c1_globals_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "c1_globals_bsd.hpp"
 #endif
@@ -269,9 +272,6 @@
   develop(bool, PrintNotLoaded, false,                                      \
           "Prints where classes are not loaded during code generation")     \
                                                                             \
-  notproduct(bool, VerifyOopMaps, false,                                    \
-          "Adds oopmap verification code to the generated code")            \
-                                                                            \
   develop(bool, PrintLIR, false,                                            \
           "print low-level IR")                                             \
                                                                             \
--- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/bcEscapeAnalyzer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/bcEscapeAnalyzer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciArray.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciArray.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciArray.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciArray.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciClassList.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciClassList.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,6 +103,7 @@
 friend class ciMethodType;             \
 friend class ciReceiverTypeData;       \
 friend class ciTypeEntries;            \
+friend class ciSpeculativeTrapData;    \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
 friend class ciObjArray;               \
--- a/src/share/vm/ci/ciConstant.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciConstant.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciEnv.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -136,6 +136,11 @@
   _ClassCastException_instance = NULL;
   _the_null_string = NULL;
   _the_min_jint_string = NULL;
+
+  _jvmti_can_hotswap_or_post_breakpoint = false;
+  _jvmti_can_access_local_variables = false;
+  _jvmti_can_post_on_exceptions = false;
+  _jvmti_can_pop_frame = false;
 }
 
 ciEnv::ciEnv(Arena* arena) {
@@ -186,6 +191,11 @@
   _ClassCastException_instance = NULL;
   _the_null_string = NULL;
   _the_min_jint_string = NULL;
+
+  _jvmti_can_hotswap_or_post_breakpoint = false;
+  _jvmti_can_access_local_variables = false;
+  _jvmti_can_post_on_exceptions = false;
+  _jvmti_can_pop_frame = false;
 }
 
 ciEnv::~ciEnv() {
@@ -205,6 +215,31 @@
   _jvmti_can_hotswap_or_post_breakpoint = JvmtiExport::can_hotswap_or_post_breakpoint();
   _jvmti_can_access_local_variables     = JvmtiExport::can_access_local_variables();
   _jvmti_can_post_on_exceptions         = JvmtiExport::can_post_on_exceptions();
+  _jvmti_can_pop_frame                  = JvmtiExport::can_pop_frame();
+}
+
+bool ciEnv::should_retain_local_variables() const {
+  return _jvmti_can_access_local_variables || _jvmti_can_pop_frame;
+}
+
+bool ciEnv::jvmti_state_changed() const {
+  if (!_jvmti_can_access_local_variables &&
+      JvmtiExport::can_access_local_variables()) {
+    return true;
+  }
+  if (!_jvmti_can_hotswap_or_post_breakpoint &&
+      JvmtiExport::can_hotswap_or_post_breakpoint()) {
+    return true;
+  }
+  if (!_jvmti_can_post_on_exceptions &&
+      JvmtiExport::can_post_on_exceptions()) {
+    return true;
+  }
+  if (!_jvmti_can_pop_frame &&
+      JvmtiExport::can_pop_frame()) {
+    return true;
+  }
+  return false;
 }
 
 // ------------------------------------------------------------------
@@ -940,13 +975,7 @@
     No_Safepoint_Verifier nsv;
 
     // Change in Jvmti state may invalidate compilation.
-    if (!failing() &&
-        ( (!jvmti_can_hotswap_or_post_breakpoint() &&
-           JvmtiExport::can_hotswap_or_post_breakpoint()) ||
-          (!jvmti_can_access_local_variables() &&
-           JvmtiExport::can_access_local_variables()) ||
-          (!jvmti_can_post_on_exceptions() &&
-           JvmtiExport::can_post_on_exceptions()) )) {
+    if (!failing() && jvmti_state_changed()) {
       record_failure("Jvmti state change invalidated dependencies");
     }
 
@@ -1147,6 +1176,33 @@
 
 // Don't change thread state and acquire any locks.
 // Safe to call from VM error reporter.
+
+void ciEnv::dump_compile_data(outputStream* out) {
+  CompileTask* task = this->task();
+  Method* method = task->method();
+  int entry_bci = task->osr_bci();
+  int comp_level = task->comp_level();
+  out->print("compile %s %s %s %d %d",
+                method->klass_name()->as_quoted_ascii(),
+                method->name()->as_quoted_ascii(),
+                method->signature()->as_quoted_ascii(),
+                entry_bci, comp_level);
+  if (compiler_data() != NULL) {
+    if (is_c2_compile(comp_level)) { // C2 or Shark
+#ifdef COMPILER2
+      // Dump C2 inlining data.
+      ((Compile*)compiler_data())->dump_inline_data(out);
+#endif
+    } else if (is_c1_compile(comp_level)) { // C1
+#ifdef COMPILER1
+      // Dump C1 inlining data.
+      ((Compilation*)compiler_data())->dump_inline_data(out);
+#endif
+    }
+  }
+  out->cr();
+}
+
 void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   ResourceMark rm;
 #if INCLUDE_JVMTI
@@ -1160,16 +1216,7 @@
   for (int i = 0; i < objects->length(); i++) {
     objects->at(i)->dump_replay_data(out);
   }
-  CompileTask* task = this->task();
-  Method* method = task->method();
-  int entry_bci = task->osr_bci();
-  int comp_level = task->comp_level();
-  // Klass holder = method->method_holder();
-  out->print_cr("compile %s %s %s %d %d",
-                method->klass_name()->as_quoted_ascii(),
-                method->name()->as_quoted_ascii(),
-                method->signature()->as_quoted_ascii(),
-                entry_bci, comp_level);
+  dump_compile_data(out);
   out->flush();
 }
 
@@ -1179,3 +1226,45 @@
     dump_replay_data_unsafe(out);
   )
 }
+
+void ciEnv::dump_replay_data(int compile_id) {
+  static char buffer[O_BUFLEN];
+  int ret = jio_snprintf(buffer, O_BUFLEN, "replay_pid%p_compid%d.log", os::current_process_id(), compile_id);
+  if (ret > 0) {
+    int fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    if (fd != -1) {
+      FILE* replay_data_file = os::open(fd, "w");
+      if (replay_data_file != NULL) {
+        fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
+        dump_replay_data(&replay_data_stream);
+        tty->print("# Compiler replay data is saved as: ");
+        tty->print_cr(buffer);
+      } else {
+        tty->print_cr("# Can't open file to dump replay data.");
+      }
+    }
+  }
+}
+
+void ciEnv::dump_inline_data(int compile_id) {
+  static char buffer[O_BUFLEN];
+  int ret = jio_snprintf(buffer, O_BUFLEN, "inline_pid%p_compid%d.log", os::current_process_id(), compile_id);
+  if (ret > 0) {
+    int fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    if (fd != -1) {
+      FILE* inline_data_file = os::open(fd, "w");
+      if (inline_data_file != NULL) {
+        fileStream inline_data_stream(inline_data_file, /*need_close=*/true);
+        GUARDED_VM_ENTRY(
+          MutexLocker ml(Compile_lock);
+          dump_compile_data(&inline_data_stream);
+        )
+        inline_data_stream.flush();
+        tty->print("# Compiler inline data is saved as: ");
+        tty->print_cr(buffer);
+      } else {
+        tty->print_cr("# Can't open file to dump inline data.");
+      }
+    }
+  }
+}
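
Both dumpers above share one pattern: build a per-pid, per-compilation file name, open it, and stream the data into it. Below is a minimal standalone POSIX sketch of that pattern (the helper name open_dump_file is hypothetical; the real code goes through jio_snprintf, os::open and fileStream):

    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Open "<prefix>_pid<pid>_compid<id>.log" for writing, mirroring the
    // naming used by dump_replay_data(int) and dump_inline_data(int).
    // Error handling beyond NULL returns is elided in this sketch.
    FILE* open_dump_file(const char* prefix, int compile_id) {
      char name[256];
      int n = snprintf(name, sizeof(name), "%s_pid%d_compid%d.log",
                       prefix, (int)getpid(), compile_id);
      if (n <= 0 || n >= (int)sizeof(name)) return NULL;
      int fd = open(name, O_RDWR | O_CREAT | O_TRUNC, 0666);
      return (fd == -1) ? NULL : fdopen(fd, "w");
    }
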
--- a/src/share/vm/ci/ciEnv.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciEnv.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,6 +69,7 @@
   bool  _jvmti_can_hotswap_or_post_breakpoint;
   bool  _jvmti_can_access_local_variables;
   bool  _jvmti_can_post_on_exceptions;
+  bool  _jvmti_can_pop_frame;
 
   // Cache DTrace flags
   bool  _dtrace_extended_probes;
@@ -332,8 +333,9 @@
 
   // Cache Jvmti state
   void  cache_jvmti_state();
+  bool  jvmti_state_changed() const;
+  bool  should_retain_local_variables() const;
   bool  jvmti_can_hotswap_or_post_breakpoint() const { return _jvmti_can_hotswap_or_post_breakpoint; }
-  bool  jvmti_can_access_local_variables()     const { return _jvmti_can_access_local_variables; }
   bool  jvmti_can_post_on_exceptions()         const { return _jvmti_can_post_on_exceptions; }
 
   // Cache DTrace flags
@@ -451,8 +453,11 @@
   void metadata_do(void f(Metadata*)) { _factory->metadata_do(f); }
 
   // Dump the compilation replay data for the ciEnv to the stream.
+  void dump_replay_data(int compile_id);
+  void dump_inline_data(int compile_id);
   void dump_replay_data(outputStream* out);
   void dump_replay_data_unsafe(outputStream* out);
+  void dump_compile_data(outputStream* out);
 };
 
 #endif // SHARE_VM_CI_CIENV_HPP
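
The jvmti_state_changed() declared above encodes a one-directional rule: only a capability that was off when the state was cached and is on now invalidates the compilation, since code compiled under the stronger capability-on assumption stays valid when a capability turns off. A standalone model of that check and of should_retain_local_variables() (the Caps struct is hypothetical; the real code reads JvmtiExport):

    struct Caps {
      bool hotswap_or_breakpoint;
      bool access_local_variables;
      bool post_on_exceptions;
      bool pop_frame;
    };

    // True if any capability that was false at cache time is true now.
    bool jvmti_state_changed(const Caps& cached, const Caps& now) {
      return (!cached.hotswap_or_breakpoint  && now.hotswap_or_breakpoint)
          || (!cached.access_local_variables && now.access_local_variables)
          || (!cached.post_on_exceptions     && now.post_on_exceptions)
          || (!cached.pop_frame              && now.pop_frame);
    }

    // Locals must stay live if the debugger can read them or pop frames.
    bool should_retain_local_variables(const Caps& c) {
      return c.access_local_variables || c.pop_frame;
    }
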
--- a/src/share/vm/ci/ciField.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciField.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -201,16 +201,10 @@
       return;
     }
 
-    // This field just may be constant.  The only cases where it will
-    // not be constant are:
-    //
-    // 1. The field holds a non-perm-space oop.  The field is, strictly
-    //    speaking, constant but we cannot embed non-perm-space oops into
-    //    generated code.  For the time being we need to consider the
-    //    field to be not constant.
-    // 2. The field is a *special* static&final field whose value
-    //    may change.  The three examples are java.lang.System.in,
-    //    java.lang.System.out, and java.lang.System.err.
+    // This field may well be constant.  The only case where it will
+    // not be constant is when the field is a *special* static&final field
+    // whose value may change.  The three examples are java.lang.System.in,
+    // java.lang.System.out, and java.lang.System.err.
 
     KlassHandle k = _holder->get_Klass();
     assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
--- a/src/share/vm/ci/ciField.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciField.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -130,9 +130,7 @@
   //   1. The field is both static and final
   //   2. The canonical holder of the field has undergone
   //      static initialization.
-  //   3. If the field is an object or array, then the oop
-  //      in question is allocated in perm space.
-  //   4. The field is not one of the special static/final
+  //   3. The field is not one of the special static/final
   //      non-constant fields.  These are java.lang.System.in
   //      and java.lang.System.out.  Abomination.
   //
--- a/src/share/vm/ci/ciFlags.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciFlags.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciInstance.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciInstance.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciInstanceKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciInstanceKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciMethod.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciMethod.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -412,7 +412,7 @@
 // information.
 MethodLivenessResult ciMethod::liveness_at_bci(int bci) {
   MethodLivenessResult result = raw_liveness_at_bci(bci);
-  if (CURRENT_ENV->jvmti_can_access_local_variables() || DeoptimizeALot || CompileTheWorld) {
+  if (CURRENT_ENV->should_retain_local_variables() || DeoptimizeALot || CompileTheWorld) {
     // Keep all locals live for the user's edification and amusement.
     result.at_put_range(0, result.size(), true);
   }
@@ -1357,15 +1357,21 @@
 
 #undef FETCH_FLAG_FROM_VM
 
+void ciMethod::dump_name_as_ascii(outputStream* st) {
+  Method* method = get_Method();
+  st->print("%s %s %s",
+            method->klass_name()->as_quoted_ascii(),
+            method->name()->as_quoted_ascii(),
+            method->signature()->as_quoted_ascii());
+}
+
 void ciMethod::dump_replay_data(outputStream* st) {
   ResourceMark rm;
   Method* method = get_Method();
   MethodCounters* mcs = method->method_counters();
-  Klass*  holder = method->method_holder();
-  st->print_cr("ciMethod %s %s %s %d %d %d %d %d",
-               holder->name()->as_quoted_ascii(),
-               method->name()->as_quoted_ascii(),
-               method->signature()->as_quoted_ascii(),
+  st->print("ciMethod ");
+  dump_name_as_ascii(st);
+  st->print_cr(" %d %d %d %d %d",
                mcs == NULL ? 0 : mcs->invocation_counter()->raw_counter(),
                mcs == NULL ? 0 : mcs->backedge_counter()->raw_counter(),
                interpreter_invocation_count(),
--- a/src/share/vm/ci/ciMethod.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciMethod.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -310,10 +310,13 @@
   bool is_accessor    () const;
   bool is_initializer () const;
   bool can_be_statically_bound() const           { return _can_be_statically_bound; }
-  void dump_replay_data(outputStream* st);
   bool is_boxing_method() const;
   bool is_unboxing_method() const;
 
+  // Replay data methods
+  void dump_name_as_ascii(outputStream* st);
+  void dump_replay_data(outputStream* st);
+
   // Print the bytecodes of this method.
   void print_codes_on(outputStream* st);
   void print_codes() {
--- a/src/share/vm/ci/ciMethodData.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciMethodData.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,6 +78,35 @@
   _parameters = NULL;
 }
 
+void ciMethodData::load_extra_data() {
+  MethodData* mdo = get_MethodData();
+
+  // Speculative trap entries also hold a pointer to a Method, so they need to be translated.
+  DataLayout* dp_src  = mdo->extra_data_base();
+  DataLayout* end_src = mdo->extra_data_limit();
+  DataLayout* dp_dst  = extra_data_base();
+  for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
+    assert(dp_src < end_src, "moved past end of extra data");
+    assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
+    switch(dp_src->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
+      SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
+      data_dst->translate_from(data_src);
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      break;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      // An empty slot or ArgInfoData entry marks the end of the trap data
+      return;
+    default:
+      fatal(err_msg("bad tag = %d", dp_src->tag()));
+    }
+  }
+}
+
 void ciMethodData::load_data() {
   MethodData* mdo = get_MethodData();
   if (mdo == NULL) {
@@ -116,6 +145,8 @@
     parameters->translate_from(mdo->parameters_type_data());
   }
 
+  load_extra_data();
+
   // Note:  Extra data are all BitData, and do not need translation.
   _current_mileage = MethodData::mileage_of(mdo->method());
   _invocation_counter = mdo->invocation_count();
@@ -156,6 +187,12 @@
   set_type(translate_klass(k));
 }
 
+void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
+  Method* m = data->as_SpeculativeTrapData()->method();
+  ciMethod* ci_m = CURRENT_ENV->get_method(m);
+  set_method(ci_m);
+}
+
 // Get the data at an arbitrary (sort of) data index.
 ciProfileData* ciMethodData::data_at(int data_index) {
   if (out_of_bounds(data_index)) {
@@ -203,32 +240,64 @@
   return next;
 }
 
-// Translate a bci to its corresponding data, or NULL.
-ciProfileData* ciMethodData::bci_to_data(int bci) {
-  ciProfileData* data = data_before(bci);
-  for ( ; is_valid(data); data = next_data(data)) {
-    if (data->bci() == bci) {
-      set_hint_di(dp_to_di(data->dp()));
-      return data;
-    } else if (data->bci() > bci) {
-      break;
-    }
-  }
+ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots) {
   // bci_to_extra_data(bci) ...
   DataLayout* dp  = data_layout_at(data_size());
   DataLayout* end = data_layout_at(data_size() + extra_data_size());
-  for (; dp < end; dp = MethodData::next_extra(dp)) {
-    if (dp->tag() == DataLayout::no_tag) {
+  two_free_slots = false;
+  for (; dp < end; dp = MethodData::next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
       _saw_free_extra_data = true;  // observed an empty slot (common case)
+      two_free_slots = (MethodData::next_extra(dp)->tag() == DataLayout::no_tag);
       return NULL;
+    case DataLayout::arg_info_data_tag:
+      return NULL; // ArgInfoData is at the end of extra data section.
+    case DataLayout::bit_data_tag:
+      if (m == NULL && dp->bci() == bci) {
+        return new ciBitData(dp);
+      }
+      break;
+    case DataLayout::speculative_trap_data_tag: {
+      ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
+      // data->method() might be null if the MDO is snapshotted
+      // concurrently with a trap
+      if (m != NULL && data->method() == m && dp->bci() == bci) {
+        return data;
+      }
+      break;
+    }
+    default:
+      fatal(err_msg("bad tag = %d", dp->tag()));
     }
-    if (dp->tag() == DataLayout::arg_info_data_tag) {
-      break; // ArgInfoData is at the end of extra data section.
+  }
+  return NULL;
+}
+
+// Translate a bci to its corresponding data, or NULL.
+ciProfileData* ciMethodData::bci_to_data(int bci, ciMethod* m) {
+  // If m is not NULL we look for a SpeculativeTrapData entry
+  if (m == NULL) {
+    ciProfileData* data = data_before(bci);
+    for ( ; is_valid(data); data = next_data(data)) {
+      if (data->bci() == bci) {
+        set_hint_di(dp_to_di(data->dp()));
+        return data;
+      } else if (data->bci() > bci) {
+        break;
+      }
     }
-    if (dp->bci() == bci) {
-      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-      return new ciBitData(dp);
-    }
+  }
+  bool two_free_slots = false;
+  ciProfileData* result = bci_to_extra_data(bci, m, two_free_slots);
+  if (result != NULL) {
+    return result;
+  }
+  if (m != NULL && !two_free_slots) {
+    // We were looking for a SpeculativeTrapData entry but didn't find
+    // one, and there is no room left for more SpeculativeTrapData
+    // entries, so fall back to the non-SpeculativeTrapData entries.
+    return bci_to_data(bci, NULL);
   }
   return NULL;
 }
@@ -525,18 +594,25 @@
   st->print_cr("--- Extra data:");
   DataLayout* dp  = data_layout_at(data_size());
   DataLayout* end = data_layout_at(data_size() + extra_data_size());
-  for (; dp < end; dp = MethodData::next_extra(dp)) {
-    if (dp->tag() == DataLayout::no_tag)  continue;
-    if (dp->tag() == DataLayout::bit_data_tag) {
+  for (;; dp = MethodData::next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
+    switch (dp->tag()) {
+    case DataLayout::no_tag:
+      continue;
+    case DataLayout::bit_data_tag:
       data = new BitData(dp);
-    } else {
-      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
+      break;
+    case DataLayout::arg_info_data_tag:
       data = new ciArgInfoData(dp);
       dp = end; // ArgInfoData is at the end of extra data section.
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
     data->print_data_on(st);
+    if (dp >= end) return;
   }
 }
 
@@ -569,8 +645,8 @@
   st->cr();
 }
 
-void ciCallTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciCallTypeData");
+void ciCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciCallTypeData", extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -599,18 +675,18 @@
   }
 }
 
-void ciReceiverTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciReceiverTypeData");
+void ciReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciReceiverTypeData", extra);
   print_receiver_data_on(st);
 }
 
-void ciVirtualCallData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciVirtualCallData");
+void ciVirtualCallData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciVirtualCallData", extra);
   rtd_super()->print_receiver_data_on(st);
 }
 
-void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciVirtualCallTypeData");
+void ciVirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciVirtualCallTypeData", extra);
   rtd_super()->print_receiver_data_on(st);
   if (has_arguments()) {
     tab(st, true);
@@ -624,8 +700,15 @@
   }
 }
 
-void ciParametersTypeData::print_data_on(outputStream* st) const {
-  st->print_cr("Parametertypes");
+void ciParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
+  st->print_cr("ciParametersTypeData");
   parameters()->print_data_on(st);
 }
+
+void ciSpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
+  st->print_cr("ciSpeculativeTrapData");
+  tab(st);
+  method()->print_short_name(st);
+  st->cr();
+}
 #endif
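
A standalone model of the tag-dispatched scan that bci_to_extra_data() performs above (the fixed-size Slot records are hypothetical; the real DataLayout entries vary in size and are advanced with MethodData::next_extra()):

    #include <cstddef>

    enum Tag { NO_TAG, BIT_DATA, ARG_INFO, SPECULATIVE_TRAP };
    struct Slot { Tag tag; int bci; const void* method; };

    // Returns the matching slot, or NULL. two_free_slots reports whether
    // two empty slots remain, i.e. there is still room to allocate a new
    // SpeculativeTrapData entry.
    const Slot* scan_extra(const Slot* dp, const Slot* end,
                           int bci, const void* m, bool& two_free_slots) {
      two_free_slots = false;
      for (; dp < end; ++dp) {
        switch (dp->tag) {
        case NO_TAG:           // an empty slot ends the extra-data section
          two_free_slots = (dp + 1 < end) && ((dp + 1)->tag == NO_TAG);
          return NULL;
        case ARG_INFO:         // ArgInfoData terminates the section too
          return NULL;
        case BIT_DATA:         // plain traps match on bci alone
          if (m == NULL && dp->bci == bci) return dp;
          break;
        case SPECULATIVE_TRAP: // speculative traps match on (method, bci)
          if (m != NULL && dp->method == m && dp->bci == bci) return dp;
          break;
        }
      }
      return NULL;
    }
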
--- a/src/share/vm/ci/ciMethodData.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciMethodData.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
 #include "ci/ciUtilities.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/deoptimization.hpp"
 
 class ciBitData;
 class ciCounterData;
@@ -44,6 +45,7 @@
 class ciCallTypeData;
 class ciVirtualCallTypeData;
 class ciParametersTypeData;
+class ciSpeculativeTrapData;
 
 typedef ProfileData ciProfileData;
 
@@ -173,7 +175,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -200,7 +202,7 @@
   }
   void translate_receiver_data_from(const ProfileData* data);
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
   void print_receiver_data_on(outputStream* st) const;
 #endif
 };
@@ -225,7 +227,7 @@
     rtd_super()->translate_receiver_data_from(data);
   }
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -287,7 +289,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -336,7 +338,26 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
+#endif
+};
+
+class ciSpeculativeTrapData : public SpeculativeTrapData {
+public:
+  ciSpeculativeTrapData(DataLayout* layout) : SpeculativeTrapData(layout) {}
+
+  virtual void translate_from(const ProfileData* data);
+
+  ciMethod* method() const {
+    return (ciMethod*)intptr_at(method_offset);
+  }
+
+  void set_method(ciMethod* m) {
+    set_intptr_at(method_offset, (intptr_t)m);
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -436,6 +457,16 @@
 
   ciArgInfoData *arg_info() const;
 
+  address data_base() const {
+    return (address) _data;
+  }
+  DataLayout* limit_data_position() const {
+    return (DataLayout*)((address)data_base() + _data_size);
+  }
+
+  void load_extra_data();
+  ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
+
 public:
   bool is_method_data() const { return true; }
 
@@ -475,9 +506,11 @@
   ciProfileData* next_data(ciProfileData* current);
   bool is_valid(ciProfileData* current) { return current != NULL; }
 
-  // Get the data at an arbitrary bci, or NULL if there is none.
-  ciProfileData* bci_to_data(int bci);
-  ciProfileData* bci_to_extra_data(int bci, bool create_if_missing);
+  DataLayout* extra_data_base() const { return limit_data_position(); }
+
+  // Get the data at an arbitrary bci, or NULL if there is none. If m
+  // is not NULL, look for a SpeculativeTrapData entry first.
+  ciProfileData* bci_to_data(int bci, ciMethod* m = NULL);
 
   uint overflow_trap_count() const {
     return _orig.overflow_trap_count();
@@ -496,12 +529,13 @@
 
   // Helpful query functions that decode trap_state.
   int has_trap_at(ciProfileData* data, int reason);
-  int has_trap_at(int bci, int reason) {
-    return has_trap_at(bci_to_data(bci), reason);
+  int has_trap_at(int bci, ciMethod* m, int reason) {
+    assert((m != NULL) == Deoptimization::reason_is_speculate(reason), "inconsistent method/reason");
+    return has_trap_at(bci_to_data(bci, m), reason);
   }
   int trap_recompiled_at(ciProfileData* data);
-  int trap_recompiled_at(int bci) {
-    return trap_recompiled_at(bci_to_data(bci));
+  int trap_recompiled_at(int bci, ciMethod* m) {
+    return trap_recompiled_at(bci_to_data(bci, m));
   }
 
   void clear_escape_info();
--- a/src/share/vm/ci/ciObjArrayKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObjArrayKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciObjArrayKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObjectFactory.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciObjectFactory.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciObjectFactory.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciObjectFactory.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciReplay.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciReplay.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -24,6 +24,8 @@
 #include "precompiled.hpp"
 #include "ci/ciMethodData.hpp"
 #include "ci/ciReplay.hpp"
+#include "ci/ciSymbol.hpp"
+#include "ci/ciKlass.hpp"
 #include "ci/ciUtilities.hpp"
 #include "compiler/compileBroker.hpp"
 #include "memory/allocation.inline.hpp"
@@ -37,74 +39,107 @@
 // ciReplay
 
 typedef struct _ciMethodDataRecord {
-  const char* klass;
-  const char* method;
-  const char* signature;
-  int state;
-  int current_mileage;
-  intptr_t* data;
-  int data_length;
-  char* orig_data;
-  int orig_data_length;
-  int oops_length;
-  jobject* oops_handles;
-  int* oops_offsets;
+  const char* _klass_name;
+  const char* _method_name;
+  const char* _signature;
+
+  int _state;
+  int _current_mileage;
+
+  intptr_t* _data;
+  char*     _orig_data;
+  jobject*  _oops_handles;
+  int*      _oops_offsets;
+  int       _data_length;
+  int       _orig_data_length;
+  int       _oops_length;
 } ciMethodDataRecord;
 
 typedef struct _ciMethodRecord {
-  const char* klass;
-  const char* method;
-  const char* signature;
-  int instructions_size;
-  int interpreter_invocation_count;
-  int interpreter_throwout_count;
-  int invocation_counter;
-  int backedge_counter;
+  const char* _klass_name;
+  const char* _method_name;
+  const char* _signature;
+
+  int _instructions_size;
+  int _interpreter_invocation_count;
+  int _interpreter_throwout_count;
+  int _invocation_counter;
+  int _backedge_counter;
 } ciMethodRecord;
 
-class CompileReplay;
+typedef struct _ciInlineRecord {
+  const char* _klass_name;
+  const char* _method_name;
+  const char* _signature;
+
+  int _inline_depth;
+  int _inline_bci;
+} ciInlineRecord;
+
+class CompileReplay;
 static CompileReplay* replay_state;
 
 class CompileReplay : public StackObj {
  private:
-  FILE*   stream;
-  Thread* thread;
-  Handle  protection_domain;
-  Handle  loader;
+  FILE*   _stream;
+  Thread* _thread;
+  Handle  _protection_domain;
+  Handle  _loader;
 
-  GrowableArray<ciMethodRecord*>     ci_method_records;
-  GrowableArray<ciMethodDataRecord*> ci_method_data_records;
+  GrowableArray<ciMethodRecord*>     _ci_method_records;
+  GrowableArray<ciMethodDataRecord*> _ci_method_data_records;
+
+  // Use pointer because we may need to return inline records
+  // without destroying them.
+  GrowableArray<ciInlineRecord*>*    _ci_inline_records;
 
   const char* _error_message;
 
-  char* bufptr;
-  char* buffer;
-  int   buffer_length;
-  int   buffer_end;
-  int   line_no;
+  char* _bufptr;
+  char* _buffer;
+  int   _buffer_length;
+  int   _buffer_pos;
+
+  // "compile" data
+  ciKlass* _iklass;
+  Method*  _imethod;
+  int      _entry_bci;
+  int      _comp_level;
 
  public:
   CompileReplay(const char* filename, TRAPS) {
-    thread = THREAD;
-    loader = Handle(thread, SystemDictionary::java_system_loader());
-    stream = fopen(filename, "rt");
-    if (stream == NULL) {
+    _thread = THREAD;
+    _loader = Handle(_thread, SystemDictionary::java_system_loader());
+    _protection_domain = Handle();
+
+    _stream = fopen(filename, "rt");
+    if (_stream == NULL) {
       fprintf(stderr, "ERROR: Can't open replay file %s\n", filename);
     }
-    buffer_length = 32;
-    buffer = NEW_RESOURCE_ARRAY(char, buffer_length);
+
+    _ci_inline_records = NULL;
     _error_message = NULL;
 
+    _buffer_length = 32;
+    _buffer = NEW_RESOURCE_ARRAY(char, _buffer_length);
+    _bufptr = _buffer;
+    _buffer_pos = 0;
+
+    _imethod = NULL;
+    _iklass  = NULL;
+    _entry_bci  = 0;
+    _comp_level = 0;
+
     test();
   }
 
   ~CompileReplay() {
-    if (stream != NULL) fclose(stream);
+    if (_stream != NULL) fclose(_stream);
   }
 
   void test() {
-    strcpy(buffer, "1 2 foo 4 bar 0x9 \"this is it\"");
-    bufptr = buffer;
+    strcpy(_buffer, "1 2 foo 4 bar 0x9 \"this is it\"");
+    _bufptr = _buffer;
     assert(parse_int("test") == 1, "what");
     assert(parse_int("test") == 2, "what");
     assert(strcmp(parse_string(), "foo") == 0, "what");
@@ -115,18 +150,18 @@
   }
 
   bool had_error() {
-    return _error_message != NULL || thread->has_pending_exception();
+    return _error_message != NULL || _thread->has_pending_exception();
   }
 
   bool can_replay() {
-    return !(stream == NULL || had_error());
+    return !(_stream == NULL || had_error());
   }
 
   void report_error(const char* msg) {
     _error_message = msg;
-    // Restore the buffer contents for error reporting
-    for (int i = 0; i < buffer_end; i++) {
-      if (buffer[i] == '\0') buffer[i] = ' ';
+    // Restore the _buffer contents for error reporting
+    for (int i = 0; i < _buffer_pos; i++) {
+      if (_buffer[i] == '\0') _buffer[i] = ' ';
     }
   }
 
@@ -137,10 +172,10 @@
 
     int v = 0;
     int read;
-    if (sscanf(bufptr, "%i%n", &v, &read) != 1) {
+    if (sscanf(_bufptr, "%i%n", &v, &read) != 1) {
       report_error(label);
     } else {
-      bufptr += read;
+      _bufptr += read;
     }
     return v;
   }
@@ -152,31 +187,31 @@
 
     intptr_t v = 0;
     int read;
-    if (sscanf(bufptr, INTPTR_FORMAT "%n", &v, &read) != 1) {
+    if (sscanf(_bufptr, INTPTR_FORMAT "%n", &v, &read) != 1) {
       report_error(label);
     } else {
-      bufptr += read;
+      _bufptr += read;
     }
     return v;
   }
 
   void skip_ws() {
     // Skip any leading whitespace
-    while (*bufptr == ' ' || *bufptr == '\t') {
-      bufptr++;
+    while (*_bufptr == ' ' || *_bufptr == '\t') {
+      _bufptr++;
     }
   }
 
 
   char* scan_and_terminate(char delim) {
-    char* str = bufptr;
-    while (*bufptr != delim && *bufptr != '\0') {
-      bufptr++;
+    char* str = _bufptr;
+    while (*_bufptr != delim && *_bufptr != '\0') {
+      _bufptr++;
     }
-    if (*bufptr != '\0') {
-      *bufptr++ = '\0';
+    if (*_bufptr != '\0') {
+      *_bufptr++ = '\0';
     }
-    if (bufptr == str) {
+    if (_bufptr == str) {
       // nothing here
       return NULL;
     }
@@ -195,8 +230,8 @@
 
     skip_ws();
 
-    if (*bufptr == '"') {
-      bufptr++;
+    if (*_bufptr == '"') {
+      _bufptr++;
       return scan_and_terminate('"');
     } else {
       return scan_and_terminate(' ');
@@ -273,7 +308,12 @@
     const char* str = parse_escaped_string();
     Symbol* klass_name = SymbolTable::lookup(str, (int)strlen(str), CHECK_NULL);
     if (klass_name != NULL) {
-      Klass* k = SystemDictionary::resolve_or_fail(klass_name, loader, protection_domain, true, THREAD);
+      Klass* k = NULL;
+      if (_iklass != NULL) {
+        k = (Klass*)_iklass->find_klass(ciSymbol::make(klass_name->as_C_string()))->constant_encoding();
+      } else {
+        k = SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD);
+      }
       if (HAS_PENDING_EXCEPTION) {
         oop throwable = PENDING_EXCEPTION;
         java_lang_Throwable::print(throwable, tty);
@@ -289,7 +329,7 @@
   // Lookup a klass
   Klass* resolve_klass(const char* klass, TRAPS) {
     Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL);
-    return SystemDictionary::resolve_or_fail(klass_name, loader, protection_domain, true, CHECK_NULL);
+    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, CHECK_NULL);
   }
 
   // Parse the standard tuple of <klass> <name> <signature>
@@ -304,40 +344,45 @@
     return m;
   }
 
+  int get_line(int c) {
+    while (c != EOF) {
+      if (_buffer_pos + 1 >= _buffer_length) {
+        int new_length = _buffer_length * 2;
+        // Next call will throw error in case of OOM.
+        _buffer = REALLOC_RESOURCE_ARRAY(char, _buffer, _buffer_length, new_length);
+        _buffer_length = new_length;
+      }
+      if (c == '\n') {
+        c = getc(_stream); // get next char
+        break;
+      } else if (c == '\r') {
+        // skip CR; the '\n' of a CRLF pair ends the line above
+      } else {
+        _buffer[_buffer_pos++] = c;
+      }
+      c = getc(_stream);
+    }
+    // null terminate it, reset the pointer
+    _buffer[_buffer_pos] = '\0'; // NL or EOF
+    _buffer_pos = 0;
+    _bufptr = _buffer;
+    return c;
+  }
+
   // Process each line of the replay file executing each command until
   // the file ends.
   void process(TRAPS) {
-    line_no = 1;
-    int pos = 0;
-    int c = getc(stream);
+    int line_no = 1;
+    int c = getc(_stream);
     while(c != EOF) {
-      if (pos + 1 >= buffer_length) {
-        int newl = buffer_length * 2;
-        char* newb = NEW_RESOURCE_ARRAY(char, newl);
-        memcpy(newb, buffer, pos);
-        buffer = newb;
-        buffer_length = newl;
+      c = get_line(c);
+      process_command(CHECK);
+      if (had_error()) {
+        tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
+        tty->print_cr("%s", _buffer);
+        return;
       }
-      if (c == '\n') {
-        // null terminate it, reset the pointer and process the line
-        buffer[pos] = '\0';
-        buffer_end = pos++;
-        bufptr = buffer;
-        process_command(CHECK);
-        if (had_error()) {
-          tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
-          tty->print_cr("%s", buffer);
-          return;
-        }
-        pos = 0;
-        buffer_end = 0;
-        line_no++;
-      } else if (c == '\r') {
-        // skip LF
-      } else {
-        buffer[pos++] = c;
-      }
-      c = getc(stream);
+      line_no++;
     }
   }
 
@@ -396,7 +441,37 @@
     return true;
   }
 
-  // compile <klass> <name> <signature> <entry_bci> <comp_level>
+  // compile <klass> <name> <signature> <entry_bci> <comp_level> inline <count> <depth> <bci> <klass> <name> <signature> ...
+  void* process_inline(ciMethod* imethod, Method* m, int entry_bci, int comp_level, TRAPS) {
+    _imethod    = m;
+    _iklass     = imethod->holder();
+    _entry_bci  = entry_bci;
+    _comp_level = comp_level;
+    int line_no = 1;
+    int c = getc(_stream);
+    while (c != EOF) {
+      c = get_line(c);
+      // Only "compile" commands are expected in an inline replay file.
+      char* cmd = parse_string();
+      if (cmd == NULL || strcmp("compile", cmd) != 0) {
+        return NULL;
+      }
+      process_compile(CHECK_NULL);
+      if (had_error()) {
+        tty->print_cr("Error while parsing line %d: %s\n", line_no, _error_message);
+        tty->print_cr("%s", _buffer);
+        return NULL;
+      }
+      if (_ci_inline_records != NULL && _ci_inline_records->length() > 0) {
+        // Found inlining record for the requested method.
+        return _ci_inline_records;
+      }
+      line_no++;
+    }
+    return NULL;
+  }
+
+  // compile <klass> <name> <signature> <entry_bci> <comp_level> inline <count> <depth> <bci> <klass> <name> <signature> ...
   void process_compile(TRAPS) {
     Method* method = parse_method(CHECK);
     if (had_error()) return;
@@ -410,6 +485,43 @@
     if (!is_valid_comp_level(comp_level)) {
       return;
     }
+    if (_imethod != NULL) {
+      // Replay Inlining
+      if (entry_bci != _entry_bci || comp_level != _comp_level) {
+        return;
+      }
+      const char* iklass_name  = _imethod->method_holder()->name()->as_utf8();
+      const char* imethod_name = _imethod->name()->as_utf8();
+      const char* isignature   = _imethod->signature()->as_utf8();
+      const char* klass_name   = method->method_holder()->name()->as_utf8();
+      const char* method_name  = method->name()->as_utf8();
+      const char* signature    = method->signature()->as_utf8();
+      if (strcmp(iklass_name,  klass_name)  != 0 ||
+          strcmp(imethod_name, method_name) != 0 ||
+          strcmp(isignature,   signature)   != 0) {
+        return;
+      }
+    }
+    int inline_count = 0;
+    if (parse_tag_and_count("inline", inline_count)) {
+      // Record inlining data
+      _ci_inline_records = new GrowableArray<ciInlineRecord*>();
+      for (int i = 0; i < inline_count; i++) {
+        int depth = parse_int("inline_depth");
+        int bci = parse_int("inline_bci");
+        if (had_error()) {
+          break;
+        }
+        Method* inl_method = parse_method(CHECK);
+        if (had_error()) {
+          break;
+        }
+        new_ciInlineRecord(inl_method, bci, depth);
+      }
+    }
+    if (_imethod != NULL) {
+      return; // Replay Inlining
+    }
     Klass* k = method->method_holder();
     ((InstanceKlass*)k)->initialize(THREAD);
     if (HAS_PENDING_EXCEPTION) {
@@ -442,11 +554,11 @@
     Method* method = parse_method(CHECK);
     if (had_error()) return;
     ciMethodRecord* rec = new_ciMethod(method);
-    rec->invocation_counter = parse_int("invocation_counter");
-    rec->backedge_counter = parse_int("backedge_counter");
-    rec->interpreter_invocation_count = parse_int("interpreter_invocation_count");
-    rec->interpreter_throwout_count = parse_int("interpreter_throwout_count");
-    rec->instructions_size = parse_int("instructions_size");
+    rec->_invocation_counter = parse_int("invocation_counter");
+    rec->_backedge_counter = parse_int("backedge_counter");
+    rec->_interpreter_invocation_count = parse_int("interpreter_invocation_count");
+    rec->_interpreter_throwout_count = parse_int("interpreter_throwout_count");
+    rec->_instructions_size = parse_int("instructions_size");
   }
 
   // ciMethodData <klass> <name> <signature> <state> <current mileage> orig <length> # # ... data <length> # # ... oops <length>
@@ -471,32 +583,32 @@
 
     // collect and record all the needed information for later
     ciMethodDataRecord* rec = new_ciMethodData(method);
-    rec->state = parse_int("state");
-    rec->current_mileage = parse_int("current_mileage");
+    rec->_state = parse_int("state");
+    rec->_current_mileage = parse_int("current_mileage");
 
-    rec->orig_data = parse_data("orig", rec->orig_data_length);
-    if (rec->orig_data == NULL) {
+    rec->_orig_data = parse_data("orig", rec->_orig_data_length);
+    if (rec->_orig_data == NULL) {
       return;
     }
-    rec->data = parse_intptr_data("data", rec->data_length);
-    if (rec->data == NULL) {
+    rec->_data = parse_intptr_data("data", rec->_data_length);
+    if (rec->_data == NULL) {
       return;
     }
-    if (!parse_tag_and_count("oops", rec->oops_length)) {
+    if (!parse_tag_and_count("oops", rec->_oops_length)) {
       return;
     }
-    rec->oops_handles = NEW_RESOURCE_ARRAY(jobject, rec->oops_length);
-    rec->oops_offsets = NEW_RESOURCE_ARRAY(int, rec->oops_length);
-    for (int i = 0; i < rec->oops_length; i++) {
+    rec->_oops_handles = NEW_RESOURCE_ARRAY(jobject, rec->_oops_length);
+    rec->_oops_offsets = NEW_RESOURCE_ARRAY(int, rec->_oops_length);
+    for (int i = 0; i < rec->_oops_length; i++) {
       int offset = parse_int("offset");
       if (had_error()) {
         return;
       }
       Klass* k = parse_klass(CHECK);
-      rec->oops_offsets[i] = offset;
+      rec->_oops_offsets[i] = offset;
       KlassHandle *kh = NEW_C_HEAP_OBJ(KlassHandle, mtCompiler);
       ::new ((void*)kh) KlassHandle(THREAD, k);
-      rec->oops_handles[i] = (jobject)kh;
+      rec->_oops_handles[i] = (jobject)kh;
     }
   }
 
@@ -570,6 +682,9 @@
         case JVM_CONSTANT_Utf8:
         case JVM_CONSTANT_Integer:
         case JVM_CONSTANT_Float:
+        case JVM_CONSTANT_MethodHandle:
+        case JVM_CONSTANT_MethodType:
+        case JVM_CONSTANT_InvokeDynamic:
           if (tag != cp->tag_at(i).value()) {
             report_error("tag mismatch: wrong class files?");
             return;
@@ -729,10 +844,10 @@
   // Create and initialize a record for a ciMethod
   ciMethodRecord* new_ciMethod(Method* method) {
     ciMethodRecord* rec = NEW_RESOURCE_OBJ(ciMethodRecord);
-    rec->klass =  method->method_holder()->name()->as_utf8();
-    rec->method = method->name()->as_utf8();
-    rec->signature = method->signature()->as_utf8();
-    ci_method_records.append(rec);
+    rec->_klass_name =  method->method_holder()->name()->as_utf8();
+    rec->_method_name = method->name()->as_utf8();
+    rec->_signature = method->signature()->as_utf8();
+    _ci_method_records.append(rec);
     return rec;
   }
 
@@ -741,11 +856,11 @@
     const char* klass_name =  method->method_holder()->name()->as_utf8();
     const char* method_name = method->name()->as_utf8();
     const char* signature = method->signature()->as_utf8();
-    for (int i = 0; i < ci_method_records.length(); i++) {
-      ciMethodRecord* rec = ci_method_records.at(i);
-      if (strcmp(rec->klass, klass_name) == 0 &&
-          strcmp(rec->method, method_name) == 0 &&
-          strcmp(rec->signature, signature) == 0) {
+    for (int i = 0; i < _ci_method_records.length(); i++) {
+      ciMethodRecord* rec = _ci_method_records.at(i);
+      if (strcmp(rec->_klass_name, klass_name) == 0 &&
+          strcmp(rec->_method_name, method_name) == 0 &&
+          strcmp(rec->_signature, signature) == 0) {
         return rec;
       }
     }
@@ -755,10 +870,10 @@
   // Create and initialize a record for a ciMethodData
   ciMethodDataRecord* new_ciMethodData(Method* method) {
     ciMethodDataRecord* rec = NEW_RESOURCE_OBJ(ciMethodDataRecord);
-    rec->klass =  method->method_holder()->name()->as_utf8();
-    rec->method = method->name()->as_utf8();
-    rec->signature = method->signature()->as_utf8();
-    ci_method_data_records.append(rec);
+    rec->_klass_name =  method->method_holder()->name()->as_utf8();
+    rec->_method_name = method->name()->as_utf8();
+    rec->_signature = method->signature()->as_utf8();
+    _ci_method_data_records.append(rec);
     return rec;
   }
 
@@ -767,25 +882,65 @@
     const char* klass_name =  method->method_holder()->name()->as_utf8();
     const char* method_name = method->name()->as_utf8();
     const char* signature = method->signature()->as_utf8();
-    for (int i = 0; i < ci_method_data_records.length(); i++) {
-      ciMethodDataRecord* rec = ci_method_data_records.at(i);
-      if (strcmp(rec->klass, klass_name) == 0 &&
-          strcmp(rec->method, method_name) == 0 &&
-          strcmp(rec->signature, signature) == 0) {
+    for (int i = 0; i < _ci_method_data_records.length(); i++) {
+      ciMethodDataRecord* rec = _ci_method_data_records.at(i);
+      if (strcmp(rec->_klass_name, klass_name) == 0 &&
+          strcmp(rec->_method_name, method_name) == 0 &&
+          strcmp(rec->_signature, signature) == 0) {
         return rec;
       }
     }
     return NULL;
   }
 
+  // Create and initialize a record for a ciInlineRecord
+  ciInlineRecord* new_ciInlineRecord(Method* method, int bci, int depth) {
+    ciInlineRecord* rec = NEW_RESOURCE_OBJ(ciInlineRecord);
+    rec->_klass_name =  method->method_holder()->name()->as_utf8();
+    rec->_method_name = method->name()->as_utf8();
+    rec->_signature = method->signature()->as_utf8();
+    rec->_inline_bci = bci;
+    rec->_inline_depth = depth;
+    _ci_inline_records->append(rec);
+    return rec;
+  }
+
+  // Lookup inlining data for a ciMethod
+  ciInlineRecord* find_ciInlineRecord(Method* method, int bci, int depth) {
+    if (_ci_inline_records != NULL) {
+      return find_ciInlineRecord(_ci_inline_records, method, bci, depth);
+    }
+    return NULL;
+  }
+
+  static ciInlineRecord* find_ciInlineRecord(GrowableArray<ciInlineRecord*>*  records,
+                                      Method* method, int bci, int depth) {
+    if (records != NULL) {
+      const char* klass_name  = method->method_holder()->name()->as_utf8();
+      const char* method_name = method->name()->as_utf8();
+      const char* signature   = method->signature()->as_utf8();
+      for (int i = 0; i < records->length(); i++) {
+        ciInlineRecord* rec = records->at(i);
+        if ((rec->_inline_bci == bci) &&
+            (rec->_inline_depth == depth) &&
+            (strcmp(rec->_klass_name, klass_name) == 0) &&
+            (strcmp(rec->_method_name, method_name) == 0) &&
+            (strcmp(rec->_signature, signature) == 0)) {
+          return rec;
+        }
+      }
+    }
+    return NULL;
+  }
+
   const char* error_message() {
     return _error_message;
   }
 
   void reset() {
     _error_message = NULL;
-    ci_method_records.clear();
-    ci_method_data_records.clear();
+    _ci_method_records.clear();
+    _ci_method_data_records.clear();
   }
 
   // Take an ascii string contain \u#### escapes and convert it to utf8
@@ -845,6 +1000,37 @@
   vm_exit(exit_code);
 }
 
+void* ciReplay::load_inline_data(ciMethod* method, int entry_bci, int comp_level) {
+  if (FLAG_IS_DEFAULT(InlineDataFile)) {
+    tty->print_cr("ERROR: no inline replay data file specified (use -XX:InlineDataFile=inline_pid12345.txt).");
+    return NULL;
+  }
+
+  VM_ENTRY_MARK;
+  // Load and parse the replay data
+  CompileReplay rp(InlineDataFile, THREAD);
+  if (!rp.can_replay()) {
+    tty->print_cr("ciReplay: !rp.can_replay()");
+    return NULL;
+  }
+  void* data = rp.process_inline(method, method->get_Method(), entry_bci, comp_level, THREAD);
+  if (HAS_PENDING_EXCEPTION) {
+    oop throwable = PENDING_EXCEPTION;
+    CLEAR_PENDING_EXCEPTION;
+    java_lang_Throwable::print(throwable, tty);
+    tty->cr();
+    java_lang_Throwable::print_stack_trace(throwable, tty);
+    tty->cr();
+    return NULL;
+  }
+
+  if (rp.had_error()) {
+    tty->print_cr("ciReplay: Failed on %s", rp.error_message());
+    return NULL;
+  }
+  return data;
+}
+
 int ciReplay::replay_impl(TRAPS) {
   HandleMark hm;
   ResourceMark rm;
@@ -890,7 +1076,6 @@
   return exit_code;
 }
 
-
 void ciReplay::initialize(ciMethodData* m) {
   if (replay_state == NULL) {
     return;
@@ -909,28 +1094,28 @@
     method->print_name(tty);
     tty->cr();
   } else {
-    m->_state = rec->state;
-    m->_current_mileage = rec->current_mileage;
-    if (rec->data_length != 0) {
-      assert(m->_data_size == rec->data_length * (int)sizeof(rec->data[0]), "must agree");
+    m->_state = rec->_state;
+    m->_current_mileage = rec->_current_mileage;
+    if (rec->_data_length != 0) {
+      assert(m->_data_size == rec->_data_length * (int)sizeof(rec->_data[0]), "must agree");
 
       // Write the correct ciObjects back into the profile data
       ciEnv* env = ciEnv::current();
-      for (int i = 0; i < rec->oops_length; i++) {
-        KlassHandle *h = (KlassHandle *)rec->oops_handles[i];
-        *(ciMetadata**)(rec->data + rec->oops_offsets[i]) =
+      for (int i = 0; i < rec->_oops_length; i++) {
+        KlassHandle *h = (KlassHandle *)rec->_oops_handles[i];
+        *(ciMetadata**)(rec->_data + rec->_oops_offsets[i]) =
           env->get_metadata((*h)());
       }
       // Copy the updated profile data into place as intptr_ts
 #ifdef _LP64
-      Copy::conjoint_jlongs_atomic((jlong *)rec->data, (jlong *)m->_data, rec->data_length);
+      Copy::conjoint_jlongs_atomic((jlong *)rec->_data, (jlong *)m->_data, rec->_data_length);
 #else
-      Copy::conjoint_jints_atomic((jint *)rec->data, (jint *)m->_data, rec->data_length);
+      Copy::conjoint_jints_atomic((jint *)rec->_data, (jint *)m->_data, rec->_data_length);
 #endif
     }
 
     // copy in the original header
-    Copy::conjoint_jbytes(rec->orig_data, (char*)&m->_orig, rec->orig_data_length);
+    Copy::conjoint_jbytes(rec->_orig_data, (char*)&m->_orig, rec->_orig_data_length);
   }
 }
 
@@ -939,12 +1124,38 @@
   if (replay_state == NULL) {
     return false;
   }
-
   VM_ENTRY_MARK;
   // ciMethod without a record shouldn't be inlined.
   return replay_state->find_ciMethodRecord(method->get_Method()) == NULL;
 }
 
+bool ciReplay::should_inline(void* data, ciMethod* method, int bci, int inline_depth) {
+  if (data != NULL) {
+    GrowableArray<ciInlineRecord*>*  records = (GrowableArray<ciInlineRecord*>*)data;
+    VM_ENTRY_MARK;
+    // Inline records are ordered by bci and depth.
+    return CompileReplay::find_ciInlineRecord(records, method->get_Method(), bci, inline_depth) != NULL;
+  } else if (replay_state != NULL) {
+    VM_ENTRY_MARK;
+    // Inline records are ordered by bci and depth.
+    return replay_state->find_ciInlineRecord(method->get_Method(), bci, inline_depth) != NULL;
+  }
+  return false;
+}
+
+bool ciReplay::should_not_inline(void* data, ciMethod* method, int bci, int inline_depth) {
+  if (data != NULL) {
+    GrowableArray<ciInlineRecord*>*  records = (GrowableArray<ciInlineRecord*>*)data;
+    VM_ENTRY_MARK;
+    // Inline records are ordered by bci and depth.
+    return CompileReplay::find_ciInlineRecord(records, method->get_Method(), bci, inline_depth) == NULL;
+  } else if (replay_state != NULL) {
+    VM_ENTRY_MARK;
+    // Inline records are ordered by bci and depth.
+    return replay_state->find_ciInlineRecord(method->get_Method(), bci, inline_depth) == NULL;
+  }
+  return false;
+}
 
 void ciReplay::initialize(ciMethod* m) {
   if (replay_state == NULL) {
@@ -965,14 +1176,14 @@
     tty->cr();
   } else {
     EXCEPTION_CONTEXT;
-    // m->_instructions_size = rec->instructions_size;
+    // m->_instructions_size = rec->_instructions_size;
     m->_instructions_size = -1;
-    m->_interpreter_invocation_count = rec->interpreter_invocation_count;
-    m->_interpreter_throwout_count = rec->interpreter_throwout_count;
+    m->_interpreter_invocation_count = rec->_interpreter_invocation_count;
+    m->_interpreter_throwout_count = rec->_interpreter_throwout_count;
     MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
     guarantee(mcs != NULL, "method counters allocation failed");
-    mcs->invocation_counter()->_counter = rec->invocation_counter;
-    mcs->backedge_counter()->_counter = rec->backedge_counter;
+    mcs->invocation_counter()->_counter = rec->_invocation_counter;
+    mcs->backedge_counter()->_counter = rec->_backedge_counter;
   }
 }
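
A standalone model of the get_line() refactoring introduced above: one logical line is accumulated into a buffer that doubles on demand, '\r' is dropped so CRLF files parse like LF files, and the first character of the next line (or EOF) is returned. The signature is hypothetical; the real code grows a resource array with REALLOC_RESOURCE_ARRAY:

    #include <cstdio>
    #include <cstdlib>

    // c is the first character of the current line; realloc failure
    // handling is elided in this sketch.
    int get_line(FILE* f, char** buf, int* cap, int c) {
      int pos = 0;
      while (c != EOF) {
        if (pos + 1 >= *cap) {
          *cap *= 2;
          *buf = (char*)realloc(*buf, *cap);
        }
        if (c == '\n') { c = getc(f); break; }  // hand back next line's start
        if (c != '\r') (*buf)[pos++] = (char)c; // drop CR of a CRLF pair
        c = getc(f);
      }
      (*buf)[pos] = '\0'; // terminate at NL or EOF
      return c;
    }
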
 
--- a/src/share/vm/ci/ciReplay.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciReplay.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -29,6 +29,73 @@
 
 // ciReplay
 
+//
+// Replay compilation of a java method using the information in a replay file.
+// Replay inlining decisions during compilation using the information in an inline file.
+//
+// NOTE: these replay functions only exist in the debug version of the VM.
+//
+// Replay compilation.
+// -------------------
+//
+// A replay data file, replay.txt, can be created by the Serviceability Agent
+// from a core file; see agent/doc/cireplay.html.
+//
+// $ java -cp <jdk>/lib/sa-jdi.jar sun.jvm.hotspot.CLHSDB
+// hsdb> attach <jdk>/bin/java ./core
+// hsdb> threads
+// t@10 Service Thread
+// t@9 C2 CompilerThread0
+// t@8 Signal Dispatcher
+// t@7 Finalizer
+// t@6 Reference Handler
+// t@2 main
+// hsdb> dumpreplaydata t@9 > replay.txt
+// hsdb> quit
+//
+// (Note: the SA can also be used to extract the app.jar and boot.jar files
+//  from a core file, to replay the compilation when only a core file is available.)
+//
+// A replay data file replay_pid%p.log is also created when the VM crashes
+// in a compiler thread during compilation. This is controlled by the
+// DumpReplayDataOnError flag, which is ON by default.
+//
+// A replay file replay_pid%p_compid%d.log can be created
+// for a specified java method during normal execution using the
+// CompileCommand option DumpReplay:
+//
+// -XX:CompileCommand=option,Benchmark::test,DumpReplay
+//
+// In this case the file name carries an additional compilation id, "_compid%d",
+// because the method could be compiled several times.
+//
+// To replay a compilation, the replay file must be specified:
+//
+// -XX:+ReplayCompiles -XX:ReplayDataFile=replay_pid2133.log
+//
+// The VM thread reads data from the file immediately after VM initialization
+// and puts the compilation task on the compile queue. After that it goes into
+// a wait state (the BackgroundCompilation flag is set to false) since there is
+// no program to execute. The VM exits when the compilation is finished.
+//
+//
+// Replay inlining.
+// ----------------
+//
+// A replay inlining file inline_pid%p_compid%d.log is created for
+// a specific java method during normal execution of a java program
+// using the CompileCommand option DumpInline:
+//
+// -XX:CompileCommand=option,Benchmark::test,DumpInline
+//
+// To replay inlining, the inline file and the method must be specified:
+//
+// -XX:CompileCommand=option,Benchmark::test,ReplayInline -XX:InlineDataFile=inline_pid3244_compid6.log
+//
+// The difference from replay compilation is that replay inlining
+// is performed during normal java program execution.
+//
+
 class ciReplay {
   CI_PACKAGE_ACCESS
 
@@ -37,7 +104,11 @@
   static int replay_impl(TRAPS);
 
  public:
+  // Replay the specified compilation and exit the VM.
   static void replay(TRAPS);
+  // Load inlining decisions from the file and use them
+  // during compilation of the specified method.
+  static void* load_inline_data(ciMethod* method, int entry_bci, int comp_level);
 
   // These are used by the CI to fill in the cached data from the
   // replay file when replaying compiles.
@@ -48,6 +119,8 @@
   static bool is_loaded(Klass* klass);
 
   static bool should_not_inline(ciMethod* method);
+  static bool should_inline(void* data, ciMethod* method, int bci, int inline_depth);
+  static bool should_not_inline(void* data, ciMethod* method, int bci, int inline_depth);
 
 #endif
 };
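
The two new three-argument queries declared above are the compiler-facing half
of replay inlining. A minimal sketch of how an inliner could consult them,
assuming load_inline_data() has already returned a non-NULL data blob (the
helper function itself is hypothetical, not part of this change):

    // Hypothetical glue: with a loaded inline file the recorded decisions are
    // binary -- a call site is inlined exactly when the file has a record for it.
    bool replay_says_inline(void* inline_data, ciMethod* callee,
                            int caller_bci, int inline_depth) {
      assert(inline_data != NULL, "only meaningful when an inline file is loaded");
      if (ciReplay::should_not_inline(inline_data, callee, caller_bci, inline_depth)) {
        return false;  // no record: the site was not inlined in the original run
      }
      return ciReplay::should_inline(inline_data, callee, caller_bci, inline_depth);
    }
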
--- a/src/share/vm/ci/ciStreams.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciStreams.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciType.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciType.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciType.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciType.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciTypeArray.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciTypeArray.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciTypeArrayKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciTypeArrayKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciTypeFlow.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/ci/ciUtilities.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/ci/ciUtilities.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/altHashing.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/altHashing.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,18 +39,18 @@
 }
 
 // Seed value used for each alternative hash calculated.
-jint AltHashing::compute_seed() {
+juint AltHashing::compute_seed() {
   jlong nanos = os::javaTimeNanos();
   jlong now = os::javaTimeMillis();
-  jint SEED_MATERIAL[8] = {
-            (jint) object_hash(SystemDictionary::String_klass()),
-            (jint) object_hash(SystemDictionary::System_klass()),
-            (jint) os::random(),  // current thread isn't a java thread
-            (jint) (((julong)nanos) >> 32),
-            (jint) nanos,
-            (jint) (((julong)now) >> 32),
-            (jint) now,
-            (jint) (os::javaTimeNanos() >> 2)
+  int SEED_MATERIAL[8] = {
+            (int) object_hash(SystemDictionary::String_klass()),
+            (int) object_hash(SystemDictionary::System_klass()),
+            (int) os::random(),  // current thread isn't a java thread
+            (int) (((julong)nanos) >> 32),
+            (int) nanos,
+            (int) (((julong)now) >> 32),
+            (int) now,
+            (int) (os::javaTimeNanos() >> 2)
   };
 
   return murmur3_32(SEED_MATERIAL, 8);
@@ -58,14 +58,14 @@
 
 
 // Murmur3 hashing for Symbol
-jint AltHashing::murmur3_32(jint seed, const jbyte* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const jbyte* data, int len) {
+  juint h1 = seed;
   int count = len;
   int offset = 0;
 
   // body
   while (count >= 4) {
-    jint k1 = (data[offset] & 0x0FF)
+    juint k1 = (data[offset] & 0x0FF)
         | (data[offset + 1] & 0x0FF) << 8
         | (data[offset + 2] & 0x0FF) << 16
         | data[offset + 3] << 24;
@@ -85,7 +85,7 @@
   // tail
 
   if (count > 0) {
-    jint k1 = 0;
+    juint k1 = 0;
 
     switch (count) {
       case 3:
@@ -109,18 +109,18 @@
   h1 ^= len;
 
   // finalization mix forces all bits of a hash block to avalanche
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((unsigned int)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
 // Murmur3 hashing for Strings
-jint AltHashing::murmur3_32(jint seed, const jchar* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const jchar* data, int len) {
+  juint h1 = seed;
 
   int off = 0;
   int count = len;
@@ -129,7 +129,7 @@
   while (count >= 2) {
     jchar d1 = data[off++] & 0xFFFF;
     jchar d2 = data[off++];
-    jint k1 = (d1 | d2 << 16);
+    juint k1 = (d1 | d2 << 16);
 
     count -= 2;
 
@@ -145,7 +145,7 @@
   // tail
 
   if (count > 0) {
-    int k1 = data[off];
+    juint k1 = (juint)data[off];
 
     k1 *= 0xcc9e2d51;
     k1 = Integer_rotateLeft(k1, 15);
@@ -157,25 +157,25 @@
   h1 ^= len * 2; // (Character.SIZE / Byte.SIZE);
 
   // finalization mix forces all bits of a hash block to avalanche
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((unsigned int)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((unsigned int)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
 // Hash used for the seed.
-jint AltHashing::murmur3_32(jint seed, const int* data, int len) {
-  jint h1 = seed;
+juint AltHashing::murmur3_32(juint seed, const int* data, int len) {
+  juint h1 = seed;
 
   int off = 0;
   int end = len;
 
   // body
   while (off < end) {
-    jint k1 = data[off++];
+    juint k1 = (juint)data[off++];
 
     k1 *= 0xcc9e2d51;
     k1 = Integer_rotateLeft(k1, 15);
@@ -193,26 +193,26 @@
   h1 ^= len * 4; // (Integer.SIZE / Byte.SIZE);
 
   // finalization mix forces all bits of a hash block to avalanche
-  h1 ^= ((juint)h1) >> 16;
+  h1 ^= h1 >> 16;
   h1 *= 0x85ebca6b;
-  h1 ^= ((juint)h1) >> 13;
+  h1 ^= h1 >> 13;
   h1 *= 0xc2b2ae35;
-  h1 ^= ((juint)h1) >> 16;
+  h1 ^= h1 >> 16;
 
   return h1;
 }
 
-jint AltHashing::murmur3_32(const int* data, int len) {
+juint AltHashing::murmur3_32(const int* data, int len) {
   return murmur3_32(0, data, len);
 }
 
 #ifndef PRODUCT
 // Overloaded versions for internal test.
-jint AltHashing::murmur3_32(const jbyte* data, int len) {
+juint AltHashing::murmur3_32(const jbyte* data, int len) {
   return murmur3_32(0, data, len);
 }
 
-jint AltHashing::murmur3_32(const jchar* data, int len) {
+juint AltHashing::murmur3_32(const jchar* data, int len) {
   return murmur3_32(0, data, len);
 }
 
@@ -251,11 +251,11 @@
 
   // Hash subranges {}, {0}, {0,1}, {0,1,2}, ..., {0,...,255}
   for (int i = 0; i < 256; i++) {
-    jint hash = murmur3_32(256 - i, vector, i);
+    juint hash = murmur3_32(256 - i, vector, i);
     hashes[i * 4] = (jbyte) hash;
-    hashes[i * 4 + 1] = (jbyte) (((juint)hash) >> 8);
-    hashes[i * 4 + 2] = (jbyte) (((juint)hash) >> 16);
-    hashes[i * 4 + 3] = (jbyte) (((juint)hash) >> 24);
+    hashes[i * 4 + 1] = (jbyte)(hash >> 8);
+    hashes[i * 4 + 2] = (jbyte)(hash >> 16);
+    hashes[i * 4 + 3] = (jbyte)(hash >> 24);
   }
 
   // hash to get const result.
@@ -269,7 +269,7 @@
 }
 
 void AltHashing::testEquivalentHashes() {
-  jint jbytes, jchars, ints;
+  juint jbytes, jchars, ints;
 
   // printf("testEquivalentHashes\n");
 
--- a/src/share/vm/classfile/altHashing.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/altHashing.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,24 +39,24 @@
 class AltHashing : AllStatic {
 
   // utility function copied from java/lang/Integer
-  static jint Integer_rotateLeft(jint i, int distance) {
-    return (i << distance) | (((juint)i) >> (32-distance));
+  static juint Integer_rotateLeft(juint i, int distance) {
+    return (i << distance) | (i >> (32-distance));
   }
-  static jint murmur3_32(const int* data, int len);
-  static jint murmur3_32(jint seed, const int* data, int len);
+  static juint murmur3_32(const int* data, int len);
+  static juint murmur3_32(juint seed, const int* data, int len);
 
 #ifndef PRODUCT
   // Hashing functions used for internal testing
-  static jint murmur3_32(const jbyte* data, int len);
-  static jint murmur3_32(const jchar* data, int len);
+  static juint murmur3_32(const jbyte* data, int len);
+  static juint murmur3_32(const jchar* data, int len);
   static void testMurmur3_32_ByteArray();
   static void testEquivalentHashes();
 #endif // PRODUCT
 
  public:
-  static jint compute_seed();
-  static jint murmur3_32(jint seed, const jbyte* data, int len);
-  static jint murmur3_32(jint seed, const jchar* data, int len);
+  static juint compute_seed();
+  static juint murmur3_32(juint seed, const jbyte* data, int len);
+  static juint murmur3_32(juint seed, const jchar* data, int len);
   NOT_PRODUCT(static void test_alt_hash();)
 };
 #endif // SHARE_VM_CLASSFILE_ALTHASHING_HPP
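
The jint-to-juint switch above is behavioral, not cosmetic: Murmur3's
finalization mix needs logical right shifts, but >> on a negative signed value
is an arithmetic shift on mainstream compilers (and implementation-defined by
the C++ standard), and the wrap-around multiplications are only well defined
for unsigned types. A standalone illustration, independent of the patch:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      uint32_t u = 0x90000000u;   // high bit set, as hash values often are
      int32_t  s = (int32_t)u;    // negative when reinterpreted as signed

      printf("unsigned: %08x\n", u >> 16);             // 00009000 (logical shift)
      printf("signed:   %08x\n", (uint32_t)(s >> 16)); // ffff9000 on typical
                                                       // compilers (sign-extended)
      return 0;
    }
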
--- a/src/share/vm/classfile/bytecodeAssembler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/bytecodeAssembler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileParser.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3746,18 +3746,24 @@
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
         vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "Unsupported major.minor version %u.%u",
+        "Unsupported class file version %u.%u, "
+        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
         major_version,
-        minor_version);
+        minor_version,
+        JAVA_MAX_SUPPORTED_VERSION,
+        JAVA_MAX_SUPPORTED_MINOR_VERSION);
     } else {
       ResourceMark rm(THREAD);
       Exceptions::fthrow(
         THREAD_AND_LOCATION,
         vmSymbols::java_lang_UnsupportedClassVersionError(),
-        "%s : Unsupported major.minor version %u.%u",
+        "%s has been compiled by a more recent version of the Java Runtime (class file version %u.%u), "
+        "this version of the Java Runtime only recognizes class file versions up to %u.%u",
         name->as_C_string(),
         major_version,
-        minor_version);
+        minor_version,
+        JAVA_MAX_SUPPORTED_VERSION,
+        JAVA_MAX_SUPPORTED_MINOR_VERSION);
     }
     return nullHandle;
   }
@@ -4098,8 +4104,12 @@
         tty->print("[Loaded %s from %s]\n", this_klass->external_name(),
                    cfs->source());
       } else if (class_loader.is_null()) {
-        if (THREAD->is_Java_thread()) {
-          Klass* caller = ((JavaThread*)THREAD)->security_get_caller_class(1);
+        Klass* caller =
+            THREAD->is_Java_thread()
+                ? ((JavaThread*)THREAD)->security_get_caller_class(1)
+                : NULL;
+        // caller can be NULL, for example, during a JVMTI VM_Init hook
+        if (caller != NULL) {
           tty->print("[Loaded %s by instance of %s]\n",
                      this_klass->external_name(),
                      InstanceKlass::cast(caller)->external_name());
@@ -4500,8 +4510,8 @@
             break; // didn't find any match; get out
           }
 
-          if (super_m->is_final() &&
-              // matching method in super is final
+          if (super_m->is_final() && !super_m->is_static() &&
+              // matching method in super is final, and not static
               (Reflection::verify_field_access(this_klass(),
                                                super_m->method_holder(),
                                                super_m->method_holder(),
--- a/src/share/vm/classfile/classFileStream.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classFileStream.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classFileStream.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classFileStream.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/classLoader.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classLoader.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -68,6 +68,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/classfile/classLoaderData.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,11 @@
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
-  _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
+  _is_anonymous(is_anonymous),
+  // An anonymous class loader data doesn't have anything to keep
+  // it from being unloaded during parsing of the anonymous class.
+  // The null-class-loader should always be kept alive.
+  _keep_alive(is_anonymous || h_class_loader.is_null()),
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
@@ -317,11 +321,15 @@
   }
 }
 
+oop ClassLoaderData::keep_alive_object() const {
+  assert(!keep_alive(), "Don't use with CLDs that are artificially kept alive");
+  return is_anonymous() ? _klasses->java_mirror() : class_loader();
+}
+
 bool ClassLoaderData::is_alive(BoolObjectClosure* is_alive_closure) const {
-  bool alive =
-    is_anonymous() ?
-       is_alive_closure->do_object_b(_klasses->java_mirror()) :
-       class_loader() == NULL || is_alive_closure->do_object_b(class_loader());
+  bool alive = keep_alive() // null class loader and incomplete anonymous klasses.
+      || is_alive_closure->do_object_b(keep_alive_object());
+
   assert(!alive || claimed(), "must be claimed");
   return alive;
 }
@@ -520,6 +528,13 @@
   }
 }
 
+bool ClassLoaderData::contains_klass(Klass* klass) {
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    if (k == klass) return true;
+  }
+  return false;
+}
+
 
 // GC root of class loader data created.
 ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
@@ -591,8 +606,6 @@
 
 void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
   if (ClassUnloading) {
-    ClassLoaderData::the_null_class_loader_data()->oops_do(f, klass_closure, must_claim);
-    // keep any special CLDs alive.
     ClassLoaderDataGraph::keep_alive_oops_do(f, klass_closure, must_claim);
   } else {
     ClassLoaderDataGraph::oops_do(f, klass_closure, must_claim);
@@ -648,12 +661,12 @@
   return array;
 }
 
-#ifndef PRODUCT
-// for debugging and hsfind(x)
-bool ClassLoaderDataGraph::contains(address x) {
-  // I think we need the _metaspace_lock taken here because the class loader
-  // data graph could be changing while we are walking it (new entries added,
-  // new entries being unloaded, etc).
+// For profiling and hsfind() only.  Otherwise, this is unsafe (and slow).  This
+// is done lock-free to avoid lock inversion problems.  It is safe because
+// new ClassLoaderData are added to the end of the CLDG and are only removed at
+// a safepoint.  The _unloading list can be deallocated concurrently with CMS, so
+// this doesn't look in metaspace for classes that have been unloaded.
+bool ClassLoaderDataGraph::contains(const void* x) {
   if (DumpSharedSpaces) {
     // There are only two metaspaces to worry about.
     ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
@@ -670,16 +683,11 @@
     }
   }
 
-  // Could also be on an unloading list which is okay, ie. still allocated
-  // for a little while.
-  for (ClassLoaderData* ucld = _unloading; ucld != NULL; ucld = ucld->next()) {
-    if (ucld->metaspace_or_null() != NULL && ucld->metaspace_or_null()->contains(x)) {
-      return true;
-    }
-  }
+  // Do not check unloading list because deallocation can be concurrent.
   return false;
 }
 
+#ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
     if (loader_data == data) {
@@ -703,7 +711,7 @@
   bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
   MetadataOnStackMark md_on_stack;
   while (data != NULL) {
-    if (data->keep_alive() || data->is_alive(is_alive_closure)) {
+    if (data->is_alive(is_alive_closure)) {
       if (has_redefined_a_class) {
         data->classes_do(InstanceKlass::purge_previous_versions);
       }
--- a/src/share/vm/classfile/classLoaderData.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classLoaderData.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,9 +90,9 @@
   static void dump() { dump_on(tty); }
   static void verify();
 
+  // expensive test for pointer in metaspace for debugging
+  static bool contains(const void* x);
 #ifndef PRODUCT
-  // expensive test for pointer in metaspace for debugging
-  static bool contains(address x);
   static bool contains_loader_data(ClassLoaderData* loader_data);
 #endif
 
@@ -139,7 +139,7 @@
                            // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
-  bool _keep_alive;        // if this CLD can be unloaded for anonymous loaders
+  bool _keep_alive;        // if this CLD is kept alive without a keep_alive_object().
   bool _is_anonymous;      // if this CLD is for an anonymous class
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
@@ -230,13 +230,16 @@
 
   oop class_loader() const      { return _class_loader; }
 
+  // The object the GC is using to keep this ClassLoaderData alive.
+  oop keep_alive_object() const;
+
   // Returns true if this class loader data is for a loader going away.
   bool is_unloading() const     {
     assert(!(is_the_null_class_loader_data() && _unloading), "The null class loader can never be unloaded");
     return _unloading;
   }
-  // Anonymous class loader data doesn't have anything to keep them from
-  // being unloaded during parsing the anonymous class.
+
+  // Used to make sure that this CLD is not unloaded.
   void set_keep_alive(bool value) { _keep_alive = value; }
 
   unsigned int identity_hash() {
@@ -260,6 +263,7 @@
   jobject add_handle(Handle h);
   void add_class(Klass* k);
   void remove_class(Klass* k);
+  bool contains_klass(Klass* k);
   void record_dependency(Klass* to, TRAPS);
   void init_dependencies(TRAPS);
 
--- a/src/share/vm/classfile/classLoaderData.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/classLoaderData.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/defaultMethods.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/defaultMethods.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -349,6 +349,7 @@
   }
 
   Symbol* generate_no_defaults_message(TRAPS) const;
+  Symbol* generate_method_message(Symbol *klass_name, Method* method, TRAPS) const;
   Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
 
  public:
@@ -389,6 +390,20 @@
   Symbol* get_exception_message() { return _exception_message; }
   Symbol* get_exception_name() { return _exception_name; }
 
+  // Return true if the specified klass has a static method that matches
+  // the name and signature of the target method.
+  bool has_matching_static(InstanceKlass* root) {
+    if (_members.length() > 0) {
+      Pair<Method*,QualifiedState> entry = _members.at(0);
+      Method* impl = root->find_method(entry.first->name(),
+                                       entry.first->signature());
+      if ((impl != NULL) && impl->is_static()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   // Either sets the target or the exception error message
   void determine_target(InstanceKlass* root, TRAPS) {
     if (has_target() || throws_exception()) {
@@ -414,13 +429,25 @@
       }
     }
 
-    if (qualified_methods.length() == 0) {
-      _exception_message = generate_no_defaults_message(CHECK);
-      _exception_name = vmSymbols::java_lang_AbstractMethodError();
+    if (num_defaults == 0) {
+      // If the root klass has a static method with matching name and signature
+      // then do not generate an overpass method because it will hide the
+      // static method during resolution.
+      if (!has_matching_static(root)) {
+        if (qualified_methods.length() == 0) {
+          _exception_message = generate_no_defaults_message(CHECK);
+        } else {
+          assert(root != NULL, "Null root class");
+          _exception_message = generate_method_message(root->name(), qualified_methods.at(0), CHECK);
+        }
+        _exception_name = vmSymbols::java_lang_AbstractMethodError();
+      }
+
     // If only one qualified method is default, select that
     } else if (num_defaults == 1) {
         _selected_target = qualified_methods.at(default_index);
-    } else if (num_defaults > 1) {
+
+    } else if (num_defaults > 1 && !has_matching_static(root)) {
       _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
       _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
       if (TraceDefaultMethods) {
@@ -428,7 +455,6 @@
         tty->print_cr("");
       }
     }
-    // leave abstract methods alone, they will be found via normal search path
   }
 
   bool contains_signature(Symbol* query) {
@@ -486,6 +512,19 @@
   return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
 }
 
+Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const {
+  stringStream ss;
+  ss.print("Method ");
+  Symbol* name = method->name();
+  Symbol* signature = method->signature();
+  ss.write((const char*)klass_name->bytes(), klass_name->utf8_length());
+  ss.print(".");
+  ss.write((const char*)name->bytes(), name->utf8_length());
+  ss.write((const char*)signature->bytes(), signature->utf8_length());
+  ss.print(" is abstract");
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+}
+
 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
   stringStream ss;
   ss.print("Conflicting default methods:");
@@ -1026,7 +1065,8 @@
   Array<Method*>* merged_methods = MetadataFactory::new_array<Method*>(
       klass->class_loader_data(), new_size, NULL, CHECK);
 
-  if (original_ordering != NULL && original_ordering->length() > 0) {
+  // original_ordering might be empty if this class has no methods of its own
+  if (JvmtiExport::can_maintain_original_method_order() || DumpSharedSpaces) {
     merged_ordering = MetadataFactory::new_array<int>(
         klass->class_loader_data(), new_size, CHECK);
   }
@@ -1053,6 +1093,8 @@
       merged_methods->at_put(i, orig_method);
       original_methods->at_put(orig_idx, NULL);
       if (merged_ordering->length() > 0) {
+        assert(original_ordering != NULL && original_ordering->length() > 0,
+               "should have original order information for this method");
         merged_ordering->at_put(i, original_ordering->at(orig_idx));
       }
       ++orig_idx;
@@ -1081,13 +1123,14 @@
   // Replace klass methods with new merged lists
   klass->set_methods(merged_methods);
   klass->set_initial_method_idnum(new_size);
+  klass->set_method_ordering(merged_ordering);
 
+  // Free metadata
   ClassLoaderData* cld = klass->class_loader_data();
-  if (original_methods ->length() > 0) {
+  if (original_methods->length() > 0) {
     MetadataFactory::free_array(cld, original_methods);
   }
-  if (original_ordering->length() > 0) {
-    klass->set_method_ordering(merged_ordering);
+  if (original_ordering != NULL && original_ordering->length() > 0) {
     MetadataFactory::free_array(cld, original_ordering);
   }
 }
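
The two has_matching_static guards above implement one rule: never generate an
overpass, neither a throwing one nor a conflict one, when the root class
declares a static method with the same name and signature, because the
overpass would hide that static method during resolution. The decision table,
stripped to a skeleton (a hypothetical condensation, not code from the patch):

    enum Outcome { NO_OVERPASS, THROWING_OVERPASS, SELECTED_TARGET, CONFLICT };

    Outcome overpass_decision(bool has_matching_static, int num_defaults) {
      if (num_defaults == 0) {
        // AbstractMethodError overpass, unless a static would be shadowed.
        return has_matching_static ? NO_OVERPASS : THROWING_OVERPASS;
      }
      if (num_defaults == 1) {
        return SELECTED_TARGET;  // exactly one default: select it
      }
      // IncompatibleClassChangeError overpass, with the same static guard.
      return has_matching_static ? NO_OVERPASS : CONFLICT;
    }
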
--- a/src/share/vm/classfile/dictionary.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/dictionary.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -707,7 +707,7 @@
                 loader_data->class_loader() == NULL ||
                 loader_data->class_loader()->is_instance(),
                 "checking type of class_loader");
-      e->verify(/*check_dictionary*/false);
+      e->verify();
       probe->verify_protection_domain_set();
       element_count++;
     }
--- a/src/share/vm/classfile/javaClasses.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -461,12 +461,11 @@
   return true;
 }
 
-void java_lang_String::print(Handle java_string, outputStream* st) {
-  oop          obj    = java_string();
-  assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
-  typeArrayOop value  = java_lang_String::value(obj);
-  int          offset = java_lang_String::offset(obj);
-  int          length = java_lang_String::length(obj);
+void java_lang_String::print(oop java_string, outputStream* st) {
+  assert(java_string->klass() == SystemDictionary::String_klass(), "must be java_string");
+  typeArrayOop value  = java_lang_String::value(java_string);
+  int          offset = java_lang_String::offset(java_string);
+  int          length = java_lang_String::length(java_string);
 
   int end = MIN2(length, 100);
   if (value == NULL) {
@@ -3285,7 +3284,7 @@
     sun_reflect_ConstantPool::compute_offsets();
     sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
   }
-  if (JDK_Version::is_jdk18x_version())
+  if (JDK_Version::is_gte_jdk18x_version())
     java_lang_reflect_Parameter::compute_offsets();
 
   // generated interpreter code wants to know about the offsets we just computed:
--- a/src/share/vm/classfile/javaClasses.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/javaClasses.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -198,7 +198,7 @@
   }
 
   // Debugging
-  static void print(Handle java_string, outputStream* st);
+  static void print(oop java_string, outputStream* st);
   friend class JavaClasses;
 };
 
--- a/src/share/vm/classfile/symbolTable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,6 +38,9 @@
 
 // --------------------------------------------------------------------------
 
+// the number of buckets a thread claims
+const int ClaimChunkSize = 32;
+
 SymbolTable* SymbolTable::_the_table = NULL;
 // Static arena for symbols that are not deallocated
 Arena* SymbolTable::_arena = NULL;
@@ -83,16 +86,12 @@
   }
 }
 
-int SymbolTable::symbols_removed = 0;
-int SymbolTable::symbols_counted = 0;
+int SymbolTable::_symbols_removed = 0;
+int SymbolTable::_symbols_counted = 0;
+volatile int SymbolTable::_parallel_claimed_idx = 0;
 
-// Remove unreferenced symbols from the symbol table
-// This is done late during GC.
-void SymbolTable::unlink() {
-  int removed = 0;
-  int total = 0;
-  size_t memory_total = 0;
-  for (int i = 0; i < the_table()->table_size(); ++i) {
+void SymbolTable::buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total) {
+  for (int i = start_idx; i < end_idx; ++i) {
     HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
     HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
     while (entry != NULL) {
@@ -104,14 +103,14 @@
         break;
       }
       Symbol* s = entry->literal();
-      memory_total += s->size();
-      total++;
+      (*memory_total) += s->size();
+      (*processed)++;
       assert(s != NULL, "just checking");
       // If reference count is zero, remove.
       if (s->refcount() == 0) {
         assert(!entry->is_shared(), "shared entries should be kept live");
         delete s;
-        removed++;
+        (*removed)++;
         *p = entry->next();
         the_table()->free_entry(entry);
       } else {
@@ -121,12 +120,45 @@
       entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
     }
   }
-  symbols_removed += removed;
-  symbols_counted += total;
+}
+
+// Remove unreferenced symbols from the symbol table
+// This is done late during GC.
+void SymbolTable::unlink(int* processed, int* removed) {
+  size_t memory_total = 0;
+  buckets_unlink(0, the_table()->table_size(), processed, removed, &memory_total);
+  _symbols_removed += *removed;
+  _symbols_counted += *processed;
   // Exclude printing for normal PrintGCDetails because people parse
   // this output.
   if (PrintGCDetails && Verbose && WizardMode) {
-    gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", total,
+    gclog_or_tty->print(" [Symbols=%d size=" SIZE_FORMAT "K] ", *processed,
+                        (memory_total*HeapWordSize)/1024);
+  }
+}
+
+void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
+  const int limit = the_table()->table_size();
+
+  size_t memory_total = 0;
+
+  for (;;) {
+    // Grab next set of buckets to scan
+    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+    if (start_idx >= limit) {
+      // End of table
+      break;
+    }
+
+    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+    buckets_unlink(start_idx, end_idx, processed, removed, &memory_total);
+  }
+  Atomic::add(*processed, &_symbols_counted);
+  Atomic::add(*removed, &_symbols_removed);
+  // Exclude printing for normal PrintGCDetails because people parse
+  // this output.
+  if (PrintGCDetails && Verbose && WizardMode) {
+    gclog_or_tty->print(" [Symbols: scanned=%d removed=%d size=" SIZE_FORMAT "K] ", *processed, *removed,
                         (memory_total*HeapWordSize)/1024);
   }
 }
@@ -494,11 +526,11 @@
   tty->print_cr("Total number of symbols  %5d", count);
   tty->print_cr("Total size in memory     %5dK",
           (memory_total*HeapWordSize)/1024);
-  tty->print_cr("Total counted            %5d", symbols_counted);
-  tty->print_cr("Total removed            %5d", symbols_removed);
-  if (symbols_counted > 0) {
+  tty->print_cr("Total counted            %5d", _symbols_counted);
+  tty->print_cr("Total removed            %5d", _symbols_removed);
+  if (_symbols_counted > 0) {
     tty->print_cr("Percent removed          %3.2f",
-          ((float)symbols_removed/(float)symbols_counted)* 100);
+          ((float)_symbols_removed/(float)_symbols_counted)* 100);
   }
   tty->print_cr("Reference counts         %5d", Symbol::_total_count);
   tty->print_cr("Symbol arena size        %5d used %5d",
@@ -739,39 +771,38 @@
   return result;
 }
 
-void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
+void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
+  buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), processed, removed);
+}
+
+void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) {
   // Readers of the table are unlocked, so we should only be removing
   // entries at a safepoint.
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  for (int i = 0; i < the_table()->table_size(); ++i) {
-    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
-    while (entry != NULL) {
-      assert(!entry->is_shared(), "CDS not used for the StringTable");
+  const int limit = the_table()->table_size();
 
-      if (is_alive->do_object_b(entry->literal())) {
-        if (f != NULL) {
-          f->do_oop((oop*)entry->literal_addr());
-        }
-        p = entry->next_addr();
-      } else {
-        *p = entry->next();
-        the_table()->free_entry(entry);
-      }
-      entry = *p;
+  for (;;) {
+    // Grab next set of buckets to scan
+    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
+    if (start_idx >= limit) {
+      // End of table
+      break;
     }
+
+    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
+    buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, processed, removed);
   }
 }
 
-void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
+void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) {
   const int limit = the_table()->table_size();
 
   assert(0 <= start_idx && start_idx <= limit,
-         err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
+         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
   assert(0 <= end_idx && end_idx <= limit,
-         err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
+         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
   assert(start_idx <= end_idx,
-         err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
                  start_idx, end_idx));
 
   for (int i = start_idx; i < end_idx; i += 1) {
@@ -786,12 +817,44 @@
   }
 }
 
+void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed) {
+  const int limit = the_table()->table_size();
+
+  assert(0 <= start_idx && start_idx <= limit,
+         err_msg("start_idx (" INT32_FORMAT ") is out of bounds", start_idx));
+  assert(0 <= end_idx && end_idx <= limit,
+         err_msg("end_idx (" INT32_FORMAT ") is out of bounds", end_idx));
+  assert(start_idx <= end_idx,
+         err_msg("Index ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
+                 start_idx, end_idx));
+
+  for (int i = start_idx; i < end_idx; ++i) {
+    HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i);
+    HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
+    while (entry != NULL) {
+      assert(!entry->is_shared(), "CDS not used for the StringTable");
+
+      if (is_alive->do_object_b(entry->literal())) {
+        if (f != NULL) {
+          f->do_oop((oop*)entry->literal_addr());
+        }
+        p = entry->next_addr();
+      } else {
+        *p = entry->next();
+        the_table()->free_entry(entry);
+        (*removed)++;
+      }
+      (*processed)++;
+      entry = *p;
+    }
+  }
+}
+
 void StringTable::oops_do(OopClosure* f) {
-  buckets_do(f, 0, the_table()->table_size());
+  buckets_oops_do(f, 0, the_table()->table_size());
 }
 
 void StringTable::possibly_parallel_oops_do(OopClosure* f) {
-  const int ClaimChunkSize = 32;
   const int limit = the_table()->table_size();
 
   for (;;) {
@@ -803,7 +866,7 @@
     }
 
     int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_do(f, start_idx, end_idx);
+    buckets_oops_do(f, start_idx, end_idx);
   }
 }
 
--- a/src/share/vm/classfile/symbolTable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/symbolTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -86,8 +86,8 @@
   static bool _needs_rehashing;
 
   // For statistics
-  static int symbols_removed;
-  static int symbols_counted;
+  static int _symbols_removed;
+  static int _symbols_counted;
 
   Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F
 
@@ -121,6 +121,11 @@
   static Arena* arena() { return _arena; }  // called for statistics
 
   static void initialize_symbols(int arena_alloc_size = 0);
+
+  static volatile int _parallel_claimed_idx;
+
+  // Release any dead symbols
+  static void buckets_unlink(int start_idx, int end_idx, int* processed, int* removed, size_t* memory_total);
 public:
   enum {
     symbol_alloc_batch_size = 8,
@@ -177,7 +182,14 @@
                   unsigned int* hashValues, TRAPS);
 
   // Release any dead symbols
-  static void unlink();
+  static void unlink() {
+    int processed = 0;
+    int removed = 0;
+    unlink(&processed, &removed);
+  }
+  static void unlink(int* processed, int* removed);
+  // Release any dead symbols, possibly parallel version
+  static void possibly_parallel_unlink(int* processed, int* removed);
 
   // iterate over symbols
   static void symbols_do(SymbolClosure *cl);
@@ -235,6 +247,9 @@
   // Rehash the symbol table if it gets out of balance
   static void rehash_table();
   static bool needs_rehashing()         { return _needs_rehashing; }
+  // Parallel chunked scanning
+  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
+  static int parallel_claimed_index()        { return _parallel_claimed_idx; }
 };
 
 class StringTable : public Hashtable<oop, mtSymbol> {
@@ -258,7 +273,10 @@
 
   // Apply the given oop closure to the entries in the buckets
   // in the range [start_idx, end_idx).
-  static void buckets_do(OopClosure* f, int start_idx, int end_idx);
+  static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx);
+  // Unlink dead entries and apply the given oop closure to the live entries
+  // in the buckets in the range [start_idx, end_idx).
+  static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
 
   StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
                               sizeof (HashtableEntry<oop, mtSymbol>)) {}
@@ -280,15 +298,28 @@
 
   // GC support
   //   Delete pointers to otherwise-unreachable objects.
-  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f);
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) {
+    int processed = 0;
+    int removed = 0;
+    unlink_or_oops_do(cl, f, &processed, &removed);
+  }
   static void unlink(BoolObjectClosure* cl) {
-    unlink_or_oops_do(cl, NULL);
+    int processed = 0;
+    int removed = 0;
+    unlink_or_oops_do(cl, NULL, &processed, &removed);
   }
-
+  static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
+  static void unlink(BoolObjectClosure* cl, int* processed, int* removed) {
+    unlink_or_oops_do(cl, NULL, processed, removed);
+  }
   // Serially invoke "f->do_oop" on the locations of all oops in the table.
   static void oops_do(OopClosure* f);
 
-  // Possibly parallel version of the above
+  // Possibly parallel versions of the above
+  static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed);
+  static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) {
+    possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed);
+  }
   static void possibly_parallel_oops_do(OopClosure* f);
 
   // Hashing algorithm, used as the hash value used by the
@@ -349,5 +380,6 @@
 
   // Parallel chunked scanning
   static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
+  static int parallel_claimed_index() { return _parallel_claimed_idx; }
 };
 #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
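
SymbolTable and StringTable now share one work-distribution idiom for their
possibly_parallel_* operations: each GC worker atomically claims
ClaimChunkSize buckets at a time until the shared index passes the table
size, and the caller resets the index with clear_parallel_claimed_index()
before the next cycle. The pattern in isolation, as a generic sketch that
assumes HotSpot's Atomic::add and MIN2 and is not tied to either table:

    const int ClaimChunkSize = 32;  // buckets claimed per fetch

    void scan_in_parallel(volatile int* claim_idx, int limit,
                          void (*process_buckets)(int start, int end)) {
      for (;;) {
        // Atomic::add returns the post-increment value, so subtracting the
        // chunk size yields this worker's exclusive start index.
        int start = Atomic::add(ClaimChunkSize, claim_idx) - ClaimChunkSize;
        if (start >= limit) {
          break;  // every bucket has been claimed by some worker
        }
        // Safe without a lock: all workers run inside the same safepoint.
        process_buckets(start, MIN2(limit, start + ClaimChunkSize));
      }
    }
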
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1049,6 +1049,9 @@
       add_to_hierarchy(k, CHECK_NULL); // No exception, but can block
 
       // But, do not add to system dictionary.
+
+      // compiled code dependencies need to be validated anyway
+      notice_modification();
     }
 
     // Rewrite and patch constant pool here.
@@ -2647,23 +2650,6 @@
   constraints()->verify(dictionary(), placeholders());
 }
 
-
-void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
-                                                ClassLoaderData* loader_data) {
-  GCMutexLocker mu(SystemDictionary_lock);
-  Symbol* name;
-
-  Klass* probe = find_class(class_name, loader_data);
-  if (probe == NULL) {
-    probe = SystemDictionary::find_shared_class(class_name);
-    if (probe == NULL) {
-      name = find_placeholder(class_name, loader_data);
-    }
-  }
-  guarantee(probe != NULL || name != NULL,
-            "Loaded klasses should be in SystemDictionary");
-}
-
 // utility function for class load event
 void SystemDictionary::post_class_load_event(const Ticks& start_time,
                                              instanceKlassHandle k,
--- a/src/share/vm/classfile/systemDictionary.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -428,10 +428,6 @@
   static bool is_internal_format(Symbol* class_name);
 #endif
 
-  // Verify class is in dictionary
-  static void verify_obj_klass_present(Symbol* class_name,
-                                       ClassLoaderData* loader_data);
-
   // Initialization
   static void initialize(TRAPS);
 
--- a/src/share/vm/classfile/vmSymbols.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/vmSymbols.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/classfile/vmSymbols.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -738,9 +738,9 @@
   do_intrinsic(_addExactI,                java_lang_Math,         addExact_name, int2_int_signature,             F_S)   \
   do_intrinsic(_addExactL,                java_lang_Math,         addExact_name, long2_long_signature,           F_S)   \
   do_intrinsic(_decrementExactI,          java_lang_Math,         decrementExact_name, int_int_signature,        F_S)   \
-  do_intrinsic(_decrementExactL,          java_lang_Math,         decrementExact_name, long2_long_signature,     F_S)   \
+  do_intrinsic(_decrementExactL,          java_lang_Math,         decrementExact_name, long_long_signature,      F_S)   \
   do_intrinsic(_incrementExactI,          java_lang_Math,         incrementExact_name, int_int_signature,        F_S)   \
-  do_intrinsic(_incrementExactL,          java_lang_Math,         incrementExact_name, long2_long_signature,     F_S)   \
+  do_intrinsic(_incrementExactL,          java_lang_Math,         incrementExact_name, long_long_signature,      F_S)   \
   do_intrinsic(_multiplyExactI,           java_lang_Math,         multiplyExact_name, int2_int_signature,        F_S)   \
   do_intrinsic(_multiplyExactL,           java_lang_Math,         multiplyExact_name, long2_long_signature,      F_S)   \
   do_intrinsic(_negateExactI,             java_lang_Math,         negateExact_name, int_int_signature,           F_S)   \
@@ -871,7 +871,7 @@
    do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
    do_name(     encrypt_name,                                      "encrypt")                                           \
    do_name(     decrypt_name,                                      "decrypt")                                           \
-   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)V")                                        \
+   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)I")                                        \
                                                                                                                         \
   /* support for java.util.zip */                                                                                       \
   do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                                        \
--- a/src/share/vm/code/codeCache.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/codeCache.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -595,21 +595,11 @@
   }
 }
 
-#ifndef PRODUCT
-// used to keep track of how much time is spent in mark_for_deoptimization
-static elapsedTimer dependentCheckTime;
-static int dependentCheckCount = 0;
-#endif // PRODUCT
-
+// Keeps track of the time spent checking dependencies
+NOT_PRODUCT(static elapsedTimer dependentCheckTime;)
 
 int CodeCache::mark_for_deoptimization(DepChange& changes) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-
-#ifndef PRODUCT
-  dependentCheckTime.start();
-  dependentCheckCount++;
-#endif // PRODUCT
-
   int number_of_marked_CodeBlobs = 0;
 
   // search the hierarchy looking for nmethods which are affected by the loading of this class
@@ -617,32 +607,23 @@
   // then search the interfaces this class implements looking for nmethods
   // which might be dependent on the fact that an interface only had one
   // implementor.
-
-  { No_Safepoint_Verifier nsv;
-    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
-      Klass* d = str.klass();
-      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
-    }
-  }
-
-  if (VerifyDependencies) {
-    // Turn off dependency tracing while actually testing deps.
-    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
-    FOR_ALL_ALIVE_NMETHODS(nm) {
-      if (!nm->is_marked_for_deoptimization() &&
-          nm->check_all_dependencies()) {
-        ResourceMark rm;
-        tty->print_cr("Should have been marked for deoptimization:");
-        changes.print();
-        nm->print();
-        nm->print_dependencies();
-      }
-    }
+  // nmethod::check_all_dependencies only works correctly if no safepoint
+  // can happen
+  No_Safepoint_Verifier nsv;
+  for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
+    Klass* d = str.klass();
+    number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
   }
 
 #ifndef PRODUCT
-  dependentCheckTime.stop();
-#endif // PRODUCT
+  if (VerifyDependencies) {
+    // Object pointers are used as unique identifiers for dependency arguments. This
+    // is only possible if no safepoint, i.e., no GC, occurs during the verification code.
+    dependentCheckTime.start();
+    nmethod::check_all_dependencies(changes);
+    dependentCheckTime.stop();
+  }
+#endif
 
   return number_of_marked_CodeBlobs;
 }
@@ -899,9 +880,7 @@
   }
 
   tty->print_cr("CodeCache:");
-
-  tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
-                dependentCheckTime.seconds() / dependentCheckCount);
+  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());
 
   if (!live.is_empty()) {
     live.print("live");
--- a/src/share/vm/code/compiledIC.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/compiledIC.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/compressedStream.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/compressedStream.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/debugInfo.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/debugInfo.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/dependencies.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/dependencies.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -867,8 +867,6 @@
   } else {
     o = _deps->oop_recorder()->metadata_at(i);
   }
-  assert(o == NULL || o->is_metaspace_object(),
-         err_msg("Should be metadata " PTR_FORMAT, o));
   return o;
 }
 
@@ -892,6 +890,17 @@
   return result;
 }
 
+/**
+ * Returns a unique identifier for each dependency argument.
+ */
+uintptr_t Dependencies::DepStream::get_identifier(int i) {
+  if (has_oop_argument()) {
+    return (uintptr_t)(oopDesc*)argument_oop(i);
+  } else {
+    return (uintptr_t)argument(i);
+  }
+}
+
 oop Dependencies::DepStream::argument_oop(int i) {
   oop result = recorded_oop_at(argument_index(i));
   assert(result == NULL || result->is_oop(), "must be");
@@ -927,6 +936,20 @@
   return NULL;
 }
 
+// ----------------- DependencySignature --------------------------------------
+bool DependencySignature::equals(DependencySignature* sig) const {
+  if ((type() != sig->type()) || (args_count() != sig->args_count())) {
+    return false;
+  }
+
+  for (int i = 0; i < sig->args_count(); i++) {
+    if (arg(i) != sig->arg(i)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 /// Checking dependencies:
 
 // This hierarchy walker inspects subtypes of a given type,
@@ -1373,11 +1396,9 @@
 
   // We could also return false if m does not yet appear to be
   // executed, if the VM version supports this distinction also.
+  // Default methods are considered "concrete" as well.
   return !m->is_abstract() &&
-         !InstanceKlass::cast(m->method_holder())->is_interface();
-         // TODO: investigate whether default methods should be
-         // considered as "concrete" in this situation.  For now they
-         // are not.
+         !m->is_overpass(); // error functions aren't concrete
 }
 
 
--- a/src/share/vm/code/dependencies.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/dependencies.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -32,6 +32,7 @@
 #include "code/compressedStream.hpp"
 #include "code/nmethod.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/hashtable.hpp"
 
 //** Dependencies represent assertions (approximate invariants) within
 // the runtime system, e.g. class hierarchy changes.  An example is an
@@ -570,6 +571,9 @@
     bool next();
 
     DepType type()               { return _type; }
+    bool has_oop_argument()      { return type() == call_site_target_value; }
+    uintptr_t get_identifier(int i);
+
     int argument_count()         { return dep_args(type()); }
     int argument_index(int i)    { assert(0 <= i && i < argument_count(), "oob");
                                    return _xi[i]; }
@@ -613,6 +617,30 @@
 };
 
 
+class DependencySignature : public GenericHashtableEntry<DependencySignature, ResourceObj> {
+ private:
+  int                   _args_count;
+  uintptr_t             _argument_hash[Dependencies::max_arg_count];
+  Dependencies::DepType _type;
+
+ public:
+  DependencySignature(Dependencies::DepStream& dep) {
+    _args_count = dep.argument_count();
+    _type = dep.type();
+    for (int i = 0; i < _args_count; i++) {
+      _argument_hash[i] = dep.get_identifier(i);
+    }
+  }
+
+  bool equals(DependencySignature* sig) const;
+  uintptr_t key() const { return _argument_hash[0] >> 2; }
+
+  int args_count()             const { return _args_count; }
+  uintptr_t arg(int idx)       const { return _argument_hash[idx]; }
+  Dependencies::DepType type() const { return _type; }
+};
+
+
 // Every particular DepChange is a sub-class of this class.
 class DepChange : public StackObj {
  public:
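
The DependencySignature entries above abstract a dependency as its type plus
argument identifiers so that duplicates can be skipped during the wholesale
check in nmethod.cpp. A minimal standalone sketch of the same dedup idea,
using std::set for brevity instead of the VM's GenericHashtable (the struct
and helper names below are illustrative, not VM code):

  #include <cstdint>
  #include <set>
  #include <vector>

  // A dependency is abstracted as (type, argument identifiers); two
  // dependencies with equal signatures only need to be checked once.
  struct Signature {
    int type;
    std::vector<uintptr_t> args;
    bool operator<(const Signature& other) const {
      if (type != other.type) return type < other.type;
      return args < other.args;
    }
  };

  // Mirrors the semantics of GenericHashtable::add(): returns true if
  // 'sig' was not yet present, i.e. this dependency still needs checking.
  static bool add_if_absent(std::set<Signature>& table, const Signature& sig) {
    return table.insert(sig).second;
  }
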
--- a/src/share/vm/code/icBuffer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/icBuffer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/nmethod.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/nmethod.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -53,27 +53,6 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
-  const char*, int, const char*, int, const char*, int, void*, size_t);
-
-HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
-  char*, int, char*, int, char*, int);
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
-        klass_name->bytes(), klass_name->utf8_length(),                   \
-        name->bytes(), name->utf8_length(),                               \
-        signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#else /* USDT2 */
 #define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
   {                                                                       \
     Method* m = (method);                                                 \
@@ -87,7 +66,6 @@
         (char *) signature->bytes(), signature->utf8_length());                    \
     }                                                                     \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
@@ -675,7 +653,7 @@
         InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
       }
       NOT_PRODUCT(if (nm != NULL)  note_java_nmethod(nm));
-      if (PrintAssembly) {
+      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
         Disassembler::decode(nm);
       }
     }
@@ -1622,16 +1600,6 @@
 void nmethod::post_compiled_method_load_event() {
 
   Method* moop = method();
-#ifndef USDT2
-  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
-      moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#else /* USDT2 */
   HOTSPOT_COMPILED_METHOD_LOAD(
       (char *) moop->klass_name()->bytes(),
       moop->klass_name()->utf8_length(),
@@ -1640,7 +1608,6 @@
       (char *) moop->signature()->bytes(),
       moop->signature()->utf8_length(),
       insts_begin(), insts_size());
-#endif /* USDT2 */
 
   if (JvmtiExport::should_post_compiled_method_load() ||
       JvmtiExport::should_post_compiled_method_unload()) {
@@ -2293,16 +2260,37 @@
 }
 
 
-bool nmethod::check_all_dependencies() {
-  bool found_check = false;
-  // wholesale check of all dependencies
-  for (Dependencies::DepStream deps(this); deps.next(); ) {
-    if (deps.check_dependency() != NULL) {
-      found_check = true;
-      NOT_DEBUG(break);
+void nmethod::check_all_dependencies(DepChange& changes) {
+  // Checked dependencies are allocated into this ResourceMark
+  ResourceMark rm;
+
+  // Turn off dependency tracing while actually testing dependencies.
+  NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
+
+  GenericHashtable<DependencySignature, ResourceObj>* table = new GenericHashtable<DependencySignature, ResourceObj>(11027);
+  // Iterate over live nmethods and check dependencies of all nmethods that are not
+  // marked for deoptimization. A particular dependency is only checked once.
+  for (nmethod* nm = CodeCache::alive_nmethod(CodeCache::first()); nm != NULL; nm = CodeCache::alive_nmethod(CodeCache::next(nm))) {
+    if (!nm->is_marked_for_deoptimization()) {
+      for (Dependencies::DepStream deps(nm); deps.next(); ) {
+        // Construct abstraction of a dependency.
+        DependencySignature* current_sig = new DependencySignature(deps);
+        // Determine whether 'deps' has already been checked. table->add() returns
+        // 'true' if the dependency was added (i.e., was not in the hashtable).
+        if (table->add(current_sig)) {
+          if (deps.check_dependency() != NULL) {
+            // Dependency checking failed. Print out information about the failed
+            // dependency and finally fail with an assert. We can fail here, since
+            // dependency checking is never done in a product build.
+            changes.print();
+            nm->print();
+            nm->print_dependencies();
+            assert(false, "Should have been marked for deoptimization");
+          }
+        }
+      }
     }
   }
-  return found_check;  // tell caller if we found anything
 }
 
 bool nmethod::check_dependency_on(DepChange& changes) {
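
The PrintAssembly hunk above enables per-method assembly printing through the
CompilerOracle. Assuming the standard CompileCommand option syntax (class and
method names below are illustrative), usage would look like:

  # PrintAssembly output also requires the hsdis disassembler library
  java -XX:+UnlockDiagnosticVMOptions \
       -XX:CompileCommand=option,MyClass::hotMethod,PrintAssembly MyClass
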
--- a/src/share/vm/code/nmethod.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/nmethod.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -710,7 +710,7 @@
 
   // tells if any of this method's dependencies have been invalidated
   // (this is expensive!)
-  bool check_all_dependencies();
+  static void check_all_dependencies(DepChange& changes);
 
   // tells if this compiled method is dependent on the given changes,
   // and the changes have invalidated it
--- a/src/share/vm/code/relocInfo.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/relocInfo.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -582,6 +582,18 @@
   _static_call = address_from_scaled_offset(unpack_1_int(), base);
 }
 
+void trampoline_stub_Relocation::pack_data_to(CodeSection* dest) {
+  short* p = (short*) dest->locs_end();
+  CodeSection* insts = dest->outer()->insts();
+  normalize_address(_owner, insts);
+  p = pack_1_int_to(p, scaled_offset(_owner, insts->start()));
+  dest->set_locs_end((relocInfo*) p);
+}
+
+void trampoline_stub_Relocation::unpack_data() {
+  address base = binding()->section_start(CodeBuffer::SECT_INSTS);
+  _owner = address_from_scaled_offset(unpack_1_int(), base);
+}
 
 void external_word_Relocation::pack_data_to(CodeSection* dest) {
   short* p = (short*) dest->locs_end();
@@ -811,6 +823,25 @@
   return NULL;
 }
 
+// Finds the trampoline address for a call. If no trampoline stub is
+// found, NULL is returned; the caller must handle this case.
+address trampoline_stub_Relocation::get_trampoline_for(address call, nmethod* code) {
+  // There are no relocations available when the code gets relocated
+  // because of CodeBuffer expansion.
+  if (code->relocation_size() == 0)
+    return NULL;
+
+  RelocIterator iter(code, call);
+  while (iter.next()) {
+    if (iter.type() == relocInfo::trampoline_stub_type) {
+      if (iter.trampoline_stub_reloc()->owner() == call) {
+        return iter.addr();
+      }
+    }
+  }
+
+  return NULL;
+}
 
 void static_stub_Relocation::clear_inline_cache() {
   // Call stub is only used when calling the interpreted code.
@@ -975,6 +1006,12 @@
       tty->print(" | [static_call=" INTPTR_FORMAT "]", r->static_call());
       break;
     }
+  case relocInfo::trampoline_stub_type:
+    {
+      trampoline_stub_Relocation* r = (trampoline_stub_Relocation*) reloc();
+      tty->print(" | [trampoline owner=" INTPTR_FORMAT "]", r->owner());
+      break;
+    }
   }
   tty->cr();
 }
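
A hedged caller-side sketch of the lookup above (only get_trampoline_for and
its NULL-on-miss contract come from this hunk; the patching step is
hypothetical and platform-specific):

  // Redirect a NativeCall through its trampoline when the target is
  // out of range for a short branch.
  static bool redirect_through_trampoline(address call, nmethod* nm) {
    address tramp = trampoline_stub_Relocation::get_trampoline_for(call, nm);
    if (tramp == NULL) {
      return false;  // no trampoline stub was recorded for this call site
    }
    // patch_branch_to(call, tramp);  // hypothetical platform-specific patch
    return true;
  }
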
--- a/src/share/vm/code/relocInfo.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/relocInfo.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -265,8 +265,8 @@
     poll_type               = 10, // polling instruction for safepoints
     poll_return_type        = 11, // polling instruction for safepoints at return
     metadata_type           = 12, // metadata that used to be oops
-    yet_unused_type_1       = 13, // Still unused
-    yet_unused_type_2       = 14, // Still unused
+    trampoline_stub_type    = 13, // stub-entry for trampoline
+    yet_unused_type_1       = 14, // Still unused
     data_prefix_tag         = 15, // tag for a prefix (carries data arguments)
     type_mask               = 15  // A mask which selects only the above values
   };
@@ -306,6 +306,7 @@
     visitor(poll) \
     visitor(poll_return) \
     visitor(section_word) \
+    visitor(trampoline_stub) \
 
 
  public:
@@ -464,7 +465,7 @@
   return relocInfo(relocInfo::none, relocInfo::offset_limit() - relocInfo::offset_unit);
 }
 
-inline relocInfo prefix_relocInfo(int datalen) {
+inline relocInfo prefix_relocInfo(int datalen = 0) {
   assert(relocInfo::fits_into_immediate(datalen), "datalen in limits");
   return relocInfo(relocInfo::data_prefix_tag, relocInfo::RAW_BITS, relocInfo::datalen_tag | datalen);
 }
@@ -1155,6 +1156,43 @@
  public:
 };
 
+// Trampoline Relocations.
+// A trampoline makes it possible to encode a small branch in the code,
+// even if there is a chance that this branch cannot reach all possible
+// code locations. If the relocation finds that a branch is too far for
+// the instruction in the code, it can patch it to jump to the trampoline,
+// which provides sufficient space for a far branch. Needed on PPC.
+class trampoline_stub_Relocation : public Relocation {
+  relocInfo::relocType type() { return relocInfo::trampoline_stub_type; }
+
+ public:
+  static RelocationHolder spec(address static_call) {
+    RelocationHolder rh = newHolder();
+    return (new (rh) trampoline_stub_Relocation(static_call));
+  }
+
+ private:
+  address _owner;    // Address of the NativeCall that owns the trampoline.
+
+  trampoline_stub_Relocation(address owner) {
+    _owner = owner;
+  }
+
+  friend class RelocIterator;
+  trampoline_stub_Relocation() { }
+
+ public:
+
+  // Return the address of the NativeCall that owns the trampoline.
+  address owner() { return _owner; }
+
+  void pack_data_to(CodeSection* dest);
+  void unpack_data();
+
+  // Find the trampoline stub for a call.
+  static address get_trampoline_for(address call, nmethod* code);
+};
+
 class external_word_Relocation : public DataRelocation {
   relocInfo::relocType type() { return relocInfo::external_word_type; }
 
--- a/src/share/vm/code/scopeDesc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/scopeDesc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,8 +87,6 @@
 
   // Tells whether sender() returns NULL
   bool is_top() const;
-  // Tells whether sd is equal to this
-  bool is_equal(ScopeDesc* sd) const;
 
  private:
   // Alternative constructor
--- a/src/share/vm/code/stubs.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/stubs.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/code/stubs.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/stubs.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/code/vmreg.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/vmreg.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -47,8 +47,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
 #endif
 
@@ -70,7 +73,7 @@
 // friend class Location;
 private:
   enum {
-    BAD = -1
+    BAD_REG = -1
   };
 
 
@@ -83,7 +86,7 @@
 
 public:
 
-  static VMReg  as_VMReg(int val, bool bad_ok = false) { assert(val > BAD || bad_ok, "invalid"); return (VMReg) (intptr_t) val; }
+  static VMReg  as_VMReg(int val, bool bad_ok = false) { assert(val > BAD_REG || bad_ok, "invalid"); return (VMReg) (intptr_t) val; }
 
   const char*  name() {
     if (is_reg()) {
@@ -95,8 +98,8 @@
       return "STACKED REG";
     }
   }
-  static VMReg Bad() { return (VMReg) (intptr_t) BAD; }
-  bool is_valid() const { return ((intptr_t) this) != BAD; }
+  static VMReg Bad() { return (VMReg) (intptr_t) BAD_REG; }
+  bool is_valid() const { return ((intptr_t) this) != BAD_REG; }
   bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; }
   bool is_reg()   const { return is_valid() && !is_stack(); }
 
--- a/src/share/vm/code/vtableStubs.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/vtableStubs.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -55,6 +55,9 @@
   const int chunk_factor = 32;
   if (_chunk == NULL || _chunk + real_size > _chunk_end) {
     const int bytes = chunk_factor * real_size + pd_code_alignment();
+
+    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp.
+    // If changing the name, update the other file accordingly.
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
       return NULL;
@@ -62,12 +65,6 @@
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
     Forte::register_stub("vtable stub", _chunk, _chunk_end);
-    // Notify JVMTI about this stub. The event will be recorded by the enclosing
-    // JvmtiDynamicCodeEventCollector and posted when this thread has released
-    // all locks.
-    if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
-    }
     align_chunk();
   }
   assert(_chunk + real_size <= _chunk_end, "bad allocation");
@@ -130,6 +127,13 @@
                     is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
       Disassembler::decode(s->code_begin(), s->code_end());
     }
+    // Notify JVMTI about this stub. The event will be recorded by the enclosing
+    // JvmtiDynamicCodeEventCollector and posted when this thread has released
+    // all locks.
+    if (JvmtiExport::should_post_dynamic_code_generated()) {
+      JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
+                                                                   s->code_begin(), s->code_end());
+    }
   }
   return s->entry_point();
 }
@@ -195,6 +199,14 @@
   VtableStubs::initialize();
 }
 
+void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
+  for (int i = 0; i < N; i++) {
+    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
+      f(s);
+    }
+  }
+}
+
 
 //-----------------------------------------------------------------------------------------------------
 // Non-product code
--- a/src/share/vm/code/vtableStubs.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/code/vtableStubs.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -131,6 +131,7 @@
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
   static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
   static void        initialize();
+  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
 };
 
 #endif // SHARE_VM_CODE_VTABLESTUBS_HPP
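
A minimal usage sketch for the new iterator (the counting callback is
hypothetical; the function-pointer signature comes from the declaration
above):

  static int stub_count = 0;

  // Called once per registered stub by VtableStubs::vtable_stub_do().
  static void count_stub(VtableStub* s) {
    stub_count++;
  }

  // Somewhere under the appropriate lock:
  //   VtableStubs::vtable_stub_do(&count_stub);
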
--- a/src/share/vm/compiler/abstractCompiler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/abstractCompiler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/abstractCompiler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/abstractCompiler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,10 +78,14 @@
   // This thread will initialize the compiler runtime.
   bool should_perform_init();
 
-  // The (closed set) of concrete compiler classes. Using an tag like this
-  // avoids a confusing use of macros around the definition of the
-  // 'is_<compiler type>' methods.
-  enum Type { c1, c2, shark, graal };
+  // The (closed set) of concrete compiler classes.
+  enum Type {
+    none,
+    c1,
+    c2,
+    shark,
+    graal
+  };
 
  private:
   Type _type;
@@ -106,8 +110,10 @@
   virtual bool supports_native()                 { return true; }
   virtual bool supports_osr   ()                 { return true; }
   virtual bool can_compile_method(methodHandle method)  { return true; }
-  bool is_c1   ()                                { return _type == c1; }
-  bool is_c2   ()                                { return _type == c2; }
+
+  // Compiler type queries.
+  bool is_c1()                                   { return _type == c1; }
+  bool is_c2()                                   { return _type == c2; }
   bool is_shark()                                { return _type == shark; }
   bool is_graal()                                { return _type == graal; }
 
--- a/src/share/vm/compiler/compileBroker.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -63,45 +63,13 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
-  char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t);
-HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
-  char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
-
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
-  {                                                                      \
-    Symbol* klass_name = (method)->klass_name();                         \
-    Symbol* name = (method)->name();                                     \
-    Symbol* signature = (method)->signature();                           \
-    HS_DTRACE_PROBE8(hotspot, method__compile__begin,                    \
-      comp_name, strlen(comp_name),                                      \
-      klass_name->bytes(), klass_name->utf8_length(),                    \
-      name->bytes(), name->utf8_length(),                                \
-      signature->bytes(), signature->utf8_length());                     \
-  }
-
-#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
-  {                                                                      \
-    Symbol* klass_name = (method)->klass_name();                         \
-    Symbol* name = (method)->name();                                     \
-    Symbol* signature = (method)->signature();                           \
-    HS_DTRACE_PROBE9(hotspot, method__compile__end,                      \
-      comp_name, strlen(comp_name),                                      \
-      klass_name->bytes(), klass_name->utf8_length(),                    \
-      name->bytes(), name->utf8_length(),                                \
-      signature->bytes(), signature->utf8_length(), (success));          \
-  }
-
-#else /* USDT2 */
-
 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
     Symbol* name = (method)->name();                                     \
     Symbol* signature = (method)->signature();                           \
     HOTSPOT_METHOD_COMPILE_BEGIN(                                        \
-      comp_name, strlen(comp_name),                                      \
+      (char *) comp_name, strlen(comp_name),                             \
       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
       (char *) name->bytes(), name->utf8_length(),                       \
       (char *) signature->bytes(), signature->utf8_length());            \
@@ -113,12 +81,11 @@
     Symbol* name = (method)->name();                                     \
     Symbol* signature = (method)->signature();                           \
     HOTSPOT_METHOD_COMPILE_END(                                          \
-      comp_name, strlen(comp_name),                                      \
+      (char *) comp_name, strlen(comp_name),                             \
       (char *) klass_name->bytes(), klass_name->utf8_length(),           \
       (char *) name->bytes(), name->utf8_length(),                       \
       (char *) signature->bytes(), signature->utf8_length(), (success)); \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
@@ -135,9 +102,9 @@
 // The installed compiler(s)
 AbstractCompiler* CompileBroker::_compilers[2];
 
-// These counters are used for assigning id's to each compilation
-uint CompileBroker::_compilation_id        = 0;
-uint CompileBroker::_osr_compilation_id    = 0;
+// These counters are used to assign a unique ID to each compilation.
+volatile jint CompileBroker::_compilation_id     = 0;
+volatile jint CompileBroker::_osr_compilation_id = 0;
 
 // Debugging information
 int  CompileBroker::_last_compile_type     = no_compile;
@@ -966,7 +933,7 @@
 
     if (compiler_thread == NULL || compiler_thread->osthread() == NULL){
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), compiler_thread);
@@ -1188,7 +1155,7 @@
     // We now know that this compilation is not pending, complete,
     // or prohibited.  Assign a compile_id to this compilation
     // and check to see if it is in our [Start..Stop) range.
-    uint compile_id = assign_compile_id(method, osr_bci);
+    int compile_id = assign_compile_id(method, osr_bci);
     if (compile_id == 0) {
       // The compilation falls outside the allowed range.
       return;
@@ -1335,18 +1302,12 @@
   // do the compilation
   if (method->is_native()) {
     if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
-      // Acquire our lock.
-      int compile_id;
-      {
-        MutexLocker locker(MethodCompileQueue_lock, THREAD);
-        compile_id = assign_compile_id(method, standard_entry_bci);
-      }
       // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
       // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
       //
       // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
       // in this case.  If we can't generate one and use it we can not execute the out-of-line method handle calls.
-      (void) AdapterHandlerLibrary::create_native_wrapper(method, compile_id);
+      AdapterHandlerLibrary::create_native_wrapper(method);
     } else {
       return NULL;
     }
@@ -1449,27 +1410,28 @@
   return false;
 }
 
-
-// ------------------------------------------------------------------
-// CompileBroker::assign_compile_id
-//
-// Assign a serialized id number to this compilation request.  If the
-// number falls out of the allowed range, return a 0.  OSR
-// compilations may be numbered separately from regular compilations
-// if certain debugging flags are used.
-uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
-  assert(MethodCompileQueue_lock->owner() == Thread::current(),
-         "must hold the compilation queue lock");
+/**
+ * Generate serialized IDs for compilation requests. If certain debugging flags are used
+ * and the ID is not within the specified range, the method is not compiled and 0 is returned.
+ * The function can also generate separate compilation IDs for OSR compilations.
+ */
+int CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
+#ifdef ASSERT
   bool is_osr = (osr_bci != standard_entry_bci);
-  uint id;
-  if (CICountOSR && is_osr) {
-    id = ++_osr_compilation_id;
-    if ((uint)CIStartOSR <= id && id < (uint)CIStopOSR) {
+  int id;
+  if (method->is_native()) {
+    assert(!is_osr, "can't be osr");
+    // Adapters, native wrappers and method handle intrinsics
+    // should always be generated.
+    return Atomic::add(1, &_compilation_id);
+  } else if (CICountOSR && is_osr) {
+    id = Atomic::add(1, &_osr_compilation_id);
+    if (CIStartOSR <= id && id < CIStopOSR) {
       return id;
     }
   } else {
-    id = ++_compilation_id;
-    if ((uint)CIStart <= id && id < (uint)CIStop) {
+    id = Atomic::add(1, &_compilation_id);
+    if (CIStart <= id && id < CIStop) {
       return id;
     }
   }
@@ -1477,6 +1439,11 @@
   // Method was not in the appropriate compilation range.
   method->set_not_compilable_quietly();
   return 0;
+#else
+  // CICountOSR is a develop flag and set to 'false' by default. In a product build,
+  // only _compilation_id is incremented.
+  return Atomic::add(1, &_compilation_id);
+#endif
 }
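
The switch from pre-increment under MethodCompileQueue_lock to Atomic::add
makes ID assignment lock-free. A standalone C++11 sketch of the same scheme
(HotSpot's Atomic::add returns the new value, which fetch_add + 1 emulates
here):

  #include <atomic>

  static std::atomic<int> next_compilation_id{0};

  // Each caller receives a distinct positive ID without taking a lock.
  static int assign_id() {
    return next_compilation_id.fetch_add(1) + 1;
  }
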
 
 
--- a/src/share/vm/compiler/compileBroker.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/compileBroker.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -246,6 +246,8 @@
 
   // Compile type Information for print_last_compile() and CompilerCounters
   enum { no_compile, normal_compile, osr_compile, native_compile };
+  static int assign_compile_id(methodHandle method, int osr_bci);
 
  private:
   static bool _initialized;
@@ -258,9 +260,8 @@
   static AbstractCompiler* _compilers[2];
 
   // These counters are used for assigning id's to each compilation
-  static uint _compilation_id;
-  static uint _osr_compilation_id;
-  static uint _native_compilation_id;
+  static volatile jint _compilation_id;
+  static volatile jint _osr_compilation_id;
 
   static int  _last_compile_type;
   static int  _last_compile_level;
@@ -321,7 +322,6 @@
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
-  static uint assign_compile_id        (methodHandle method, int osr_bci);
   static bool is_compile_blocking      (methodHandle method, int osr_bci);
   static void preload_classes          (methodHandle method, TRAPS);
 
--- a/src/share/vm/compiler/compileLog.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/compileLog.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compileLog.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/compileLog.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/compilerOracle.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/compilerOracle.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/compiler/disassembler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/disassembler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,163 +1,163 @@
-/*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "code/codeCache.hpp"
-#include "compiler/disassembler.hpp"
-#include "gc_interface/collectedHeap.hpp"
-#include "memory/cardTableModRefBS.hpp"
-#include "runtime/fprofiler.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/stubCodeGenerator.hpp"
-#include "runtime/stubRoutines.hpp"
-#ifdef TARGET_ARCH_x86
-# include "depChecker_x86.hpp"
-#endif
-#ifdef TARGET_ARCH_sparc
-# include "depChecker_sparc.hpp"
-#endif
-#ifdef TARGET_ARCH_zero
-# include "depChecker_zero.hpp"
-#endif
-#ifdef TARGET_ARCH_arm
-# include "depChecker_arm.hpp"
-#endif
-#ifdef TARGET_ARCH_ppc
-# include "depChecker_ppc.hpp"
-#endif
-#ifdef SHARK
-#include "shark/sharkEntry.hpp"
-#endif
-
-void*       Disassembler::_library               = NULL;
-bool        Disassembler::_tried_to_load_library = false;
-
-// This routine is in the shared library:
-Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
-Disassembler::decode_func Disassembler::_decode_instructions = NULL;
-
-static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
-static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
-static const char decode_instructions_name[] = "decode_instructions";
-static bool use_new_version = true;
-#define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
-#define BYTES_COMMENT   ";..."  /* funky byte display comment */
-
-bool Disassembler::load_library() {
-  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
-    // Already succeeded.
-    return true;
-  }
-  if (_tried_to_load_library) {
-    // Do not try twice.
-    // To force retry in debugger: assign _tried_to_load_library=0
-    return false;
-  }
-  // Try to load it.
-  char ebuf[1024];
-  char buf[JVM_MAXPATHLEN];
-  os::jvm_path(buf, sizeof(buf));
-  int jvm_offset = -1;
-  int lib_offset = -1;
-  {
-    // Match "jvm[^/]*" in jvm_path.
-    const char* base = buf;
-    const char* p = strrchr(buf, '/');
-    if (p != NULL) lib_offset = p - base + 1;
-    p = strstr(p ? p : base, "jvm");
-    if (p != NULL)  jvm_offset = p - base;
-  }
-  // Find the disassembler shared library.
-  // Search for several paths derived from libjvm, in this order:
-  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
-  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
-  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
-  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
-  if (jvm_offset >= 0) {
-    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
-    strcpy(&buf[jvm_offset], hsdis_library_name);
-    strcat(&buf[jvm_offset], os::dll_file_extension());
-    _library = os::dll_load(buf, ebuf, sizeof ebuf);
-    if (_library == NULL) {
-      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
-      strcpy(&buf[lib_offset], hsdis_library_name);
-      strcat(&buf[lib_offset], os::dll_file_extension());
-      _library = os::dll_load(buf, ebuf, sizeof ebuf);
-    }
-    if (_library == NULL) {
-      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
-      buf[lib_offset - 1] = '\0';
-      const char* p = strrchr(buf, '/');
-      if (p != NULL) {
-        lib_offset = p - buf + 1;
-        strcpy(&buf[lib_offset], hsdis_library_name);
-        strcat(&buf[lib_offset], os::dll_file_extension());
-        _library = os::dll_load(buf, ebuf, sizeof ebuf);
-      }
-    }
-  }
-  if (_library == NULL) {
-    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
-    strcpy(&buf[0], hsdis_library_name);
-    strcat(&buf[0], os::dll_file_extension());
-    _library = os::dll_load(buf, ebuf, sizeof ebuf);
-  }
-  if (_library != NULL) {
-    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
-                                          os::dll_lookup(_library, decode_instructions_virtual_name));
-  }
-  if (_decode_instructions_virtual == NULL) {
-    // could not spot in new version, try old version
-    _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
-                                          os::dll_lookup(_library, decode_instructions_name));
-    use_new_version = false;
-  } else {
-    use_new_version = true;
-  }
-  _tried_to_load_library = true;
-  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
-    tty->print_cr("Could not load %s; %s; %s", buf,
-                  ((_library != NULL)
-                   ? "entry point is missing"
-                   : (WizardMode || PrintMiscellaneous)
-                   ? (const char*)ebuf
-                   : "library not loadable"),
-                  "PrintAssembly is disabled");
-    return false;
-  }
-
-  // Success.
-  tty->print_cr("Loaded disassembler from %s", buf);
-  return true;
-}
-
-
-class decode_env {
- private:
-  nmethod*      _nm;
-  CodeBlob*     _code;
+/*
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
+#include "code/codeCache.hpp"
+#include "compiler/disassembler.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "memory/cardTableModRefBS.hpp"
+#include "runtime/fprofiler.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/stubCodeGenerator.hpp"
+#include "runtime/stubRoutines.hpp"
+#ifdef TARGET_ARCH_x86
+# include "depChecker_x86.hpp"
+#endif
+#ifdef TARGET_ARCH_sparc
+# include "depChecker_sparc.hpp"
+#endif
+#ifdef TARGET_ARCH_zero
+# include "depChecker_zero.hpp"
+#endif
+#ifdef TARGET_ARCH_arm
+# include "depChecker_arm.hpp"
+#endif
+#ifdef TARGET_ARCH_ppc
+# include "depChecker_ppc.hpp"
+#endif
+#ifdef SHARK
+#include "shark/sharkEntry.hpp"
+#endif
+
+void*       Disassembler::_library               = NULL;
+bool        Disassembler::_tried_to_load_library = false;
+
+// This routine is in the shared library:
+Disassembler::decode_func_virtual Disassembler::_decode_instructions_virtual = NULL;
+Disassembler::decode_func Disassembler::_decode_instructions = NULL;
+
+static const char hsdis_library_name[] = "hsdis-"HOTSPOT_LIB_ARCH;
+static const char decode_instructions_virtual_name[] = "decode_instructions_virtual";
+static const char decode_instructions_name[] = "decode_instructions";
+static bool use_new_version = true;
+#define COMMENT_COLUMN  40 LP64_ONLY(+8) /*could be an option*/
+#define BYTES_COMMENT   ";..."  /* funky byte display comment */
+
+bool Disassembler::load_library() {
+  if (_decode_instructions_virtual != NULL || _decode_instructions != NULL) {
+    // Already succeeded.
+    return true;
+  }
+  if (_tried_to_load_library) {
+    // Do not try twice.
+    // To force retry in debugger: assign _tried_to_load_library=0
+    return false;
+  }
+  // Try to load it.
+  char ebuf[1024];
+  char buf[JVM_MAXPATHLEN];
+  os::jvm_path(buf, sizeof(buf));
+  int jvm_offset = -1;
+  int lib_offset = -1;
+  {
+    // Match "jvm[^/]*" in jvm_path.
+    const char* base = buf;
+    const char* p = strrchr(buf, '/');
+    if (p != NULL) lib_offset = p - base + 1;
+    p = strstr(p ? p : base, "jvm");
+    if (p != NULL)  jvm_offset = p - base;
+  }
+  // Find the disassembler shared library.
+  // Search for several paths derived from libjvm, in this order:
+  // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so  (for compatibility)
+  // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+  // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+  // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+  if (jvm_offset >= 0) {
+    // 1. <home>/jre/lib/<arch>/<vm>/libhsdis-<arch>.so
+    strcpy(&buf[jvm_offset], hsdis_library_name);
+    strcat(&buf[jvm_offset], os::dll_file_extension());
+    _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    if (_library == NULL) {
+      // 2. <home>/jre/lib/<arch>/<vm>/hsdis-<arch>.so
+      strcpy(&buf[lib_offset], hsdis_library_name);
+      strcat(&buf[lib_offset], os::dll_file_extension());
+      _library = os::dll_load(buf, ebuf, sizeof ebuf);
+    }
+    if (_library == NULL) {
+      // 3. <home>/jre/lib/<arch>/hsdis-<arch>.so
+      buf[lib_offset - 1] = '\0';
+      const char* p = strrchr(buf, '/');
+      if (p != NULL) {
+        lib_offset = p - buf + 1;
+        strcpy(&buf[lib_offset], hsdis_library_name);
+        strcat(&buf[lib_offset], os::dll_file_extension());
+        _library = os::dll_load(buf, ebuf, sizeof ebuf);
+      }
+    }
+  }
+  if (_library == NULL) {
+    // 4. hsdis-<arch>.so  (using LD_LIBRARY_PATH)
+    strcpy(&buf[0], hsdis_library_name);
+    strcat(&buf[0], os::dll_file_extension());
+    _library = os::dll_load(buf, ebuf, sizeof ebuf);
+  }
+  if (_library != NULL) {
+    _decode_instructions_virtual = CAST_TO_FN_PTR(Disassembler::decode_func_virtual,
+                                          os::dll_lookup(_library, decode_instructions_virtual_name));
+  }
+  if (_decode_instructions_virtual == NULL) {
+    // could not spot in new version, try old version
+    _decode_instructions = CAST_TO_FN_PTR(Disassembler::decode_func,
+                                          os::dll_lookup(_library, decode_instructions_name));
+    use_new_version = false;
+  } else {
+    use_new_version = true;
+  }
+  _tried_to_load_library = true;
+  if (_decode_instructions_virtual == NULL && _decode_instructions == NULL) {
+    tty->print_cr("Could not load %s; %s; %s", buf,
+                  ((_library != NULL)
+                   ? "entry point is missing"
+                   : (WizardMode || PrintMiscellaneous)
+                   ? (const char*)ebuf
+                   : "library not loadable"),
+                  "PrintAssembly is disabled");
+    return false;
+  }
+
+  // Success.
+  tty->print_cr("Loaded disassembler from %s", buf);
+  return true;
+}
+
+
+class decode_env {
+ private:
+  nmethod*      _nm;
+  CodeBlob*     _code;
   CodeStrings   _strings;
   outputStream* _output;
   address       _start, _end;
--- a/src/share/vm/compiler/disassembler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/compiler/disassembler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -122,7 +122,7 @@
 
 template <class Chunk>
 void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
-  return_chunk_at_tail(chunk, true);
+  AdaptiveFreeList<Chunk>::return_chunk_at_tail(chunk, true);
 }
 
 template <class Chunk>
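
The explicit qualification above makes the call statically bound to this
class's own overload instead of going through dynamic dispatch. A minimal
illustration of the difference (class and member names are illustrative
only):

  struct Base {
    virtual void f() { /* overridable behavior */ }
    void g() {
      f();        // virtual dispatch: may invoke a subclass override
      Base::f();  // qualified call: always Base::f, bound at compile time
    }
  };
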
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,6 +37,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -466,7 +469,7 @@
 void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
   _STW_timer.stop();
   _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-  // Start accumumlating time for the remark in the STW timer.
+  // Start accumulating time for the remark in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -537,8 +540,8 @@
       avg_msc_pause()->sample(msc_pause_in_seconds);
       double mutator_time_in_seconds = 0.0;
       if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-        // This assertion may fail because of time stamp gradularity.
-        // Comment it out and investiage it at a later time.  The large
+        // This assertion may fail because of time stamp granularity.
+        // Comment it out and investigate it at a later time.  The large
         // time stamp granularity occurs on some older linux systems.
 #ifndef CLOCK_GRANULARITY_TOO_LARGE
         assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
@@ -700,7 +703,7 @@
     double latest_cms_sum_concurrent_phases_time_secs =
       concurrent_collection_time();
     if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collecton_end "
+      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collection_end "
         "STW_in_foreground_in_seconds %f "
         "_latest_cms_initial_mark_start_to_end_time_secs %f "
         "_latest_cms_remark_start_to_end_time_secs %f "
@@ -836,7 +839,7 @@
 
 void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
   _STW_timer.stop();
-  // Start accumumlating time for the marking in the STW timer.
+  // Start accumulating time for the marking in the STW timer.
   _STW_timer.reset();
   _STW_timer.start();
 }
@@ -1227,7 +1230,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -356,7 +356,7 @@
   void concurrent_sweeping_begin();
   void concurrent_sweeping_end();
   // Similar to the above (e.g., concurrent_marking_end()) and
-  // is used for both the precleaning an abortable precleaing
+  // is used for both the precleaning and abortable precleaning
   // phases.
   void concurrent_precleaning_begin();
   void concurrent_precleaning_end();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -88,8 +88,7 @@
   // of the tenured generation.
   PerfVariable* _avg_msc_pause_counter;
   // Average for the time between the most recent end of a
-  // MSC collection and the beginning of the next
-  // MSC collection.
+  // MSC collection and the beginning of the next MSC collection.
   PerfVariable* _avg_msc_interval_counter;
   // Average for the GC cost of a MSC collection based on
   // _avg_msc_pause_counter and _avg_msc_interval_counter.
@@ -99,8 +98,7 @@
   // of the tenured generation.
   PerfVariable* _avg_ms_pause_counter;
   // Average for the time between the most recent end of a
-  // MS collection and the beginning of the next
-  // MS collection.
+  // MS collection and the beginning of the next MS collection.
   PerfVariable* _avg_ms_interval_counter;
   // Average for the GC cost of a MS collection based on
   // _avg_ms_pause_counter and _avg_ms_interval_counter.
@@ -108,9 +106,9 @@
 
   // Average of the bytes promoted per minor collection.
   PerfVariable* _promoted_avg_counter;
-  // Average of the deviation of the promoted average
+  // Average of the deviation of the promoted average.
   PerfVariable* _promoted_avg_dev_counter;
-  // Padded average of the bytes promoted per minor colleciton
+  // Padded average of the bytes promoted per minor collection.
   PerfVariable* _promoted_padded_avg_counter;
 
   // See description of the _change_young_gen_for_maj_pauses
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -258,10 +258,10 @@
   bool take_from_overflow_list();
 };
 
-// Tn this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
+// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
 // stack and the bitMap are shared, so access needs to be suitably
-// sycnhronized. An OopTaskQueue structure, supporting efficient
-// workstealing, replaces a CMSMarkStack for storing grey objects.
+// synchronized. An OopTaskQueue structure, supporting efficient
+// work stealing, replaces a CMSMarkStack for storing grey objects.
 class Par_MarkRefsIntoAndScanClosure: public CMSOopsInGenClosure {
  private:
   MemRegion              _span;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -407,8 +407,8 @@
   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                        (size_t) SmallForLinearAlloc - 1));
   // XXX the following could potentially be pretty slow;
-  // should one, pesimally for the rare cases when res
-  // caclulated above is less than IndexSetSize,
+  // should one, pessimistically for the rare cases when res
+  // calculated above is less than IndexSetSize,
   // just return res calculated above? My reasoning was that
   // those cases will be so rare that the extra time spent doesn't
   // really matter....
@@ -759,7 +759,7 @@
 // Note on locking for the space iteration functions:
 // since the collector's iteration activities are concurrent with
 // allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instace a block being iterated
+// mechanism the iterators may go awry. For instance, a block being iterated
 // may suddenly be allocated or divided up and part of it allocated and
 // so on.
 
@@ -997,6 +997,13 @@
     if (FreeChunk::indicatesFreeChunk(p)) {
       volatile FreeChunk* fc = (volatile FreeChunk*)p;
       size_t res = fc->size();
+
+      // Bugfix for systems with a weak memory model (PPC64/IA64). The
+      // block's free bit was set and we have read the size of the
+      // block. Acquire and check the free bit again. If the block is
+      // still free, the read size is correct.
+      OrderAccess::acquire();
+
       // If the object is still a free chunk, return the size, else it
       // has been allocated so try again.
       if (FreeChunk::indicatesFreeChunk(p)) {
@@ -1010,6 +1017,12 @@
         assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
+
+        // Bugfix for systems with a weak memory model (PPC64/IA64).
+        // The object o may be an array. Acquire to make sure that the array
+        // size (third word) is consistent.
+        OrderAccess::acquire();
+
         size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
@@ -1040,6 +1053,13 @@
     if (FreeChunk::indicatesFreeChunk(p)) {
       volatile FreeChunk* fc = (volatile FreeChunk*)p;
       size_t res = fc->size();
+
+      // Bugfix for systems with a weak memory model (PPC64/IA64). The
+      // free bit of the block was set and we have read the size of
+      // the block. Acquire and check the free bit again. If the
+      // block is still free, the read size is correct.
+      OrderAccess::acquire();
+
       if (FreeChunk::indicatesFreeChunk(p)) {
         assert(res != 0, "Block size should not be 0");
         assert(loops == 0, "Should be 0");
@@ -1055,6 +1075,12 @@
         assert(k->is_klass(), "Should really be klass oop.");
         oop o = (oop)p;
         assert(o->is_oop(), "Should be an oop");
+
+        // Bugfix for systems with a weak memory model (PPC64/IA64).
+        // The object o may be an array. Acquire to make sure that the array
+        // size (third word) is consistent.
+        OrderAccess::acquire();
+
         size_t res = o->size_given_klass(k);
         res = adjustObjectSize(res);
         assert(res != 0, "Block size should not be 0");
@@ -1704,8 +1730,8 @@
   _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
-    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
+    TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
+    TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
     tl->verify_stats();
   }
 #endif // PRODUCT
@@ -2090,7 +2116,7 @@
 
 // Support for concurrent collection policy decisions.
 bool CompactibleFreeListSpace::should_concurrent_collect() const {
-  // In the future we might want to add in frgamentation stats --
+  // In the future we might want to add in fragmentation stats --
   // including erosion of the "mountain" into this decision as well.
   return !adaptive_freelists() && linearAllocationWouldFail();
 }
@@ -2099,7 +2125,7 @@
 
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
-  // prepare_for_compaction() uses the space between live objects
+  // The prepare_for_compaction() method uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
 }
@@ -2122,7 +2148,7 @@
   SCAN_AND_COMPACT(obj_size);
 }
 
-// fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
+// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
 // where fbs is free block sizes
 double CompactibleFreeListSpace::flsFrag() const {
   size_t itabFree = totalSizeInIndexedFreeLists();
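
An illustrative computation (editorial): four free blocks of size 4 give
1 - (4*4^2)/(4*4)^2 = 1 - 64/256 = 0.75, while the same 16 words free in a
single block give 1 - 16^2/16^2 = 0. The metric thus tends toward 1 as the
free space splinters into many small blocks and toward 0 as it coalesces.
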
@@ -2515,10 +2541,10 @@
 
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
     "Some sizes can't be allocated without recourse to"
     " linear allocation buffers");
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
     "else MIN_TREE_CHUNK_SIZE is wrong");
   assert(IndexSetStart != 0, "IndexSetStart not initialized");
   assert(IndexSetStride != 0, "IndexSetStride not initialized");
@@ -2651,7 +2677,7 @@
   // changes on-the-fly during a scavenge and avoid such a phase-change
   // pothole. The following code is a heuristic attempt to do that.
   // It is protected by a product flag until we have gained
-  // enough experience with this heuristic and fine-tuned its behaviour.
+  // enough experience with this heuristic and fine-tuned its behavior.
   // WARNING: This might increase fragmentation if we overreact to
   // small spikes, so some kind of historical smoothing based on
   // previous experience with the greater reactivity might be useful.
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
   HeapWord* _ptr;
   size_t    _word_size;
   size_t    _refillSize;
-  size_t    _allocation_size_limit;  // largest size that will be allocated
+  size_t    _allocation_size_limit;  // Largest size that will be allocated
 
   void print_on(outputStream* st) const;
 };
@@ -116,14 +116,14 @@
 
   PromotionInfo _promoInfo;
 
-  // helps to impose a global total order on freelistLock ranks;
+  // Helps to impose a global total order on freelistLock ranks;
   // assumes that CFLSpace's are allocated in global total order
   static int   _lockRank;
 
-  // a lock protecting the free lists and free blocks;
+  // A lock protecting the free lists and free blocks;
   // mutable because of ubiquity of locking even for otherwise const methods
   mutable Mutex _freelistLock;
-  // locking verifier convenience function
+  // Locking verifier convenience function
   void assert_locked() const PRODUCT_RETURN;
   void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
 
@@ -131,12 +131,13 @@
   LinearAllocBlock _smallLinearAllocBlock;
 
   FreeBlockDictionary<FreeChunk>::DictionaryChoice _dictionaryChoice;
-  AFLBinaryTreeDictionary* _dictionary;    // ptr to dictionary for large size blocks
+  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
 
+  // Indexed array for small size blocks
   AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-                                       // indexed array for small size blocks
-  // allocation stategy
-  bool       _fitStrategy;      // Use best fit strategy.
+
+  // Allocation strategy
+  bool       _fitStrategy;        // Use best fit strategy
   bool       _adaptive_freelists; // Use adaptive freelists
 
   // This is an address close to the largest free chunk in the heap.
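
The fields above encode the usual small/large split for this space:
requests below IndexSetSize words are served from the exact-size indexed
lists, larger ones from the binary tree dictionary. A minimal sketch of
that dispatch (editorial; both helpers are hypothetical, and the real code
can also fall back to the linear allocation block):

  HeapWord* allocate_sketch(size_t word_sz) {
    if (word_sz < IndexSetSize) {
      return take_from_indexed_list(word_sz);  // hypothetical: _indexedFreeList path
    }
    return take_from_dictionary(word_sz);      // hypothetical: _dictionary path
  }
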
@@ -157,7 +158,7 @@
 
   // Extra stuff to manage promotion parallelism.
 
-  // a lock protecting the dictionary during par promotion allocation.
+  // A lock protecting the dictionary during par promotion allocation.
   mutable Mutex _parDictionaryAllocLock;
   Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
 
@@ -275,26 +276,26 @@
   }
 
  protected:
-  // reset the indexed free list to its initial empty condition.
+  // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
-  // reset to an initial state with a single free block described
+  // Reset to an initial state with a single free block described
   // by the MemRegion parameter.
   void reset(MemRegion mr);
   // Return the total number of words in the indexed free lists.
   size_t     totalSizeInIndexedFreeLists() const;
 
  public:
-  // Constructor...
+  // Constructor
   CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr,
                            bool use_adaptive_freelists,
                            FreeBlockDictionary<FreeChunk>::DictionaryChoice);
-  // accessors
+  // Accessors
   bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
   FreeBlockDictionary<FreeChunk>* dictionary() const { return _dictionary; }
   HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
   void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
 
-  // Set CMS global values
+  // Set CMS global values.
   static void set_cms_values();
 
   // Return the free chunk at the end of the space.  If no such
@@ -305,7 +306,7 @@
 
   void set_collector(CMSCollector* collector) { _collector = collector; }
 
-  // Support for parallelization of rescan and marking
+  // Support for parallelization of rescan and marking.
   const size_t rescan_task_size()  const { return _rescan_task_size;  }
   const size_t marking_task_size() const { return _marking_task_size; }
   SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
@@ -346,7 +347,7 @@
   // Resizing support
   void set_end(HeapWord* value);  // override
 
-  // mutual exclusion support
+  // Mutual exclusion support
   Mutex* freelistLock() const { return &_freelistLock; }
 
   // Iteration support
@@ -370,7 +371,7 @@
   // If the iteration encounters an unparseable portion of the region,
   // terminate the iteration and return the address of the start of the
   // subregion that isn't done.  Return of "NULL" indicates that the
-  // interation completed.
+  // iteration completed.
   virtual HeapWord*
        object_iterate_careful_m(MemRegion mr,
                                 ObjectClosureCareful* cl);
@@ -393,11 +394,11 @@
   size_t block_size_nopar(const HeapWord* p) const;
   bool block_is_obj_nopar(const HeapWord* p) const;
 
-  // iteration support for promotion
+  // Iteration support for promotion
   void save_marks();
   bool no_allocs_since_save_marks();
 
-  // iteration support for sweeping
+  // Iteration support for sweeping
   void save_sweep_limit() {
     _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
                    unallocated_block() : end();
@@ -457,7 +458,7 @@
 
   FreeChunk* allocateScratch(size_t size);
 
-  // returns true if either the small or large linear allocation buffer is empty.
+  // Returns true if either the small or large linear allocation buffer is empty.
   bool       linearAllocationWouldFail() const;
 
   // Adjust the chunk for the minimum size.  This version is called in
@@ -477,18 +478,18 @@
   void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
               bool coalesced);
 
-  // Support for decisions regarding concurrent collection policy
+  // Support for decisions regarding concurrent collection policy.
   bool should_concurrent_collect() const;
 
-  // Support for compaction
+  // Support for compaction.
   void prepare_for_compaction(CompactPoint* cp);
   void adjust_pointers();
   void compact();
-  // reset the space to reflect the fact that a compaction of the
+  // Reset the space to reflect the fact that a compaction of the
   // space has been done.
   virtual void reset_after_compaction();
 
-  // Debugging support
+  // Debugging support.
   void print()                            const;
   void print_on(outputStream* st)         const;
   void prepare_for_verify();
@@ -500,7 +501,7 @@
   // i.e. either the binary tree dictionary, the indexed free lists
   // or the linear allocation block.
   bool verify_chunk_in_free_list(FreeChunk* fc) const;
-  // Verify that the given chunk is the linear allocation block
+  // Verify that the given chunk is the linear allocation block.
   bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
   // Do some basic checks on the free lists.
   void check_free_list_consistency()      const PRODUCT_RETURN;
@@ -516,7 +517,7 @@
     size_t sumIndexedFreeListArrayReturnedBytes();
     // Return the total number of chunks in the indexed free lists.
     size_t totalCountInIndexedFreeLists() const;
-    // Return the total numberof chunks in the space.
+    // Return the total number of chunks in the space.
     size_t totalCount();
   )
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -117,10 +117,10 @@
 // hide the naked CGC_lock manipulation in the baton-passing code
 // further below. That's something we should try to do. Also, the proof
 // of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy supsicion, for instance,
+// and potentially quite slippery. We have an uneasy suspicion, for instance,
 // that there may be a theoretical possibility of delay/starvation in the
 // low-level lock/wait/notify scheme used for the baton-passing because of
-// potential intereference with the priority scheme embodied in the
+// potential interference with the priority scheme embodied in the
 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 // invocation further below and marked with "XXX 20011219YSR".
 // Indeed, as we note elsewhere, this may become yet more slippery
@@ -259,7 +259,7 @@
   // Ideally, in the calculation below, we'd compute the dilatation
   // factor as: MinChunkSize/(promoting_gen's min object size)
   // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the mimimum
+  // promoting generation, we'll instead just use the minimum
   // object size (which today is a header's worth of space);
   // note that all arithmetic is in units of HeapWords.
   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
@@ -274,7 +274,7 @@
 //
 //   Let "f" be MinHeapFreeRatio in
 //
-//    _intiating_occupancy = 100-f +
+//    _initiating_occupancy = 100-f +
 //                           f * (CMSTriggerRatio/100)
 //   where CMSTriggerRatio is the argument "tr" below.
 //
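
Worked numerically (editorial, with HotSpot's long-standing defaults
f = MinHeapFreeRatio = 40 and tr = CMSTriggerRatio = 80):
_initiating_occupancy = 100 - 40 + 40 * (80/100) = 60 + 32 = 92, i.e. a
concurrent cycle is initiated once the CMS generation is about 92% full.
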
@@ -958,7 +958,7 @@
         desired_free_percentage);
       gclog_or_tty->print_cr("  Maximum free fraction %f",
         maximum_free_percentage);
-      gclog_or_tty->print_cr("  Capactiy "SIZE_FORMAT, capacity()/1000);
+      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
         desired_capacity/1000);
       int prev_level = level() - 1;
@@ -2671,7 +2671,7 @@
 // that it's responsible for collecting, while itself doing any
 // work common to all generations it's responsible for. A similar
 // comment applies to the gc_epilogue()'s.
-// The role of the varaible _between_prologue_and_epilogue is to
+// The role of the variable _between_prologue_and_epilogue is to
 // enforce the invocation protocol.
 void CMSCollector::gc_prologue(bool full) {
   // Call gc_prologue_work() for the CMSGen
@@ -2878,10 +2878,10 @@
 // Check reachability of the given heap address in CMS generation,
 // treating all other generations as roots.
 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but i'll
+  // We could "guarantee" below, rather than assert, but I'll
   // leave these as "asserts" so that an adventurous debugger
   // could try this in the product build provided some subset of
-  // the conditions were met, provided they were intersted in the
+  // the conditions were met, provided they were interested in the
   // results and knew that the computation below wouldn't interfere
   // with other concurrent computations mutating the structures
   // being read or written.
@@ -2982,7 +2982,7 @@
   // This is as intended, because by this time
   // GC must already have cleared any refs that need to be cleared,
   // and traced those that need to be marked; moreover,
-  // the marking done here is not going to intefere in any
+  // the marking done here is not going to interfere in any
   // way with the marking information used by GC.
   NoRefDiscovery no_discovery(ref_processor());
 
@@ -3000,7 +3000,7 @@
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
-    // all marking, then check if the new marks-verctor is
+    // all marking, then check if the new marks-vector is
     // a subset of the CMS marks-vector.
     verify_after_remark_work_1();
   } else if (CMSRemarkVerifyVariant == 2) {
@@ -3033,10 +3033,8 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
-                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
-                                true,   // walk code active on stacks
                                 NULL,
                                 NULL); // SSS: Provide correct closure
 
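
For reference, the call shape these hunks leave behind, shared by every
gen_process_strong_roots() site in this file (argument names editorial,
inferred from the calls shown; the dropped booleans are the "not
scavenging" and "walk code active on stacks" flags):

  gch->gen_process_strong_roots(level,
                                younger_gens_are_roots,
                                activate_strong_roots_scope,
                                SharedHeap::ScanningOption(so),
                                &oop_root_closure,
                                code_root_closure,    // may be NULL
                                klass_closure);       // may be NULL
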
@@ -3101,10 +3099,8 @@
   gch->gen_process_strong_roots(_cmsGen->level(),
                                 true,   // younger gens are roots
                                 true,   // activate StrongRootsScope
-                                false,  // not scavenging
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
-                                true,   // walk code active on stacks
                                 NULL,
                                 &klass_closure);
 
@@ -3303,7 +3299,7 @@
 void CMSCollector::setup_cms_unloading_and_verification_state() {
   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
                              || VerifyBeforeExit;
-  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
 
   // We set the proper root for this CMS cycle here.
   if (should_unload_classes()) {   // Should unload classes this cycle
@@ -3315,7 +3311,7 @@
   }
 
   // Not unloading classes this cycle
-  assert(!should_unload_classes(), "Inconsitency!");
+  assert(!should_unload_classes(), "Inconsistency!");
   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
   add_root_scanning_option(SharedHeap::SO_AllClasses);
 
@@ -3401,7 +3397,7 @@
       CMSExpansionCause::_allocate_par_lab);
     // Now go around the loop and try alloc again;
     // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails agaion.
+    // so we may go around the loop again if promotion fails again.
     if (GCExpandToAllocateDelayMillis > 0) {
       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
     }
@@ -3682,12 +3678,6 @@
   ResourceMark rm;
   HandleMark  hm;
 
-  FalseClosure falseClosure;
-  // In the case of a synchronous collection, we will elide the
-  // remark step, so it's important to catch all the nmethod oops
-  // in this step.
-  // The final 'true' flag to gen_process_strong_roots will ensure this.
-  // If 'async' is true, we can relax the nmethod tracing.
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -3738,10 +3728,8 @@
       gch->gen_process_strong_roots(_cmsGen->level(),
                                     true,   // younger gens are roots
                                     true,   // activate StrongRootsScope
-                                    false,  // not scavenging
                                     SharedHeap::ScanningOption(roots_scanning_options()),
                                     &notOlder,
-                                    true,   // walk all of code cache if (so & SO_CodeCache)
                                     NULL,
                                     &klass_closure);
     }
@@ -4373,7 +4361,7 @@
   // should really use wait/notify, which is the recommended
   // way of doing this type of interaction. Additionally, we should
   // consolidate the eight methods that do the yield operation and they
-  // are almost identical into one for better maintenability and
+  // are almost identical into one for better maintainability and
   // readability. See 6445193.
   //
   // Tony 2006.06.29
@@ -4541,7 +4529,7 @@
   // If Eden's current occupancy is below this threshold,
   // immediately schedule the remark; else preclean
   // past the next scavenge in an effort to
-  // schedule the pause as described avove. By choosing
+  // schedule the pause as described above. By choosing
   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
   // we will never do an actual abortable preclean cycle.
   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
@@ -5238,14 +5226,12 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
-                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mri_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
                                 NULL,
                                 &klass_closure);
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5375,14 +5361,12 @@
   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
                                 false,     // yg was scanned above
                                 false,     // this is parallel code
-                                false,     // not scavenging
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mrias_cl,
-                                true,   // walk all of code cache if (so & SO_CodeCache)
                                 NULL,
                                 NULL);     // The dirty klasses will be handled below
   assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
+         || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   _timer.stop();
   if (PrintCMSStatistics != 0) {
@@ -5537,8 +5521,8 @@
   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
   // CAUTION: This closure has state that persists across calls to
   // the work method dirty_range_iterate_clear() in that it has
-  // imbedded in it a (subtype of) UpwardsObjectClosure. The
-  // use of that state in the imbedded UpwardsObjectClosure instance
+  // embedded in it a (subtype of) UpwardsObjectClosure. The
+  // use of that state in the embedded UpwardsObjectClosure instance
   // assumes that the cards are always iterated (even if in parallel
   // by several threads) in monotonically increasing order per each
   // thread. This is true of the implementation below which picks
@@ -5553,7 +5537,7 @@
   // sure that the changes there do not run counter to the
   // assumptions made here and necessary for correctness and
   // efficiency. Note also that this code might yield inefficient
-  // behaviour in the case of very large objects that span one or
+  // behavior in the case of very large objects that span one or
   // more work chunks. Such objects would potentially be scanned
   // several times redundantly. Work on 4756801 should try and
   // address that performance anomaly if at all possible. XXX
@@ -5579,7 +5563,7 @@
 
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
-    // which is a-fortiori aligned correctly (i.e. at a MUT bopundary).
+    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
     // The alignment restriction ensures that we do not need any
     // synchronization with other gang-workers while setting or
     // clearing bits in this chunk of the MUT.
@@ -5966,15 +5950,13 @@
     gch->gen_process_strong_roots(_cmsGen->level(),
                                   true,  // younger gens as roots
                                   false, // use the local StrongRootsScope
-                                  false, // not scavenging
                                   SharedHeap::ScanningOption(roots_scanning_options()),
                                   &mrias_cl,
-                                  true,   // walk code active on stacks
                                   NULL,
                                   NULL);  // The dirty klasses will be handled below
 
     assert(should_unload_classes()
-           || (roots_scanning_options() & SharedHeap::SO_CodeCache),
+           || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
   }
 
@@ -6371,7 +6353,7 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -6732,7 +6714,7 @@
     warning("CMS bit map allocation failure");
     return false;
   }
-  // For now we'll just commit all of the bit map up fromt.
+  // For now we'll just commit all of the bit map up front.
   // Later on we'll try to be more parsimonious with swap.
   if (!_virtual_space.initialize(brs, brs.size())) {
     warning("CMS bit map backing store failure");
@@ -6839,8 +6821,8 @@
 
 // XXX FIX ME !!! In the MT case we come in here holding a
 // leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recallibrate the two
-// lock-ranks involved in order to be able to rpint the
+// which has lower rank. We need to recalibrate the two
+// lock-ranks involved in order to be able to print the
 // messages below. (Or defer the printing to the caller.
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
@@ -7180,7 +7162,7 @@
           }
         #endif // ASSERT
     } else {
-      // an unitialized object
+      // An uninitialized object.
       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
       size = pointer_delta(nextOneAddr + 1, addr);
@@ -7188,7 +7170,7 @@
              "alignment problem");
       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
       // will dirty the card when the klass pointer is installed in the
-      // object (signalling the completion of initialization).
+      // object (signaling the completion of initialization).
     }
   } else {
     // Either a not yet marked object or an uninitialized object
@@ -7249,7 +7231,7 @@
   HeapWord* addr = (HeapWord*)p;
   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
   assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass_or_null() != NULL, "object should be initializd");
+  assert(p->klass_or_null() != NULL, "object should be initialized");
   // an initialized object; ignore mark word in verification below
   // since we are running concurrent with mutators
   assert(p->is_oop(true), "should be an oop");
@@ -7999,7 +7981,7 @@
          // we need to dirty all of the cards that the object spans,
          // since the rescan of object arrays will be limited to the
          // dirty cards.
-         // Note that no one can be intefering with us in this action
+         // Note that no one can be interfering with us in this action
          // of dirtying the mod union table, so no locking or atomics
          // are required.
          if (obj->is_objArray()) {
@@ -9025,7 +9007,7 @@
 
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool CMSCollector::simulate_overflow() {
@@ -9145,7 +9127,7 @@
       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
     }
   } else {
-    // Chop off the suffix and rerturn it to the global list.
+    // Chop off the suffix and return it to the global list.
     assert(cur->mark() != BUSY, "Error");
     oop suffix_head = cur->mark(); // suffix will be put back on global list
     cur->set_mark(NULL);           // break off suffix
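
For orientation (editorial): the overflow list manipulated here is an
intrusive stack whose links live in the objects' mark words, with the
distinguished BUSY value CAS-ed into _overflow_list to claim exclusive
access. "Chopping off the suffix" therefore means reading the mark word of
the last kept element to obtain the suffix head, NULL-ing that mark to
terminate the kept prefix, and publishing the suffix head back so other
threads can keep draining the global list.
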
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -171,19 +171,19 @@
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
 class CMSMarkStack: public CHeapObj<mtGC>  {
   //
-  friend class CMSCollector;   // to get at expasion stats further below
+  friend class CMSCollector;   // To get at expansion stats further below.
   //
 
-  VirtualSpace _virtual_space;  // space for the stack
-  oop*   _base;      // bottom of stack
-  size_t _index;     // one more than last occupied index
-  size_t _capacity;  // max #elements
-  Mutex  _par_lock;  // an advisory lock used in case of parallel access
-  NOT_PRODUCT(size_t _max_depth;)  // max depth plumbed during run
+  VirtualSpace _virtual_space;  // Space for the stack
+  oop*   _base;      // Bottom of stack
+  size_t _index;     // One more than last occupied index
+  size_t _capacity;  // Max #elements
+  Mutex  _par_lock;  // An advisory lock used in case of parallel access
+  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run
 
  protected:
-  size_t _hit_limit;      // we hit max stack size limit
-  size_t _failed_double;  // we failed expansion before hitting limit
+  size_t _hit_limit;      // We hit max stack size limit
+  size_t _failed_double;  // We failed expansion before hitting limit
 
  public:
   CMSMarkStack():
@@ -238,7 +238,7 @@
     _index = 0;
   }
 
-  // Expand the stack, typically in response to an overflow condition
+  // Expand the stack, typically in response to an overflow condition.
   void expand();
 
   // Compute the least valued stack element.
@@ -250,7 +250,7 @@
      return least;
   }
 
-  // Exposed here to allow stack expansion in || case
+  // Exposed here to allow stack expansion in || case.
   Mutex* par_lock() { return &_par_lock; }
 };
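
A minimal sketch of how a push interacts with the fields above (editorial
and simplified; the real class also has parallel variants and records
overflow statistics in _hit_limit and _failed_double):

  bool push(oop p) {
    if (_index == _capacity) {
      return false;            // overflow: caller may expand() and retry
    }
    _base[_index++] = p;
    NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index);)
    return true;
  }
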
 
@@ -557,7 +557,7 @@
   // Manipulated with CAS in the parallel/multi-threaded case.
   oop _overflow_list;
   // The following array-pair keeps track of mark words
-  // displaced for accomodating overflow list above.
+  // displaced for accommodating overflow list above.
   // This code will likely be revisited under RFE#4922830.
   Stack<oop, mtGC>     _preserved_oop_stack;
   Stack<markOop, mtGC> _preserved_mark_stack;
@@ -599,7 +599,7 @@
   void verify_after_remark_work_1();
   void verify_after_remark_work_2();
 
-  // true if any verification flag is on.
+  // True if any verification flag is on.
   bool _verifying;
   bool verifying() const { return _verifying; }
   void set_verifying(bool v) { _verifying = v; }
@@ -611,9 +611,9 @@
   void set_did_compact(bool v);
 
   // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _inter_sweep_timer;   // time between sweeps
-  elapsedTimer _intra_sweep_timer;   // time _in_ sweeps
-  // padded decaying average estimates of the above
+  elapsedTimer _inter_sweep_timer;   // Time between sweeps
+  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
+  // Padded decaying average estimates of the above
   AdaptivePaddedAverage _inter_sweep_estimate;
   AdaptivePaddedAverage _intra_sweep_estimate;
 
@@ -632,16 +632,16 @@
   void report_heap_summary(GCWhen::Type when);
 
  protected:
-  ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
-  MemRegion                      _span;    // span covering above two
-  CardTableRS*                   _ct;      // card table
+  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
+  MemRegion                      _span;    // Span covering above two
+  CardTableRS*                   _ct;      // Card table
 
   // CMS marking support structures
   CMSBitMap     _markBitMap;
   CMSBitMap     _modUnionTable;
   CMSMarkStack  _markStack;
 
-  HeapWord*     _restart_addr; // in support of marking stack overflow
+  HeapWord*     _restart_addr; // In support of marking stack overflow
   void          lower_restart_addr(HeapWord* low);
 
   // Counters in support of marking stack / work queue overflow handling:
@@ -656,12 +656,12 @@
   size_t        _par_kac_ovflw;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-  // ("Weak") Reference processing support
+  // ("Weak") Reference processing support.
   ReferenceProcessor*            _ref_processor;
   CMSIsAliveClosure              _is_alive_closure;
-      // keep this textually after _markBitMap and _span; c'tor dependency
+  // Keep this textually after _markBitMap and _span; c'tor dependency.
 
-  ConcurrentMarkSweepThread*     _cmsThread;   // the thread doing the work
+  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
   ModUnionClosure    _modUnionClosure;
   ModUnionClosurePar _modUnionClosurePar;
 
@@ -697,7 +697,7 @@
   // State related to prologue/epilogue invocation for my generations
   bool _between_prologue_and_epilogue;
 
-  // Signalling/State related to coordination between fore- and backgroud GC
+  // Signaling/State related to coordination between fore- and background GC
   // Note: When the baton has been passed from background GC to foreground GC,
   // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
   static bool _foregroundGCIsActive;    // true iff foreground collector is active or
@@ -712,13 +712,13 @@
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // number of full gc's since the last concurrent gc.
+  // Number of full gc's since the last concurrent gc.
   uint   _full_gcs_since_conc_gc;
 
-  // occupancy used for bootstrapping stats
+  // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
 
-  // timer
+  // Timer
   elapsedTimer _timer;
 
   // Timing, allocation and promotion statistics, used for scheduling.
@@ -770,7 +770,7 @@
                                    int no_of_gc_threads);
   void push_on_overflow_list(oop p);
   void par_push_on_overflow_list(oop p);
-  // the following is, obviously, not, in general, "MT-stable"
+  // The following is, obviously, not, in general, "MT-stable"
   bool overflow_list_is_empty() const;
 
   void preserve_mark_if_necessary(oop p);
@@ -778,24 +778,24 @@
   void preserve_mark_work(oop p, markOop m);
   void restore_preserved_marks_if_any();
   NOT_PRODUCT(bool no_preserved_marks() const;)
-  // in support of testing overflow code
+  // In support of testing overflow code
   NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool simulate_overflow();)       // sequential
+  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
+  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
 
-  // a return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // concurrent marking work
+  // A return value of false indicates failure due to stack overflow
+  bool markFromRootsWork(bool asynch);  // Concurrent marking work
 
  public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // single-threaded marking
-  bool do_marking_mt(bool asynch);      // multi-threaded  marking
+  bool do_marking_st(bool asynch);      // Single-threaded marking
+  bool do_marking_mt(bool asynch);      // Multi-threaded  marking
 
  private:
 
-  // concurrent precleaning work
+  // Concurrent precleaning work
   size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
                                   ScanMarkedObjectsAgainCarefullyClosure* cl);
   size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
@@ -811,26 +811,26 @@
   // Resets (i.e. clears) the per-thread plab sample vectors
   void reset_survivor_plab_arrays();
 
-  // final (second) checkpoint work
+  // Final (second) checkpoint work
   void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
                                 bool init_mark_was_synchronous);
-  // work routine for parallel version of remark
+  // Work routine for parallel version of remark
   void do_remark_parallel();
-  // work routine for non-parallel version of remark
+  // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
-  // reference processing work routine (during second checkpoint)
+  // Reference processing work routine (during second checkpoint)
   void refProcessingWork(bool asynch, bool clear_all_soft_refs);
 
-  // concurrent sweeping work
+  // Concurrent sweeping work
   void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
 
-  // (concurrent) resetting of support data structures
+  // (Concurrent) resetting of support data structures
   void reset(bool asynch);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
 
-  // An auxilliary method used to record the ends of
+  // An auxiliary method used to record the ends of
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
@@ -854,7 +854,7 @@
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
 
-  // If the backgrould GC is active, acquire control from the background
+  // If the background GC is active, acquire control from the background
   // GC and do the collection.
   void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
 
@@ -893,7 +893,7 @@
 
   ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
 
-  // locking checks
+  // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
   // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
@@ -958,7 +958,7 @@
   CMSBitMap* markBitMap()  { return &_markBitMap; }
   void directAllocated(HeapWord* start, size_t size);
 
-  // main CMS steps and related support
+  // Main CMS steps and related support
   void checkpointRootsInitial(bool asynch);
   bool markFromRoots(bool asynch);  // a return value of false indicates failure
                                     // due to stack overflow
@@ -977,7 +977,7 @@
   // Performance Counter Support
   CollectorCounters* counters()    { return _gc_counters; }
 
-  // timer stuff
+  // Timer stuff
   void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
   void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
   void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
@@ -1014,18 +1014,18 @@
 
   static void print_on_error(outputStream* st);
 
-  // debugging
+  // Debugging
   void verify();
   bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
 
-  // convenience methods in support of debugging
+  // Convenience methods in support of debugging
   static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
   HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
 
-  // accessors
+  // Accessors
   CMSMarkStack* verification_mark_stack() { return &_markStack; }
   CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
 
@@ -1109,7 +1109,7 @@
 
   CollectionTypes _debug_collection_type;
 
-  // True if a compactiing collection was done.
+  // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
 
@@ -1203,7 +1203,7 @@
 
   // Support for compaction
   CompactibleSpace* first_compaction_space() const;
-  // Adjust quantites in the generation affected by
+  // Adjust quantities in the generation affected by
   // the compaction.
   void reset_after_compaction();
 
@@ -1301,7 +1301,7 @@
   void setNearLargestChunk();
   bool isNearLargestChunk(HeapWord* addr);
 
-  // Get the chunk at the end of the space.  Delagates to
+  // Get the chunk at the end of the space.  Delegates to
   // the space.
   FreeChunk* find_chunk_at_end();
 
@@ -1383,13 +1383,6 @@
 // Closures of various sorts used by CMS to accomplish its work
 //
 
-// This closure is used to check that a certain set of oops is empty.
-class FalseClosure: public OopClosure {
- public:
-  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
-  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
-};
-
 // This closure is used to do concurrent marking from the roots
 // following the first checkpoint.
 class MarkFromRootsClosure: public BitMapClosure {
@@ -1422,7 +1415,6 @@
 // marking from the roots following the first checkpoint.
 // XXX This should really be a subclass of The serial version
 // above, but I have not had the time to refactor things cleanly.
-// That willbe done for Dolphin.
 class Par_MarkFromRootsClosure: public BitMapClosure {
   CMSCollector*  _collector;
   MemRegion      _whole_span;
@@ -1780,7 +1772,7 @@
   void do_already_free_chunk(FreeChunk *fc);
   // Work method called when processing an already free or a
   // freshly garbage chunk to do a lookahead and possibly a
-  // premptive flush if crossing over _limit.
+  // preemptive flush if crossing over _limit.
   void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
   // Process a garbage chunk during sweeping.
   size_t do_garbage_chunk(FreeChunk *fc);
@@ -1879,7 +1871,7 @@
 };
 
 // Allow yielding or short-circuiting of reference list
-// prelceaning work.
+// precleaning work.
 class CMSPrecleanRefsYieldClosure: public YieldClosure {
   CMSCollector* _collector;
   void do_yield_work();
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -197,13 +197,13 @@
 }
 
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
   return getNextUnmarkedWordAddress(addr, endWord());
 }
 
-// Return the HeapWord address corrsponding to the next "0" bit
+// Return the HeapWord address corresponding to the next "0" bit
 // (inclusive).
 inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
   HeapWord* start_addr, HeapWord* end_addr) const {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -164,7 +164,7 @@
   //  _pending_yields that holds the sum (of both sync and async requests), and
   //  a second counter _pending_decrements that only holds the async requests,
   //  for greater efficiency, since in a typical CMS run, there are many more
-  //  pontential (i.e. static) yield points than there are actual
+  //  potential (i.e. static) yield points than there are actual
   //  (i.e. dynamic) yields because of requests, which are few and far between.
   //
   // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -279,7 +279,7 @@
 // When _spoolTail is NULL, then the set of slots with displaced headers
 // is all those starting at the slot <_spoolHead, _firstIndex> and
 // going up to the last slot of last block in the linked list.
-// In this lartter case, _splice_point points to the tail block of
+// In this latter case, _splice_point points to the tail block of
 // this linked list of blocks holding displaced headers.
 void PromotionInfo::verify() const {
   // Verify the following:
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -35,14 +35,6 @@
 #include "utilities/dtrace.hpp"
 
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
-HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);
-
-HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
-HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
-#endif /* !USDT2 */
-
 //////////////////////////////////////////////////////////
 // Methods in abstract class VM_CMS_Operation
 //////////////////////////////////////////////////////////
@@ -138,12 +130,7 @@
     // Nothing to do.
     return;
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
-#else /* USDT2 */
-  HS_PRIVATE_CMS_INITMARK_BEGIN(
-                                );
-#endif /* USDT2 */
+  HS_PRIVATE_CMS_INITMARK_BEGIN();
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
@@ -159,12 +146,7 @@
 
   _collector->_gc_timer_cm->register_gc_pause_end();
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
-#else /* USDT2 */
-  HS_PRIVATE_CMS_INITMARK_END(
-                                );
-#endif /* USDT2 */
+  HS_PRIVATE_CMS_INITMARK_END();
 }
 
 //////////////////////////////////////////////////////////
@@ -175,12 +157,7 @@
     // Nothing to do.
     return;
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__remark__begin);
-#else /* USDT2 */
-  HS_PRIVATE_CMS_REMARK_BEGIN(
-                                );
-#endif /* USDT2 */
+  HS_PRIVATE_CMS_REMARK_BEGIN();
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
@@ -197,12 +174,7 @@
   _collector->save_heap_summary();
   _collector->_gc_timer_cm->register_gc_pause_end();
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__remark__end);
-#else /* USDT2 */
-  HS_PRIVATE_CMS_REMARK_END(
-                                );
-#endif /* USDT2 */
+  HS_PRIVATE_CMS_REMARK_END();
 }
 
 // VM operation to invoke a concurrent collection of a
@@ -258,7 +230,7 @@
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
       assert(_gc_count_before < gch->total_collections(),
-             "total_collections() should be monotnically increasing");
+             "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
   }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
   nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
   nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   AFLBinaryTreeDictionary*)              \
-  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           FreeList<FreeChunk>)                   \
+  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           AdaptiveFreeList<FreeChunk>)           \
   nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,271 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/bufferingOopClosure.hpp"
+#include "memory/iterator.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestBufferingOopClosure {
+
+  // Helper class to fake a set of oop*s and narrowOop*s.
+  class FakeRoots {
+   public:
+    // Used for sanity checking of the values passed to the do_oop functions in the test.
+    static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord -1);
+
+    int    _num_narrow;
+    int    _num_full;
+    void** _narrow;
+    void** _full;
+
+    FakeRoots(int num_narrow, int num_full) :
+        _num_narrow(num_narrow),
+        _num_full(num_full),
+        _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
+        _full((void**)::malloc(sizeof(void*) * num_full)) {
+
+      for (int i = 0; i < num_narrow; i++) {
+        _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
+      }
+      for (int i = 0; i < num_full; i++) {
+        _full[i] = (void*)(uintptr_t)i;
+      }
+    }
+
+    ~FakeRoots() {
+      ::free(_narrow);
+      ::free(_full);
+    }
+
+    void oops_do_narrow_then_full(OopClosure* cl) {
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+    }
+
+    void oops_do_full_then_narrow(OopClosure* cl) {
+      for (int i = 0; i < _num_full; i++) {
+        cl->do_oop((oop*)_full[i]);
+      }
+      for (int i = 0; i < _num_narrow; i++) {
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+    }
+
+    void oops_do_mixed(OopClosure* cl) {
+      int i;
+      for (i = 0; i < _num_full && i < _num_narrow; i++) {
+        cl->do_oop((oop*)_full[i]);
+        cl->do_oop((narrowOop*)_narrow[i]);
+      }
+      for (int j = i; j < _num_full; j++) {
+        cl->do_oop((oop*)_full[j]);
+      }
+      for (int j = i; j < _num_narrow; j++) {
+        cl->do_oop((narrowOop*)_narrow[j]);
+      }
+    }
+
+    static const int MaxOrder = 2;
+
+    void oops_do(OopClosure* cl, int do_oop_order) {
+      switch(do_oop_order) {
+        case 0:
+          oops_do_narrow_then_full(cl);
+          break;
+        case 1:
+          oops_do_full_then_narrow(cl);
+          break;
+        case 2:
+          oops_do_mixed(cl);
+          break;
+        default:
+          oops_do_narrow_then_full(cl);
+          break;
+      }
+    }
+  };
+
+  class CountOopClosure : public OopClosure {
+    int _narrow_oop_count;
+    int _full_oop_count;
+   public:
+    CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
+    void do_oop(narrowOop* p) {
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) != 0,
+          "The narrowOop was unexpectedly not marked with the NarrowOopMarker");
+      _narrow_oop_count++;
+    }
+
+    void do_oop(oop* p){
+      assert((uintptr_t(p) & FakeRoots::NarrowOopMarker) == 0,
+          "The oop was unexpectedly marked with the NarrowOopMarker");
+      _full_oop_count++;
+    }
+
+    int narrow_oop_count() { return _narrow_oop_count; }
+    int full_oop_count()   { return _full_oop_count; }
+    int all_oop_count()    { return _narrow_oop_count + _full_oop_count; }
+  };
+
+  class DoNothingOopClosure : public OopClosure {
+   public:
+    void do_oop(narrowOop* p) {}
+    void do_oop(oop* p)       {}
+  };
+
+  static void testCount(int num_narrow, int num_full, int do_oop_order) {
+    FakeRoots fr(num_narrow, num_full);
+
+    CountOopClosure coc;
+    BufferingOopClosure boc(&coc);
+
+    fr.oops_do(&boc, do_oop_order);
+
+    boc.done();
+
+    #define assert_testCount(got, expected)                                     \
+       assert((got) == (expected),                                              \
+           err_msg("Expected: %d, got: %d, when running testCount(%d, %d, %d)", \
+               (got), (expected), num_narrow, num_full, do_oop_order))
+
+    assert_testCount(num_narrow, coc.narrow_oop_count());
+    assert_testCount(num_full, coc.full_oop_count());
+    assert_testCount(num_narrow + num_full, coc.all_oop_count());
+  }
+
+  static void testCount() {
+    int buffer_length = BufferingOopClosure::BufferLength;
+
+    for (int order = 0; order < FakeRoots::MaxOrder; order++) {
+      testCount(0,                 0,                 order);
+      testCount(10,                0,                 order);
+      testCount(0,                 10,                order);
+      testCount(10,                10,                order);
+      testCount(buffer_length,     10,                order);
+      testCount(10,                buffer_length,     order);
+      testCount(buffer_length,     buffer_length,     order);
+      testCount(buffer_length + 1, 10,                order);
+      testCount(10,                buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length,     order);
+      testCount(buffer_length,     buffer_length + 1, order);
+      testCount(buffer_length + 1, buffer_length + 1, order);
+    }
+  }
+
+  static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    #define assert_testIsBufferEmptyOrFull(got, expected)                             \
+        assert((got) == (expected),                                                   \
+            err_msg("Expected: %d, got: %d. testIsBufferEmptyOrFull(%d, %d, %s, %s)", \
+                (got), (expected), num_narrow, num_full,                              \
+                BOOL_TO_STR(expect_empty), BOOL_TO_STR(expect_full)))
+
+    assert_testIsBufferEmptyOrFull(expect_empty, boc.is_buffer_empty());
+    assert_testIsBufferEmptyOrFull(expect_full, boc.is_buffer_full());
+  }
+
+  static void testIsBufferEmptyOrFull() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testIsBufferEmptyOrFull(0,       0, true,  false);
+    testIsBufferEmptyOrFull(1,       0, false, false);
+    testIsBufferEmptyOrFull(0,       1, false, false);
+    testIsBufferEmptyOrFull(1,       1, false, false);
+    testIsBufferEmptyOrFull(10,      0, false, false);
+    testIsBufferEmptyOrFull(0,      10, false, false);
+    testIsBufferEmptyOrFull(10,     10, false, false);
+    testIsBufferEmptyOrFull(0,      bl, false, true);
+    testIsBufferEmptyOrFull(bl,      0, false, true);
+    testIsBufferEmptyOrFull(bl/2, bl/2, false, true);
+    testIsBufferEmptyOrFull(bl-1,    1, false, true);
+    testIsBufferEmptyOrFull(1,    bl-1, false, true);
+    // Processed: these sizes force the buffer to drain at least once.
+    testIsBufferEmptyOrFull(bl+1,    0, false, false);
+    testIsBufferEmptyOrFull(bl*2,    0, false, true);
+  }
+
+  static void testEmptyAfterDone(int num_narrow, int num_full) {
+    FakeRoots fr(num_narrow, num_full);
+
+    DoNothingOopClosure cl;
+    BufferingOopClosure boc(&cl);
+
+    fr.oops_do(&boc, 0);
+
+    // Make sure all get processed.
+    boc.done();
+
+    assert(boc.is_buffer_empty(),
+        err_msg("Should be empty after call to done(). testEmptyAfterDone(%d, %d)",
+            num_narrow, num_full));
+  }
+
+  static void testEmptyAfterDone() {
+    int bl = BufferingOopClosure::BufferLength;
+
+    testEmptyAfterDone(0,       0);
+    testEmptyAfterDone(1,       0);
+    testEmptyAfterDone(0,       1);
+    testEmptyAfterDone(1,       1);
+    testEmptyAfterDone(10,      0);
+    testEmptyAfterDone(0,      10);
+    testEmptyAfterDone(10,     10);
+    testEmptyAfterDone(0,      bl);
+    testEmptyAfterDone(bl,      0);
+    testEmptyAfterDone(bl/2, bl/2);
+    testEmptyAfterDone(bl-1,    1);
+    testEmptyAfterDone(1,    bl-1);
+    // Processed: these sizes force the buffer to drain at least once.
+    testEmptyAfterDone(bl+1,    0);
+    testEmptyAfterDone(bl*2,    0);
+  }
+
+  public:
+  static void test() {
+    testCount();
+    testIsBufferEmptyOrFull();
+    testEmptyAfterDone();
+  }
+};
+
+void TestBufferingOopClosure_test() {
+  TestBufferingOopClosure::test();
+}
+
+#endif
--- a/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/bufferingOopClosure.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_BUFFERINGOOPCLOSURE_HPP
 
-#include "memory/genOopClosures.hpp"
-#include "memory/generation.hpp"
+#include "memory/iterator.hpp"
+#include "oops/oopsHierarchy.hpp"
 #include "runtime/os.hpp"
-#include "utilities/taskqueue.hpp"
+#include "utilities/debug.hpp"
 
 // A BufferingOopClosure tries to separate out the cost of finding roots
 // from the cost of applying closures to them.  It maintains an array of
@@ -39,174 +39,105 @@
 // up, the wrapped closure is applied to all elements, keeping track of
 // the elapsed time of this process, and leaving the array empty.
 // The caller must be sure to call "done" to process any unprocessed
-// buffered entriess.
-
-class Generation;
-class HeapRegion;
+// buffered entries.
 
 class BufferingOopClosure: public OopClosure {
+  friend class TestBufferingOopClosure;
 protected:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
+  static const size_t BufferLength = 1024;
 
-  StarTask  _buffer[BufferLength];
-  StarTask* _buffer_top;
-  StarTask* _buffer_curr;
+  // We need to know if the buffered addresses contain oops or narrowOops.
+  // We can't tag the addresses the way StarTask does, because we need to
+  // be able to handle unaligned addresses coming from oops embedded in code.
+  //
+  // The addresses for the full-sized oops are filled in from the bottom,
+  // while the addresses for the narrowOops are filled in from the top.
+  OopOrNarrowOopStar  _buffer[BufferLength];
+  OopOrNarrowOopStar* _oop_top;
+  OopOrNarrowOopStar* _narrowOop_bottom;
 
   OopClosure* _oc;
   double      _closure_app_seconds;
 
-  void process_buffer () {
+
+  bool is_buffer_empty() {
+    return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
+  }
+
+  bool is_buffer_full() {
+    return _narrowOop_bottom < _oop_top;
+  }
+
+  // Process addresses containing full-sized oops.
+  void process_oops() {
+    for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
+      _oc->do_oop((oop*)(*curr));
+    }
+    _oop_top = _buffer;
+  }
+
+  // Process addresses containing narrow oops.
+  void process_narrowOops() {
+    for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
+      _oc->do_oop((narrowOop*)(*curr));
+    }
+    _narrowOop_bottom = _buffer + BufferLength - 1;
+  }
+
+  // Apply the closure to all oops and clear the buffer.
+  // Accumulate the time it took.
+  void process_buffer() {
     double start = os::elapsedTime();
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
-    }
-    _buffer_curr = _buffer;
+
+    process_oops();
+    process_narrowOops();
+
     _closure_app_seconds += (os::elapsedTime() - start);
   }
 
-  template <class T> inline void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
-      process_buffer();
-    }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
-  }
-
-public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  void done () {
-    if (_buffer_curr > _buffer) {
+  void process_buffer_if_full() {
+    if (is_buffer_full()) {
       process_buffer();
     }
   }
-  double closure_app_seconds () {
-    return _closure_app_seconds;
-  }
-  BufferingOopClosure (OopClosure *oc) :
-    _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
-    _closure_app_seconds(0.0) { }
-};
 
-class BufferingOopsInGenClosure: public OopsInGenClosure {
-  BufferingOopClosure _boc;
-  OopsInGenClosure* _oc;
- protected:
-  template <class T> inline void do_oop_work(T* p) {
-    assert(generation()->is_in_reserved((void*)p), "Must be in!");
-    _boc.do_oop(p);
-  }
- public:
-  BufferingOopsInGenClosure(OopsInGenClosure *oc) :
-    _boc(oc), _oc(oc) {}
-
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(oop* p)       { do_oop_work(p); }
-
-  void done() {
-    _boc.done();
-  }
-
-  double closure_app_seconds () {
-    return _boc.closure_app_seconds();
-  }
-
-  void set_generation(Generation* gen) {
-    OopsInGenClosure::set_generation(gen);
-    _oc->set_generation(gen);
-  }
-
-  void reset_generation() {
-    // Make sure we finish the current work with the current generation.
-    _boc.done();
-    OopsInGenClosure::reset_generation();
-    _oc->reset_generation();
+  void add_narrowOop(narrowOop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_narrowOop_bottom = (OopOrNarrowOopStar)p;
+    _narrowOop_bottom--;
   }
 
-};
-
-
-class BufferingOopsInHeapRegionClosure: public OopsInHeapRegionClosure {
-private:
-  enum PrivateConstants {
-    BufferLength = 1024
-  };
-
-  StarTask     _buffer[BufferLength];
-  StarTask*    _buffer_top;
-  StarTask*    _buffer_curr;
-
-  HeapRegion*  _hr_buffer[BufferLength];
-  HeapRegion** _hr_curr;
-
-  OopsInHeapRegionClosure*  _oc;
-  double                    _closure_app_seconds;
-
-  void process_buffer () {
-
-    assert((_hr_curr - _hr_buffer) == (_buffer_curr - _buffer),
-           "the two lengths should be the same");
-
-    double start = os::elapsedTime();
-    HeapRegion** hr_curr = _hr_buffer;
-    HeapRegion*  hr_prev = NULL;
-    for (StarTask* curr = _buffer; curr < _buffer_curr; ++curr) {
-      HeapRegion* region = *hr_curr;
-      if (region != hr_prev) {
-        _oc->set_region(region);
-        hr_prev = region;
-      }
-      if (curr->is_narrow()) {
-        assert(UseCompressedOops, "Error");
-        _oc->do_oop((narrowOop*)(*curr));
-      } else {
-        _oc->do_oop((oop*)(*curr));
-      }
-      ++hr_curr;
-    }
-    _buffer_curr = _buffer;
-    _hr_curr = _hr_buffer;
-    _closure_app_seconds += (os::elapsedTime() - start);
+  void add_oop(oop* p) {
+    assert(!is_buffer_full(), "Buffer should not be full");
+    *_oop_top = (OopOrNarrowOopStar)p;
+    _oop_top++;
   }
 
 public:
-  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
-  virtual void do_oop(      oop* p) { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) {
+    process_buffer_if_full();
+    add_narrowOop(p);
+  }
 
-  template <class T> void do_oop_work(T* p) {
-    if (_buffer_curr == _buffer_top) {
-      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
-      process_buffer();
-    }
-    StarTask new_ref(p);
-    *_buffer_curr = new_ref;
-    ++_buffer_curr;
-    *_hr_curr = _from;
-    ++_hr_curr;
+  virtual void do_oop(oop* p)       {
+    process_buffer_if_full();
+    add_oop(p);
   }
-  void done () {
-    if (_buffer_curr > _buffer) {
-      assert(_hr_curr > _hr_buffer, "_hr_curr should be consistent with _buffer_curr");
+
+  void done() {
+    if (!is_buffer_empty()) {
       process_buffer();
     }
   }
-  double closure_app_seconds () {
+
+  double closure_app_seconds() {
     return _closure_app_seconds;
   }
-  BufferingOopsInHeapRegionClosure (OopsInHeapRegionClosure *oc) :
+
+  BufferingOopClosure(OopClosure *oc) :
     _oc(oc),
-    _buffer_curr(_buffer), _buffer_top(_buffer + BufferLength),
-    _hr_curr(_hr_buffer),
+    _oop_top(_buffer),
+    _narrowOop_bottom(_buffer + BufferLength - 1),
     _closure_app_seconds(0.0) { }
 };
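The header comment above captures the central design change: full-width
oop* addresses grow upward from the bottom of the buffer and narrowOop*
addresses grow downward from the top, so no tag bits are needed and the
unaligned addresses of oops embedded in code are handled. A self-contained
sketch of the two-ended scheme, with plain void* standing in for
OopOrNarrowOopStar and a small capacity for readability:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Sketch only: a two-ended buffer that distinguishes two pointer kinds by
// which end of the array they are stored at, as BufferingOopClosure does.
class TwoEndedBuffer {
  static const size_t BufferLength = 8;   // 1024 in the real class
  void*  _buffer[BufferLength];
  void** _oop_top;                        // next free slot, grows upward
  void** _narrow_bottom;                  // next free slot, grows downward

public:
  TwoEndedBuffer()
    : _oop_top(_buffer), _narrow_bottom(_buffer + BufferLength - 1) {}

  // Empty and full fall out of the pointer positions alone.
  bool is_empty() const {
    return _oop_top == _buffer && _narrow_bottom == _buffer + BufferLength - 1;
  }
  bool is_full() const { return _narrow_bottom < _oop_top; }  // ends crossed

  void add_oop(void* p)    { assert(!is_full()); *_oop_top++ = p; }
  void add_narrow(void* p) { assert(!is_full()); *_narrow_bottom-- = p; }
};

int main() {
  TwoEndedBuffer buf;
  int a = 0, b = 0;
  buf.add_oop(&a);                        // stored at the bottom end
  buf.add_narrow(&b);                     // stored at the top end
  std::printf("empty=%d full=%d\n", buf.is_empty(), buf.is_full());
  return 0;
}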
 
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -33,7 +33,7 @@
   _threads(NULL), _n_threads(0),
   _hot_card_cache(g1h)
 {
-  // Ergomonically select initial concurrent refinement parameters
+  // Ergonomically select initial concurrent refinement parameters
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -44,8 +44,8 @@
   _vtime_accum(0.0)
 {
 
-  // Each thread has its own monitor. The i-th thread is responsible for signalling
-  // to thread i+1 if the number of buffers in the queue exceeds a threashold for this
+  // Each thread has its own monitor. The i-th thread is responsible for signaling
+  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
   // thread. Monitors are also used to wake up the threads during termination.
 // The 0th worker is notified by mutator threads and has a special monitor.
   // The last worker is used for young gen rset size sampling.
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -909,7 +909,7 @@
   }
 #endif
 
-  // Initialise marking structures. This has to be done in a STW phase.
+  // Initialize marking structures. This has to be done in a STW phase.
   reset();
 
   // For each region note start of marking.
@@ -923,8 +923,8 @@
 
   // If we force an overflow during remark, the remark operation will
   // actually abort and we'll restart concurrent marking. If we always
-  // force an oveflow during remark we'll never actually complete the
-  // marking phase. So, we initilize this here, at the start of the
+  // force an overflow during remark we'll never actually complete the
+  // marking phase. So, we initialize this here, at the start of the
 // cycle, so that the remaining overflow number will decrease at
   // every remark and we'll eventually not need to cause one.
   force_overflow_stw()->init();
@@ -959,7 +959,7 @@
  *
  * Note, however, that this code is also used during remark and in
  * this case we should not attempt to leave / enter the STS, otherwise
- * we'll either hit an asseert (debug / fastdebug) or deadlock
+ * we'll either hit an assert (debug / fastdebug) or deadlock
  * (product). So we should only leave / enter the STS if we are
  * operating concurrently.
  *
@@ -1001,7 +1001,7 @@
       // task 0 is responsible for clearing the global data structures
       // We should be here because of an overflow. During STW we should
       // not clear the overflow flag since we rely on it being true when
-      // we exit this method to abort the pause and restart concurent
+      // we exit this method to abort the pause and restart concurrent
       // marking.
       reset_marking_state(true /* clear_overflow */);
       force_overflow()->update();
@@ -1251,7 +1251,7 @@
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (use_parallel_marking_threads()) {
     _parallel_workers->set_active_workers((int)active_workers);
-    // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
+    // Don't set _n_par_threads because it affects MT in process_strong_roots()
     // and the decisions on that MT processing is made elsewhere.
     assert(_parallel_workers->active_workers() > 0, "Should have been set");
     _parallel_workers->run_task(&markingTask);
@@ -1484,7 +1484,7 @@
     }
 
     // Set the marked bytes for the current region so that
-    // it can be queried by a calling verificiation routine
+    // it can be queried by a calling verification routine
     _region_marked_bytes = marked_bytes;
 
     return false;
@@ -1619,7 +1619,6 @@
   }
 };
 
-
 class G1ParVerifyFinalCountTask: public AbstractGangTask {
 protected:
   G1CollectedHeap* _g1h;
@@ -2307,7 +2306,7 @@
       // oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
       //
       // CMTask::do_marking_step() is called in a loop, which we'll exit
-      // if there's nothing more to do (i.e. we'completely drained the
+      // if there's nothing more to do (i.e. we've completely drained the
 // entries that were pushed as a result of applying the 'keep alive'
       // closure to the entries on the discovered ref lists) or we overflow
       // the global marking stack.
@@ -2470,7 +2469,7 @@
     // reference processing is not multi-threaded and is thus
     // performed by the current thread instead of a gang worker).
     //
-    // The gang tasks involved in parallel reference procssing create
+    // The gang tasks involved in parallel reference processing create
     // their own instances of these closures, which do their own
     // synchronization among themselves.
     G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
@@ -2529,10 +2528,9 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
-  // Now clean up stale oops in StringTable
-  StringTable::unlink(&g1_is_alive);
-  // Clean up unreferenced symbols in symbol table.
-  SymbolTable::unlink();
+  g1h->unlink_string_and_symbol_table(&g1_is_alive,
+                                      /* process_strings */ false, // currently strings are always roots
+                                      /* process_symbols */ true);
 }
 
 void ConcurrentMark::swapMarkBitMaps() {
@@ -2548,7 +2546,7 @@
 public:
   void work(uint worker_id) {
     // Since all available tasks are actually started, we should
-    // only proceed if we're supposed to be actived.
+    // only proceed if we're supposed to be active.
     if (worker_id < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_id);
       task->record_start_time();
@@ -3068,7 +3066,7 @@
 
     // 'start' should be in the heap.
     assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
-    // 'end' *may* be just beyone the end of the heap (if hr is the last region)
+    // 'end' *may* be just beyond the end of the heap (if hr is the last region)
     assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
 
     BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
@@ -4416,7 +4414,7 @@
       // overflow was raised. This means we have to restart the
       // marking phase and start iterating over regions. However, in
       // order to do this we have to make sure that all tasks stop
-      // what they are doing and re-initialise in a safe manner. We
+      // what they are doing and re-initialize in a safe manner. We
       // will achieve this with the use of two barrier sync points.
 
       if (_cm->verbose_low()) {
@@ -4430,7 +4428,7 @@
 
         // When we exit this sync barrier we know that all tasks have
         // stopped doing marking work. So, it's now safe to
-        // re-initialise our data structures. At the end of this method,
+        // re-initialize our data structures. At the end of this method,
         // task 0 will clear the global data structures.
       }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -378,19 +378,19 @@
   friend class G1CMDrainMarkingStackClosure;
 
 protected:
-  ConcurrentMarkThread* _cmThread;   // the thread doing the work
-  G1CollectedHeap*      _g1h;        // the heap.
-  uint                  _parallel_marking_threads; // the number of marking
-                                                   // threads we're use
-  uint                  _max_parallel_marking_threads; // max number of marking
-                                                   // threads we'll ever use
-  double                _sleep_factor; // how much we have to sleep, with
+  ConcurrentMarkThread* _cmThread;   // The thread doing the work
+  G1CollectedHeap*      _g1h;        // The heap
+  uint                  _parallel_marking_threads; // The number of marking
+                                                   // threads we're using
+  uint                  _max_parallel_marking_threads; // Max number of marking
+                                                       // threads we'll ever use
+  double                _sleep_factor; // How much we have to sleep, with
                                        // respect to the work we just did, to
                                        // meet the marking overhead goal
-  double                _marking_task_overhead; // marking target overhead for
+  double                _marking_task_overhead; // Marking target overhead for
                                                 // a single task
 
-  // same as the two above, but for the cleanup task
+  // Same as the two above, but for the cleanup task
   double                _cleanup_sleep_factor;
   double                _cleanup_task_overhead;
 
@@ -399,8 +399,8 @@
   // Concurrent marking support structures
   CMBitMap                _markBitMap1;
   CMBitMap                _markBitMap2;
-  CMBitMapRO*             _prevMarkBitMap; // completed mark bitmap
-  CMBitMap*               _nextMarkBitMap; // under-construction mark bitmap
+  CMBitMapRO*             _prevMarkBitMap; // Completed mark bitmap
+  CMBitMap*               _nextMarkBitMap; // Under-construction mark bitmap
 
   BitMap                  _region_bm;
   BitMap                  _card_bm;
@@ -409,43 +409,43 @@
   HeapWord*               _heap_start;
   HeapWord*               _heap_end;
 
-  // Root region tracking and claiming.
+  // Root region tracking and claiming
   CMRootRegions           _root_regions;
 
   // For gray objects
-  CMMarkStack             _markStack; // Grey objects behind global finger.
-  HeapWord* volatile      _finger;  // the global finger, region aligned,
+  CMMarkStack             _markStack; // Grey objects behind global finger
+  HeapWord* volatile      _finger;  // The global finger, region aligned,
                                     // always points to the end of the
                                     // last claimed region
 
-  // marking tasks
-  uint                    _max_worker_id;// maximum worker id
-  uint                    _active_tasks; // task num currently active
-  CMTask**                _tasks;        // task queue array (max_worker_id len)
-  CMTaskQueueSet*         _task_queues;  // task queue set
-  ParallelTaskTerminator  _terminator;   // for termination
+  // Marking tasks
+  uint                    _max_worker_id;// Maximum worker id
+  uint                    _active_tasks; // Task num currently active
+  CMTask**                _tasks;        // Task queue array (max_worker_id len)
+  CMTaskQueueSet*         _task_queues;  // Task queue set
+  ParallelTaskTerminator  _terminator;   // For termination
 
-  // Two sync barriers that are used to synchronise tasks when an
+  // Two sync barriers that are used to synchronize tasks when an
   // overflow occurs. The algorithm is the following. All tasks enter
   // the first one to ensure that they have all stopped manipulating
-  // the global data structures. After they exit it, they re-initialise
-  // their data structures and task 0 re-initialises the global data
+  // the global data structures. After they exit it, they re-initialize
+  // their data structures and task 0 re-initializes the global data
   // structures. Then, they enter the second sync barrier. This
 // ensures that no task starts doing work before all data
-  // structures (local and global) have been re-initialised. When they
+  // structures (local and global) have been re-initialized. When they
   // exit it, they are free to start working again.
   WorkGangBarrierSync     _first_overflow_barrier_sync;
   WorkGangBarrierSync     _second_overflow_barrier_sync;
 
-  // this is set by any task, when an overflow on the global data
-  // structures is detected.
+  // This is set by any task, when an overflow on the global data
+  // structures is detected
   volatile bool           _has_overflown;
-  // true: marking is concurrent, false: we're in remark
+  // True: marking is concurrent, false: we're in remark
   volatile bool           _concurrent;
-  // set at the end of a Full GC so that marking aborts
+  // Set at the end of a Full GC so that marking aborts
   volatile bool           _has_aborted;
 
-  // used when remark aborts due to an overflow to indicate that
+  // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
   volatile bool           _restart_for_overflow;
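The two-barrier overflow protocol described in the comment above can be
illustrated with standard primitives; the sketch below uses C++20
std::barrier in place of WorkGangBarrierSync, and every name in it is
illustrative rather than taken from the VM:

#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned n_tasks = 4;
  std::barrier first_sync(n_tasks);       // _first_overflow_barrier_sync
  std::barrier second_sync(n_tasks);      // _second_overflow_barrier_sync

  auto task = [&](unsigned id) {
    // ... an overflow on the global structures has been detected ...
    first_sync.arrive_and_wait();         // all tasks have stopped touching
                                          // the global data structures
    // each task re-initializes its local structures here
    if (id == 0) {
      std::printf("task 0: re-initializing global structures\n");
    }
    second_sync.arrive_and_wait();        // nobody resumes before all
                                          // re-initialization is complete
    // ... safe to start working again ...
  };

  std::vector<std::thread> threads;
  for (unsigned id = 0; id < n_tasks; id++) threads.emplace_back(task, id);
  for (auto& t : threads) t.join();
  return 0;
}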
 
@@ -455,10 +455,10 @@
   // time of remark.
   volatile bool           _concurrent_marking_in_progress;
 
-  // verbose level
+  // Verbose level
   CMVerboseLevel          _verbose_level;
 
-  // All of these times are in ms.
+  // All of these times are in ms
   NumberSeq _init_times;
   NumberSeq _remark_times;
   NumberSeq   _remark_mark_times;
@@ -467,7 +467,7 @@
   double    _total_counting_time;
   double    _total_rs_scrub_time;
 
-  double*   _accum_task_vtime;   // accumulated task vtime
+  double*   _accum_task_vtime;   // Accumulated task vtime
 
   FlexibleWorkGang* _parallel_workers;
 
@@ -487,7 +487,7 @@
   void reset_marking_state(bool clear_overflow = true);
 
   // We do this after we're done with marking so that the marking data
-  // structures are initialised to a sensible and predictable state.
+  // structures are initialized to a sensible and predictable state.
   void set_non_marking_state();
 
   // Called to indicate how many threads are currently active.
@@ -497,14 +497,14 @@
   // mark or remark) and how many threads are currently active.
   void set_concurrency_and_phase(uint active_tasks, bool concurrent);
 
-  // prints all gathered CM-related statistics
+  // Prints all gathered CM-related statistics
   void print_stats();
 
   bool cleanup_list_is_empty() {
     return _cleanup_list.is_empty();
   }
 
-  // accessor methods
+  // Accessor methods
   uint parallel_marking_threads() const     { return _parallel_marking_threads; }
   uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
   double sleep_factor()                     { return _sleep_factor; }
@@ -542,7 +542,7 @@
   // frequently.
   HeapRegion* claim_region(uint worker_id);
 
-  // It determines whether we've run out of regions to scan.
+  // It determines whether we've run out of regions to scan
   bool        out_of_regions() { return _finger == _heap_end; }
 
   // Returns the task with the given id
@@ -816,7 +816,7 @@
   inline bool do_yield_check(uint worker_i = 0);
   inline bool should_yield();
 
-  // Called to abort the marking cycle after a Full GC takes palce.
+  // Called to abort the marking cycle after a Full GC takes place.
   void abort();
 
   bool has_aborted()      { return _has_aborted; }
@@ -933,11 +933,11 @@
 
   // Similar to the above routine but there are times when we cannot
   // safely calculate the size of obj due to races and we, therefore,
-  // pass the size in as a parameter. It is the caller's reponsibility
+  // pass the size in as a parameter. It is the caller's responsibility
   // to ensure that the size passed in for obj is valid.
   inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
 
-  // Unconditionally mark the given object, and unconditinally count
+  // Unconditionally mark the given object, and unconditionally count
   // the object in the counting structures for worker id 0.
   // Should *not* be called from parallel code.
   inline bool mark_and_count(oop obj, HeapRegion* hr);
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -105,7 +105,7 @@
   // will then correspond to a (non-existent) card that is also
   // just beyond the heap.
   if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
-    // end of region is not card aligned - incremement to cover
+    // end of region is not card aligned - increment to cover
     // all the cards spanned by the region.
     end_idx += 1;
   }
@@ -222,7 +222,7 @@
   return false;
 }
 
-// Unconditionally mark the given object, and unconditinally count
+// Unconditionally mark the given object, and unconditionally count
 // the object in the counting structures for worker id 0.
 // Should *not* be called from parallel code.
 inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -70,7 +70,7 @@
 
 inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
                                                           bool bot_updates) {
-  // First we have to tedo the allocation, assuming we're holding the
+  // First we have to redo the allocation, assuming we're holding the
   // appropriate lock, in case another thread changed the region while
   // we were waiting to get the lock.
   HeapWord* result = attempt_allocation(word_size, bot_updates);
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -79,7 +79,7 @@
     assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
       err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
         mapping_granularity_in_bytes, end));
-    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+    size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes);
     idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
     address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
     initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -448,7 +448,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -169,7 +169,7 @@
 
     // We use the last address in hr as hr could be the
     // last region in the heap. In which case trying to find
-    // the card for hr->end() will be an OOB accesss to the
+    // the card for hr->end() will be an OOB access to the
     // card table.
     HeapWord* last = hr->end() - 1;
     assert(_g1h->g1_committed().contains(last),
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,8 @@
 #include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
-#include "memory/genOopClosures.inline.hpp"
 #include "memory/generationSpec.hpp"
+#include "memory/iterator.hpp"
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
@@ -1575,8 +1575,6 @@
 void
 G1CollectedHeap::
 resize_if_necessary_after_full_collection(size_t word_size) {
-  assert(MinHeapFreeRatio <= MaxHeapFreeRatio, "sanity check");
-
   // Include the current allocation, if any, and bytes that will be
   // pre-allocated to support collections, as "used".
   const size_t used_after_gc = used();
@@ -2268,7 +2266,7 @@
                                 // (for efficiency/performance)
                            false);
                                 // Setting next fields of discovered
-                                // lists requires a barrier.
+                                // lists does not require a barrier.
 }
 
 size_t G1CollectedHeap::capacity() const {
@@ -2376,25 +2374,6 @@
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
@@ -3025,7 +3004,17 @@
 }
 
 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
-  return HeapRegion::GrainBytes;
+  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
+}
+
+size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
+  return young_list()->eden_used_bytes();
+}
+
+// For G1, TLABs should not contain humongous objects, so the maximum TLAB size
+// must be smaller than the humongous object limit.
+size_t G1CollectedHeap::max_tlab_size() const {
+  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
 }
 
 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
@@ -3037,11 +3026,11 @@
   // humongous objects.
 
   HeapRegion* hr = _mutator_alloc_region.get();
-  size_t max_tlab_size = _humongous_object_threshold_in_words * wordSize;
+  size_t max_tlab = max_tlab_size() * wordSize;
   if (hr == NULL) {
-    return max_tlab_size;
+    return max_tlab;
   } else {
-    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
+    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
   }
 }
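A worked example of the new TLAB sizing, under assumed values (1 MiB
regions, 8-byte heap words, a humongous threshold of half a region, and
word-sized object alignment; none of these numbers are taken from the
patch itself):

#include <cstddef>
#include <cstdio>

// alignment must be a power of two, as in the VM's align_size_down().
static size_t align_size_down_(size_t size, size_t alignment) {
  return size & ~(alignment - 1);
}

int main() {
  const size_t GrainBytes   = 1024 * 1024;   // assumed region size
  const size_t HeapWordSize = 8;
  const size_t humongous_threshold_in_words = GrainBytes / HeapWordSize / 2;
  const size_t MinObjAlignment = 1;          // one heap word

  // max_tlab_size(): strictly below the humongous threshold, aligned down,
  // so a TLAB can never itself be a humongous allocation.
  size_t max_tlab_words =
      align_size_down_(humongous_threshold_in_words - 1, MinObjAlignment);

  // tlab_capacity(): the eden portion of the target young generation,
  // i.e. (young target length - survivor regions) * GrainBytes.
  size_t young_target = 20, survivor_regions = 2;   // assumed
  size_t tlab_capacity_bytes = (young_target - survivor_regions) * GrainBytes;

  std::printf("max TLAB: %zu words, TLAB capacity: %zu bytes\n",
              max_tlab_words, tlab_capacity_bytes);
  return 0;
}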
 
@@ -3106,11 +3095,7 @@
   return NULL; // keep some compilers happy
 }
 
-// TODO: VerifyRootsClosure extends OopsInGenClosure so that we can
-//       pass it as the perm_blk to SharedHeap::process_strong_roots.
-//       When process_strong_roots stop calling perm_blk->younger_refs_iterate
-//       we can change this closure to extend the simpler OopClosure.
-class VerifyRootsClosure: public OopsInGenClosure {
+class VerifyRootsClosure: public OopClosure {
 private:
   G1CollectedHeap* _g1h;
   VerifyOption     _vo;
@@ -3146,7 +3131,7 @@
   void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-class G1VerifyCodeRootOopClosure: public OopsInGenClosure {
+class G1VerifyCodeRootOopClosure: public OopClosure {
   G1CollectedHeap* _g1h;
   OopClosure* _root_cl;
   nmethod* _nm;
@@ -3419,26 +3404,27 @@
 
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
-    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
-    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
 
     // We apply the relevant closures to all the oops in the
-    // system dictionary, the string table and the code cache.
-    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
+    // system dictionary, class loader data graph and the string table.
+    // Don't verify the code cache here, since it's verified below.
+    const int so = SO_AllClasses | SO_Strings;
 
     // Need cleared claim bits for the strong roots processing
     ClassLoaderDataGraph::clear_claimed_marks();
 
     process_strong_roots(true,      // activate StrongRootsScope
-                         false,     // we set "is scavenging" to false,
-                                    // so we don't reset the dirty cards.
                          ScanningOption(so),  // roots scanning options
                          &rootsCl,
-                         &blobsCl,
                          &klassCl
                          );
 
+    // Verify the nmethods in the code cache.
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
+    CodeCache::blobs_do(&blobsCl);
+
     bool failures = rootsCl.failures() || codeRootsCl.failures();
 
     if (vo != VerifyOption_G1UseMarkWord) {
@@ -3684,6 +3670,7 @@
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
+  accumulate_statistics_all_tlabs();
   ensure_parsability(true);
 
   if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
@@ -3708,6 +3695,8 @@
                         "derived pointer present"));
   // always_do_update_barrier = true;
 
+  resize_all_tlabs();
+
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
@@ -4555,7 +4544,7 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(false) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num)
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
@@ -4565,7 +4554,7 @@
     _term_attempts(0),
     _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
     _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false),
+    _age_table(false), _scanner(g1h, this, rp),
     _strong_roots_time(0), _term_time(0),
     _alloc_buffer_waste(0), _undo_waste(0) {
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
@@ -4674,14 +4663,10 @@
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
                                      G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
-  _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()),
-  _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
-  _mark_in_progress(_g1->mark_in_progress()) { }
-
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>::mark_object(oop obj) {
+  _g1(g1), _par_scan_state(par_scan_state),
+  _worker_id(par_scan_state->queue_num()) { }
+
+void G1ParCopyHelper::mark_object(oop obj) {
 #ifdef ASSERT
   HeapRegion* hr = _g1->heap_region_containing(obj);
   assert(hr != NULL, "sanity");
@@ -4692,9 +4677,7 @@
   _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
-  ::mark_forwarded_object(oop from_obj, oop to_obj) {
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 #ifdef ASSERT
   assert(from_obj->is_forwarded(), "from obj should be forwarded");
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
@@ -4716,27 +4699,25 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
-  ::copy_to_survivor_space(oop old) {
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
   size_t word_sz = old->size();
-  HeapRegion* from_region = _g1->heap_region_containing_raw(old);
+  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
   int       young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1->g1_policy();
+  G1CollectorPolicy* g1p = _g1h->g1_policy();
   markOop m = old->mark();
   int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                            : m->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
-  HeapWord* obj_ptr = _par_scan_state->allocate(alloc_purpose, word_sz);
+  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
 #ifndef PRODUCT
   // Should this evacuation fail?
-  if (_g1->evacuation_should_fail()) {
+  if (_g1h->evacuation_should_fail()) {
     if (obj_ptr != NULL) {
-      _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      undo_allocation(alloc_purpose, obj_ptr, word_sz);
       obj_ptr = NULL;
     }
   }
@@ -4745,7 +4726,7 @@
   if (obj_ptr == NULL) {
     // This will either forward-to-self, or detect that someone else has
     // installed a forwarding pointer.
-    return _g1->handle_evacuation_failure_par(_par_scan_state, old);
+    return _g1h->handle_evacuation_failure_par(this, old);
   }
 
   oop obj = oop(obj_ptr);
@@ -4778,12 +4759,12 @@
         m = m->incr_age();
         obj->set_mark(m);
       }
-      _par_scan_state->age_table()->add(obj, word_sz);
+      age_table()->add(obj, word_sz);
     } else {
       obj->set_mark(m);
     }
 
-    size_t* surv_young_words = _par_scan_state->surviving_young_words();
+    size_t* surv_young_words = surviving_young_words();
     surv_young_words[young_index] += word_sz;
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -4792,15 +4773,15 @@
       // length field of the from-space object.
       arrayOop(obj)->set_length(0);
       oop* old_p = set_partial_array_mask(old);
-      _par_scan_state->push_on_queue(old_p);
+      push_on_queue(old_p);
     } else {
       // No point in using the slower heap_region_containing() method,
       // given that we know obj is in the heap.
-      _scanner.set_region(_g1->heap_region_containing_raw(obj));
+      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(&_scanner);
     }
   } else {
-    _par_scan_state->undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    undo_allocation(alloc_purpose, obj_ptr, word_sz);
     obj = forward_ptr;
   }
   return obj;
@@ -4813,23 +4794,25 @@
   }
 }
 
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template <G1Barrier barrier, bool do_mark_object>
 template <class T>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
-::do_oop_work(T* p) {
-  oop obj = oopDesc::load_decode_heap_oop(p);
-  assert(barrier != G1BarrierRS || obj != NULL,
-         "Precondition: G1BarrierRS implies obj is non-NULL");
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (oopDesc::is_null(heap_oop)) {
+    return;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  // here the null check is implicit in the cset_fast_test() test
   if (_g1->in_cset_fast_test(obj)) {
     oop forwardee;
     if (obj->is_forwarded()) {
       forwardee = obj->forwardee();
     } else {
-      forwardee = copy_to_survivor_space(obj);
+      forwardee = _par_scan_state->copy_to_survivor_space(obj);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4839,32 +4822,25 @@
       mark_forwarded_object(obj, forwardee);
     }
 
-    // When scanning the RS, we only care about objs in CS.
-    if (barrier == G1BarrierRS) {
-      _par_scan_state->update_rs(_from, p, _worker_id);
-    } else if (barrier == G1BarrierKlass) {
+    if (barrier == G1BarrierKlass) {
       do_klass_barrier(p, forwardee);
     }
   } else {
     // The object is not in collection set. If we're a root scanning
     // closure during an initial mark pause (i.e. do_mark_object will
     // be true) then attempt to mark the object.
-    if (do_mark_object && _g1->is_in_g1_reserved(obj)) {
+    if (do_mark_object) {
       mark_object(obj);
     }
   }
 
-  if (barrier == G1BarrierEvac && obj != NULL) {
+  if (barrier == G1BarrierEvac) {
     _par_scan_state->update_rs(_from, p, _worker_id);
   }
-
-  if (do_gen_barrier && obj != NULL) {
-    par_do_barrier(p);
-  }
-}
-
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(narrowOop* p);
+}
+
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(oop* p);
+template void G1ParCopyClosure<G1BarrierEvac, false>::do_oop_work(narrowOop* p);
 
 template <class T> void G1ParScanPartialArrayClosure::do_oop_nv(T* p) {
   assert(has_partial_array_mask(p), "invariant");
@@ -5055,7 +5031,7 @@
 
       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-      G1ParScanThreadState            pss(_g1h, worker_id);
+      G1ParScanThreadState            pss(_g1h, worker_id, rp);
       G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
       G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -5148,15 +5124,9 @@
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
-  assert(so & SO_CodeCache || scan_rs != NULL, "must scan code roots somehow");
-  // Walk the code cache/strong code roots w/o buffering, because StarTask
-  // cannot handle unaligned oop locations.
-  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
-
   process_strong_roots(false, // no scoping; this is parallel code
-                       is_scavenging, so,
+                       so,
                        &buf_scan_non_heap_roots,
-                       &eager_scan_code_roots,
                        scan_klasses
                        );
 
@@ -5202,7 +5172,7 @@
   // the collection set.
   // Note all threads participate in this set of root tasks.
   double mark_strong_code_roots_ms = 0.0;
-  if (g1_policy()->during_initial_mark_pause() && !(so & SO_CodeCache)) {
+  if (g1_policy()->during_initial_mark_pause() && !(so & SO_AllCodeCache)) {
     double mark_strong_roots_start = os::elapsedTime();
     mark_strong_code_roots(worker_i);
     mark_strong_code_roots_ms = (os::elapsedTime() - mark_strong_roots_start) * 1000.0;
@@ -5210,16 +5180,106 @@
   g1_policy()->phase_times()->record_strong_code_root_mark_time(worker_i, mark_strong_code_roots_ms);
 
   // Now scan the complement of the collection set.
-  if (scan_rs != NULL) {
-    g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
-  }
+  CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, true /* do_marking */);
+  g1_rem_set()->oops_into_collection_set_do(scan_rs, &eager_scan_code_roots, worker_i);
+
   _process_strong_tasks->all_tasks_completed();
 }
 
-void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
-  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
+class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
+private:
+  BoolObjectClosure* _is_alive;
+  int _initial_string_table_size;
+  int _initial_symbol_table_size;
+
+  bool  _process_strings;
+  int _strings_processed;
+  int _strings_removed;
+
+  bool  _process_symbols;
+  int _symbols_processed;
+  int _symbols_removed;
+
+  bool _do_in_parallel;
+public:
+  G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
+    AbstractGangTask("Par String/Symbol table unlink"), _is_alive(is_alive),
+    _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),
+    _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
+    _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {
+
+    _initial_string_table_size = StringTable::the_table()->table_size();
+    _initial_symbol_table_size = SymbolTable::the_table()->table_size();
+    if (process_strings) {
+      StringTable::clear_parallel_claimed_index();
+    }
+    if (process_symbols) {
+      SymbolTable::clear_parallel_claimed_index();
+    }
+  }
+
+  ~G1StringSymbolTableUnlinkTask() {
+    guarantee(!_process_strings || !_do_in_parallel || StringTable::parallel_claimed_index() >= _initial_string_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial string table size "INT32_FORMAT,
+                      StringTable::parallel_claimed_index(), _initial_string_table_size));
+    guarantee(!_process_symbols || !_do_in_parallel || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size,
+              err_msg("claim value "INT32_FORMAT" after unlink less than initial symbol table size "INT32_FORMAT,
+                      SymbolTable::parallel_claimed_index(), _initial_symbol_table_size));
+  }
+
+  void work(uint worker_id) {
+    if (_do_in_parallel) {
+      int strings_processed = 0;
+      int strings_removed = 0;
+      int symbols_processed = 0;
+      int symbols_removed = 0;
+      if (_process_strings) {
+        StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed);
+        Atomic::add(strings_processed, &_strings_processed);
+        Atomic::add(strings_removed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed);
+        Atomic::add(symbols_processed, &_symbols_processed);
+        Atomic::add(symbols_removed, &_symbols_removed);
+      }
+    } else {
+      if (_process_strings) {
+        StringTable::unlink(_is_alive, &_strings_processed, &_strings_removed);
+      }
+      if (_process_symbols) {
+        SymbolTable::unlink(&_symbols_processed, &_symbols_removed);
+      }
+    }
+  }
+
+  size_t strings_processed() const { return (size_t)_strings_processed; }
+  size_t strings_removed()   const { return (size_t)_strings_removed; }
+
+  size_t symbols_processed() const { return (size_t)_symbols_processed; }
+  size_t symbols_removed()   const { return (size_t)_symbols_removed; }
+};
+
+void G1CollectedHeap::unlink_string_and_symbol_table(BoolObjectClosure* is_alive,
+                                                     bool process_strings, bool process_symbols) {
+  uint n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+                   _g1h->workers()->active_workers() : 1);
+
+  G1StringSymbolTableUnlinkTask g1_unlink_task(is_alive, process_strings, process_symbols);
+  if (G1CollectedHeap::use_parallel_gc_threads()) {
+    set_par_threads(n_workers);
+    workers()->run_task(&g1_unlink_task);
+    set_par_threads(0);
+  } else {
+    g1_unlink_task.work(0);
+  }
+  if (G1TraceStringSymbolTableScrubbing) {
+    gclog_or_tty->print_cr("Cleaned string and symbol table, "
+                           "strings: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed, "
+                           "symbols: "SIZE_FORMAT" processed, "SIZE_FORMAT" removed",
+                           g1_unlink_task.strings_processed(), g1_unlink_task.strings_removed(),
+                           g1_unlink_task.symbols_processed(), g1_unlink_task.symbols_removed());
+  }
 }
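The work() method above follows a common unlink pattern: workers claim
buckets from a shared index, count into thread-local variables, and
publish the totals with a single Atomic::add per counter. A standalone
sketch of the same pattern, with std::atomic in place of Atomic::add and
a stand-in bucket count instead of StringTable/SymbolTable:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const int n_workers = 4, buckets = 1000;
  std::atomic<int> next_bucket{0};          // parallel_claimed_index analogue
  std::atomic<int> total_processed{0};

  auto work = [&]() {
    int processed = 0;                      // worker-local, no contention
    for (;;) {
      int b = next_bucket.fetch_add(1);     // claim one bucket at a time
      if (b >= buckets) break;
      processed++;                          // "unlink dead entries in bucket b"
    }
    total_processed.fetch_add(processed);   // publish once, like Atomic::add
  };

  std::vector<std::thread> workers;
  for (int i = 0; i < n_workers; i++) workers.emplace_back(work);
  for (auto& w : workers) w.join();

  // After the run the claimed index is at least the bucket count, which is
  // the invariant the task's destructor guarantee() checks above.
  std::printf("processed %d buckets\n", total_processed.load());
  return 0;
}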
 
 // Weak Reference Processing support
@@ -5402,7 +5462,7 @@
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState pss(_g1h, worker_id);
+    G1ParScanThreadState            pss(_g1h, worker_id, NULL);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5514,7 +5574,7 @@
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, worker_id);
+    G1ParScanThreadState            pss(_g1h, worker_id, NULL);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5640,7 +5700,7 @@
   // JNI refs.
 
   // Use only a single queue for this PSS.
-  G1ParScanThreadState pss(this, 0);
+  G1ParScanThreadState            pss(this, 0, NULL);
 
   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -209,7 +209,7 @@
   friend class OldGCAllocRegion;
 
   // Closures used in implementation.
-  template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+  template <G1Barrier barrier, bool do_mark_object>
   friend class G1ParCopyClosure;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
@@ -606,6 +606,11 @@
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
 
+  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
+                                    HeapRegion*    alloc_region,
+                                    bool           par,
+                                    size_t         word_size);
+
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
@@ -703,23 +708,20 @@
   }
 
   // This is a fast test on whether a reference points into the
-  // collection set or not. It does not assume that the reference
-  // points into the heap; if it doesn't, it will return false.
+  // collection set or not. The caller must ensure that the
+  // reference points into the heap.
   bool in_cset_fast_test(oop obj) {
     assert(_in_cset_fast_test != NULL, "sanity");
-    if (_g1_committed.contains((HeapWord*) obj)) {
-      // no need to subtract the bottom of the heap from obj,
-      // _in_cset_fast_test is biased
-      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
-      bool ret = _in_cset_fast_test[index];
-      // let's make sure the result is consistent with what the slower
-      // test returns
-      assert( ret || !obj_in_cs(obj), "sanity");
-      assert(!ret ||  obj_in_cs(obj), "sanity");
-      return ret;
-    } else {
-      return false;
-    }
+    assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
+    // no need to subtract the bottom of the heap from obj,
+    // _in_cset_fast_test is biased
+    uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
+    bool ret = _in_cset_fast_test[index];
+    // let's make sure the result is consistent with what the slower
+    // test returns
+    assert( ret || !obj_in_cs(obj), "sanity");
+    assert(!ret ||  obj_in_cs(obj), "sanity");
+    return ret;
   }
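The fast test above relies on a biased table: _in_cset_fast_test is
pre-offset by the heap bottom, so the lookup is a single shift and index
with no base subtraction. A sketch with assumed numbers (a 64 MiB heap
base and 1 MiB regions); the biased pointer is formed the same way the VM
forms it, which is formally out-of-range pointer arithmetic but is the
entire point of the trick:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const int log_region_size = 20;                     // 1 MiB regions, assumed
  const uintptr_t region_size = uintptr_t(1) << log_region_size;
  const uintptr_t heap_bottom = 64 * region_size;     // assumed heap base
  bool table[8] = { false, false, true, false, false, false, false, false };

  // Bias the table by the heap bottom so (addr >> shift) indexes it directly.
  bool* biased = table - (heap_bottom >> log_region_size);

  uintptr_t obj = heap_bottom + 2 * region_size + 48; // somewhere in region 2
  assert(biased[obj >> log_region_size]);             // region 2 is in the cset
  std::printf("in cset: %d\n", (int)biased[obj >> log_region_size]);
  return 0;
}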
 
   void clear_cset_fast_test() {
@@ -838,11 +840,6 @@
                                G1KlassScanClosure* scan_klasses,
                                int worker_i);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure);
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -1188,15 +1185,6 @@
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
     return _g1_storage.uncommitted_size() == 0;
   }
@@ -1387,7 +1375,7 @@
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
   // overpartition factor, currently 4).  Assumes that this will be called
-  // in parallel by ParallelGCThreads worker threads with discinct worker
+  // in parallel by ParallelGCThreads worker threads with distinct worker
   // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
   // calls will use the same "claim_value", and that that claim value is
   // different from the claim_value of any heap region before the start of
@@ -1484,9 +1472,11 @@
   // Section on thread-local allocation buffers (TLABs)
   // See CollectedHeap for semantics.
 
-  virtual bool supports_tlab_allocation() const;
-  virtual size_t tlab_capacity(Thread* thr) const;
-  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
+  bool supports_tlab_allocation() const;
+  size_t tlab_capacity(Thread* ignored) const;
+  size_t tlab_used(Thread* ignored) const;
+  size_t max_tlab_size() const;
+  size_t unsafe_max_tlab_alloc(Thread* ignored) const;
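
The new tlab_used() and max_tlab_size() queries let TLAB sizing see how much
of the young generation is really consumed. A hedged sketch of how a
region-based heap might answer two of them; the YoungStats struct and its
field names are illustrative, not the real G1 bookkeeping:

  // Capacity falls out of region counts; "used" is a counter the heap
  // bumps on each TLAB refill.
  struct YoungStats {
    size_t eden_regions;            // regions TLABs may be carved from
    size_t region_bytes;            // fixed region size
    size_t tlab_bytes_handed_out;   // bumped on every TLAB refill
  };

  size_t tlab_capacity_sketch(const YoungStats& s) {
    return s.eden_regions * s.region_bytes;
  }

  size_t tlab_used_sketch(const YoungStats& s) {
    return s.tlab_bytes_handed_out;
  }
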
 
   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
@@ -1532,7 +1522,7 @@
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
     // Note this has to be strictly greater-than as the TLABs
-    // are capped at the humongous thresold and we want to
+    // are capped at the humongous threshold and we want to
     // ensure that we don't try to allocate a TLAB as
     // humongous and that we don't allocate a humongous
     // object in a TLAB.
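
The strict greater-than matters precisely because of the TLAB cap described in
the comment: a TLAB sized exactly at the threshold must not itself classify as
humongous. A tiny sketch with assumed numbers (the real threshold is derived
from HeapRegion::GrainWords):

  const size_t kRegionWords = 1 << 17;                    // assumed region size
  const size_t kHumongousThresholdWords = kRegionWords / 2;

  bool is_humongous_sketch(size_t word_size) {
    // A max-size TLAB of exactly kHumongousThresholdWords words stays
    // non-humongous; only strictly larger requests qualify.
    return word_size > kHumongousThresholdWords;
  }
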
@@ -1571,7 +1561,7 @@
   void set_region_short_lived_locked(HeapRegion* hr);
   // add appropriate methods for any other surv rate groups
 
-  YoungList* young_list() { return _young_list; }
+  YoungList* young_list() const { return _young_list; }
 
   // debugging
   bool check_young_list_well_formed() {
@@ -1662,26 +1652,30 @@
 
   // Optimized nmethod scanning support routines
 
-  // Register the given nmethod with the G1 heap
+  // Register the given nmethod with the G1 heap.
   virtual void register_nmethod(nmethod* nm);
 
-  // Unregister the given nmethod from the G1 heap
+  // Unregister the given nmethod from the G1 heap.
   virtual void unregister_nmethod(nmethod* nm);
 
   // Migrate the nmethods in the code root lists of the regions
   // in the collection set to regions in to-space. In the event
   // of an evacuation failure, nmethods that reference objects
-  // that were not successfullly evacuated are not migrated.
+  // that were not successfully evacuated are not migrated.
   void migrate_strong_code_roots();
 
   // During an initial mark pause, mark all the code roots that
   // point into regions *not* in the collection set.
   void mark_strong_code_roots(uint worker_id);
 
-  // Rebuild the stong code root lists for each region
-  // after a full GC
+  // Rebuild the strong code root lists for each region
+  // after a full GC.
   void rebuild_strong_code_roots();
 
+  // Delete entries for dead interned strings and clean up unreferenced
+  // symbols in the symbol table, possibly in parallel.
+  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
+
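
Both the full-GC path (see the g1MarkSweep.cpp hunk later in this change) and
the remark pause can now funnel through this single entry point. A hedged
sketch of a call site, using the signature declared above; the wrapper
function is illustrative:

  // is_alive is the collector's liveness test for interned strings.
  void scrub_tables_sketch(BoolObjectClosure* is_alive) {
    G1CollectedHeap::heap()->unlink_string_and_symbol_table(
        is_alive, true /* unlink_strings */, true /* unlink_symbols */);
  }
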
   // Verification
 
   // The following is just to alert the verification code
@@ -1787,95 +1781,6 @@
     ParGCAllocBuffer::retire(end_of_gc, retain);
     _retired = true;
   }
-
-  bool is_retired() {
-    return _retired;
-  }
-};
-
-class G1ParGCAllocBufferContainer {
-protected:
-  static int const _priority_max = 2;
-  G1ParGCAllocBuffer* _priority_buffer[_priority_max];
-
-public:
-  G1ParGCAllocBufferContainer(size_t gclab_word_size) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      _priority_buffer[pr] = new G1ParGCAllocBuffer(gclab_word_size);
-    }
-  }
-
-  ~G1ParGCAllocBufferContainer() {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      assert(_priority_buffer[pr]->is_retired(), "alloc buffers should all retire at this point.");
-      delete _priority_buffer[pr];
-    }
-  }
-
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* obj;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      obj = _priority_buffer[pr]->allocate(word_sz);
-      if (obj != NULL) return obj;
-    }
-    return obj;
-  }
-
-  bool contains(void* addr) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      if (_priority_buffer[pr]->contains(addr)) return true;
-    }
-    return false;
-  }
-
-  void undo_allocation(HeapWord* obj, size_t word_sz) {
-    bool finish_undo;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      if (_priority_buffer[pr]->contains(obj)) {
-        _priority_buffer[pr]->undo_allocation(obj, word_sz);
-        finish_undo = true;
-      }
-    }
-    if (!finish_undo) ShouldNotReachHere();
-  }
-
-  size_t words_remaining() {
-    size_t result = 0;
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      result += _priority_buffer[pr]->words_remaining();
-    }
-    return result;
-  }
-
-  size_t words_remaining_in_retired_buffer() {
-    G1ParGCAllocBuffer* retired = _priority_buffer[0];
-    return retired->words_remaining();
-  }
-
-  void flush_stats_and_retire(PLABStats* stats, bool end_of_gc, bool retain) {
-    for (int pr = 0; pr < _priority_max; ++pr) {
-      _priority_buffer[pr]->flush_stats_and_retire(stats, end_of_gc, retain);
-    }
-  }
-
-  void update(bool end_of_gc, bool retain, HeapWord* buf, size_t word_sz) {
-    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-    retired_and_set->retire(end_of_gc, retain);
-    retired_and_set->set_buf(buf);
-    retired_and_set->set_word_size(word_sz);
-    adjust_priority_order();
-  }
-
-private:
-  void adjust_priority_order() {
-    G1ParGCAllocBuffer* retired_and_set = _priority_buffer[0];
-
-    int last = _priority_max - 1;
-    for (int pr = 0; pr < last; ++pr) {
-      _priority_buffer[pr] = _priority_buffer[pr + 1];
-    }
-    _priority_buffer[last] = retired_and_set;
-  }
 };
 
 class G1ParScanThreadState : public StackObj {
@@ -1886,11 +1791,13 @@
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
-  G1ParGCAllocBufferContainer  _tenured_alloc_buffer;
-  G1ParGCAllocBufferContainer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
   ageTable            _age_table;
 
+  G1ParScanClosure    _scanner;
+
   size_t           _alloc_buffer_waste;
   size_t           _undo_waste;
 
@@ -1943,7 +1850,7 @@
   }
 
 public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
 
   ~G1ParScanThreadState() {
     FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
@@ -1952,7 +1859,7 @@
   RefToScanQueue*   refs()            { return _refs;             }
   ageTable*         age_table()       { return &_age_table;       }
 
-  G1ParGCAllocBufferContainer* alloc_buffer(GCAllocPurpose purpose) {
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
     return _alloc_buffers[purpose];
   }
 
@@ -1982,13 +1889,15 @@
     HeapWord* obj = NULL;
     size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
     if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBufferContainer* alloc_buf = alloc_buffer(purpose);
+      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
 
       HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
       if (buf == NULL) return NULL; // Let caller handle allocation failure.
-
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining_in_retired_buffer());
-      alloc_buf->update(false /* end_of_gc */, false /* retain */, buf, gclab_word_size);
+      // Otherwise, install the new buffer and allocate from it.
+      alloc_buf->set_word_size(gclab_word_size);
+      alloc_buf->set_buf(buf);
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
@@ -2078,6 +1987,8 @@
     }
   }
 
+  oop copy_to_survivor_space(oop const obj);
+
   template <class T> void deal_with_reference(T* ref_to_scan) {
     if (has_partial_array_mask(ref_to_scan)) {
       _partial_scan_cl->do_oop_nv(ref_to_scan);
@@ -2100,6 +2011,7 @@
     }
   }
 
+public:
   void trim_queue();
 };
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -318,7 +318,7 @@
 
 void G1CollectorPolicy::initialize_alignments() {
   _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+  size_t card_table_alignment = GenRemSet::max_alignment_constraint();
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
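
The heap alignment is simply the strictest of the three constraints. A small
worked sketch with assumed values (region grain, card-table constraint, and
page size are all illustrative):

  size_t space_alignment = 1 * M;     // HeapRegion::GrainBytes, assumed 1 MB
  size_t card_alignment  = 64 * K;    // GenRemSet constraint, assumed 64 KB
  size_t page_size       = 2 * M;     // large pages, assumed 2 MB
  size_t heap_alignment  = MAX3(card_alignment, space_alignment, page_size);
  // heap_alignment == 2 MB here, so the heap size is rounded up to 2 MB.
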
@@ -1075,7 +1075,7 @@
   }
 
   _short_lived_surv_rate_group->start_adding_regions();
-  // do that for any other surv rate groupsx
+  // Do that for any other surv rate groups
 
   if (update_stats) {
     double cost_per_card_ms = 0.0;
@@ -1741,7 +1741,7 @@
   _inc_cset_predicted_elapsed_time_ms += region_elapsed_time_ms;
   _inc_cset_bytes_used_before += used_bytes;
 
-  // Cache the values we have added to the aggregated informtion
+  // Cache the values we have added to the aggregated information
   // in the heap region in case we have to remove this region from
   // the incremental collection set, or it is updated by the
   // rset sampling code
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -116,7 +116,7 @@
 // If only -XX:NewRatio is set we should use the specified ratio of the heap
 // as both min and max. This will be interpreted as "fixed" just like the
 // NewSize==MaxNewSize case above. But we will update the min and max
-// everytime the heap size changes.
+// every time the heap size changes.
 //
 // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is
 // combined with either NewSize or MaxNewSize. (A warning message is printed.)
@@ -523,9 +523,9 @@
   // synchronize updates to this field.
   size_t _inc_cset_recorded_rs_lengths;
 
-  // A concurrent refinement thread periodcially samples the young
+  // A concurrent refinement thread periodically samples the young
   // region RSets and needs to update _inc_cset_recorded_rs_lengths as
-  // the RSets grow. Instead of having to syncronize updates to that
+  // the RSets grow. Instead of having to synchronize updates to that
   // field we accumulate them in this field and add it to
   // _inc_cset_recorded_rs_lengths_diffs at the start of a GC.
   ssize_t _inc_cset_recorded_rs_lengths_diffs;
@@ -604,7 +604,7 @@
   // Calculate and return the maximum young list target length that
   // can fit into the pause time goal. The parameters are: rs_lengths
   // represent the prediction of how large the young RSet lengths will
-  // be, base_min_length is the alreay existing number of regions in
+  // be, base_min_length is the already existing number of regions in
   // the young list, min_length and max_length are the desired min and
   // max young list length according to the user's inputs.
   uint calculate_young_list_target_length(size_t rs_lengths,
@@ -820,6 +820,8 @@
     // do that for any other surv rate groups
   }
 
+  size_t young_list_target_length() const { return _young_list_target_length; }
+
   bool is_young_list_full() {
     uint young_list_length = _g1->young_list()->length();
     uint young_list_target_length = _young_list_target_length;
--- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -103,7 +103,7 @@
   // The data structure implemented is a circular queue.
   // Head "points" to the most recent addition, tail to the oldest one.
   // The array is of fixed size and I don't think we'll need more than
-  // two or three entries with the current behaviour of G1 pauses.
+  // two or three entries with the current behavior of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -131,10 +131,8 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
                            SharedHeap::SO_SystemClasses,
                            &GenMarkSweep::follow_root_closure,
-                           &GenMarkSweep::follow_code_root_closure,
                            &GenMarkSweep::follow_klass_closure);
 
   // Process reference objects found during marking
@@ -163,11 +161,8 @@
   // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
 
-  // Delete entries for dead interned strings.
-  StringTable::unlink(&GenMarkSweep::is_alive);
-
-  // Clean up unreferenced symbols in symbol table.
-  SymbolTable::unlink();
+  // Delete entries for dead interned strings and clean up unreferenced symbols in the symbol table.
+  G1CollectedHeap::heap()->unlink_string_and_symbol_table(&GenMarkSweep::is_alive);
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
@@ -180,7 +175,7 @@
     // any hash values from the mark word. These hash values are
     // used when verifying the dictionaries and so removing them
     // from the mark word can make verification of the dictionaries
-    // fail. At the end of the GC, the orginal mark word values
+    // fail. At the end of the GC, the original mark word values
     // (including hash values) are restored to the appropriate
     // objects.
     if (!VerifySilently) {
@@ -311,10 +306,8 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   sh->process_strong_roots(true,  // activate StrongRootsScope
-                           false, // not scavenging.
-                           SharedHeap::SO_AllClasses,
+                           SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
                            &GenMarkSweep::adjust_pointer_closure,
-                           NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_klass_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
@@ -322,7 +315,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
+  sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
 
   GenMarkSweep::adjust_marks();
 
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,7 @@
   // take_sample() only returns "used".  When sampling was used, there
  // were some anomalous values emitted which may have been the consequence
   // of not updating all values simultaneously (i.e., see the calculation done
-  // in eden_space_used(), is it possbile that the values used to
+  // in eden_space_used(), is it possible that the values used to
   // calculate either eden_used or survivor_used are being updated by
   // the collector when the sample is being done?).
   const bool sampled = false;
@@ -135,7 +135,7 @@
 
   //   Young collection set
   //  name "generation.0".  This is logically the young generation.
-  //  The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
+  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   // See  _old_collection_counters for additional counters
   _young_collection_counters = new G1YoungGenerationCounters(this, "young");
 
@@ -254,7 +254,7 @@
     eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
     eden_counters()->update_used(eden_space_used());
     // only the to survivor space (s1) is active, so we don't need to
-    // update the counteres for the from survivor space (s0)
+    // update the counters for the from survivor space (s0)
     to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
     to_counters()->update_used(survivor_space_used());
     old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -108,7 +108,7 @@
 // is that all the above sizes need to be recalculated when the old
 // gen changes capacity (after a GC or after a humongous allocation)
 // but only the eden occupancy changes when a new eden region is
-// allocated. So, in the latter case we have minimal recalcuation to
+// allocated. So, in the latter case we have minimal recalculation to
 // do which is important as we want to keep the eden region allocation
 // path as low-overhead as possible.
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
+
+G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
+  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
+  _cm(_g1->concurrent_mark()) {}
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 
 // A class that scans oops in a given heap region (much as OopsInGenClosure
 // scans oops in a generation.)
-class OopsInHeapRegionClosure: public OopsInGenClosure {
+class OopsInHeapRegionClosure: public ExtendedOopClosure {
 protected:
   HeapRegion* _from;
 public:
@@ -48,12 +48,8 @@
 class G1ParClosureSuper : public OopsInHeapRegionClosure {
 protected:
   G1CollectedHeap* _g1;
-  G1RemSet* _g1_rem;
-  ConcurrentMark* _cm;
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
-  bool _during_initial_mark;
-  bool _mark_in_progress;
 public:
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
@@ -86,13 +82,26 @@
 
 #define G1_PARTIAL_ARRAY_MASK 0x2
 
-template <class T> inline bool has_partial_array_mask(T* ref) {
+inline bool has_partial_array_mask(oop* ref) {
   return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
 }
 
-template <class T> inline T* set_partial_array_mask(T obj) {
+// We never encode partial array oops as narrowOop*, so return false immediately.
+// This allows the compiler to create optimized code when popping references from
+// the work queue.
+inline bool has_partial_array_mask(narrowOop* ref) {
+  assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+  return false;
+}
+
+// Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+// We always encode partial arrays as regular oops, to allow the
+// specialization of has_partial_array_mask() for narrowOops above.
+// This means that unintentional use of this method with narrowOops is caught
+// by the compiler.
+inline oop* set_partial_array_mask(oop obj) {
   assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
@@ -120,23 +129,10 @@
 
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
+protected:
   Klass* _scanned_klass;
-
- public:
-  G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
-      _scanned_klass(NULL),
-      G1ParClosureSuper(g1, par_scan_state) {}
+  ConcurrentMark* _cm;
 
-  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
-};
-
-template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
-class G1ParCopyClosure : public G1ParCopyHelper {
-  G1ParScanClosure _scanner;
-  template <class T> void do_oop_work(T* p);
-
-protected:
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
@@ -146,42 +142,40 @@
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
   void mark_forwarded_object(oop from_obj, oop to_obj);
+ public:
+  G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state);
 
-  oop copy_to_survivor_space(oop obj);
+  void set_scanned_klass(Klass* k) { _scanned_klass = k; }
+  template <class T> void do_klass_barrier(T* p, oop new_obj);
+};
+
+template <G1Barrier barrier, bool do_mark_object>
+class G1ParCopyClosure : public G1ParCopyHelper {
+private:
+  template <class T> void do_oop_work(T* p);
 
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
                    ReferenceProcessor* rp) :
-      _scanner(g1, par_scan_state, rp),
       G1ParCopyHelper(g1, par_scan_state) {
     assert(_ref_processor == NULL, "sanity");
   }
 
-  G1ParScanClosure* scanner() { return &_scanner; }
-
-  template <class T> void do_oop_nv(T* p) {
-    do_oop_work(p);
-  }
+  template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, false> G1ParScanMetadataClosure;
+typedef G1ParCopyClosure<G1BarrierNone, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, false> G1ParScanMetadataClosure;
 
 
-typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, true> G1ParScanAndMarkClosure;
-typedef G1ParCopyClosure<false, G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
-
-// The following closure types are no longer used but are retained
-// for historical reasons:
-// typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
-// typedef G1ParCopyClosure<false, G1BarrierRS,   true> G1ParScanAndMarkHeapRSClosure;
+typedef G1ParCopyClosure<G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<G1BarrierKlass, true> G1ParScanAndMarkMetadataClosure;
 
 // The following closure type is defined in g1_specialized_oop_closures.hpp:
 //
-// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+// typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 // We use a separate closure to handle references during evacuation
 // failure processing.
@@ -189,7 +183,7 @@
 // (since that closure no longer assumes that the references it
 // handles point into the collection set).
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 
 /*
@@ -82,7 +83,7 @@
 
       _par_scan_state->push_on_queue(p);
     } else {
-      _par_scan_state->update_rs(_from, p, _par_scan_state->queue_num());
+      _par_scan_state->update_rs(_from, p, _worker_id);
     }
   }
 }
@@ -177,7 +178,7 @@
     // The _record_refs_into_cset flag is true during the RSet
     // updating part of an evacuation pause. It is false at all
     // other times:
-    //  * rebuilding the rembered sets after a full GC
+    //  * rebuilding the remembered sets after a full GC
     //  * during concurrent refinement.
     //  * updating the remembered sets of regions in the collection
     //    set in the event of an evacuation failure (when deferred
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/g1/bufferingOopClosure.hpp"
 #include "gc_implementation/g1/concurrentG1Refine.hpp"
 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
@@ -195,7 +194,7 @@
     HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
-    // We claim cards in block so as to recude the contention. The block size is determined by
+    // We claim cards in blocks so as to reduce contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
     for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
@@ -587,7 +586,7 @@
 
   // While we are processing RSet buffers during the collection, we
   // actually don't want to scan any cards on the collection set,
-  // since we don't want to update remebered sets with entries that
+  // since we don't want to update remembered sets with entries that
   // point into the collection set, given that live objects from the
   // collection set are about to move and such entries will be stale
   // very soon. This change also deals with a reliability issue which
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -115,7 +115,8 @@
 
 void
 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
-                                                     oop new_val) {
+                                                     oop new_val,
+                                                     bool release) {
   volatile jbyte* byte = byte_for(field);
   if (*byte == g1_young_gen) {
     return;
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@
       G1SATBCardTableModRefBS::is_a(bsn);
   }
 
-  void write_ref_field_work(void* field, oop new_val);
+  void write_ref_field_work(void* field, oop new_val, bool release = false);
 
   // Can be called from static contexts.
   static void write_ref_field_static(void* field, oop new_val);
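
The new release parameter lets a caller ask that the reference store be
published before the card is dirtied; otherwise a concurrent refinement
thread could observe the dirty card, scan it, and miss the not-yet-visible
reference. A hedged sketch of the ordering concern (the dirty value and
helper are illustrative):

  void post_write_barrier_sketch(volatile jbyte* card, bool release) {
    if (release) {
      OrderAccess::release();   // order the field store before the card mark
    }
    *card = 0;                  // 0 standing in for the dirty-card value
  }
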
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -71,6 +71,9 @@
   diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
+  experimental(bool, G1TraceStringSymbolTableScrubbing, false,              \
+          "Trace information string and symbol table scrubbing.")           \
+                                                                            \
   product(double, G1ConcMarkStepDurationMillis, 10.0,                       \
           "Target duration of individual concurrent marking steps "         \
           "in milliseconds.")                                               \
@@ -180,7 +183,7 @@
           "When true, record recent calls to rem set operations.")          \
                                                                             \
   develop(intx, G1MaxVerifyFailures, -1,                                    \
-          "The maximum number of verification failrues to print.  "         \
+          "The maximum number of verification failures to print.  "         \
           "-1 means print all.")                                            \
                                                                             \
   develop(bool, G1ScrubRemSets, true,                                       \
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -33,18 +33,17 @@
 // Forward declarations.
 enum G1Barrier {
   G1BarrierNone,
-  G1BarrierRS,
   G1BarrierEvac,
   G1BarrierKlass
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
+template<G1Barrier barrier, bool do_mark_object>
 class G1ParCopyClosure;
 
 class G1ParScanClosure;
 class G1ParPushHeapRSClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<G1BarrierEvac, false> G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1027,7 +1027,7 @@
       }
     }
 
-    // Loook up end - 1
+    // Look up end - 1
     HeapWord* addr_4 = the_end - 1;
     HeapWord* b_start_4 = _offsets.block_start_const(addr_4);
     if (b_start_4 != p) {
@@ -1111,7 +1111,7 @@
     // will be false, and it will pick up top() as the high water mark
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behaviour will be correct.
+    // of the region. Either way, the behavior will be correct.
     ContiguousSpace::set_saved_mark();
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,7 @@
   HeapWord* heap_end() const {return _regions.end_address_mapped(); }
 
  public:
-  // Empty contructor, we'll initialize it with the initialize() method.
+  // Empty constructor; we'll initialize it with the initialize() method.
   HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
 
   void initialize(HeapWord* bottom, HeapWord* end);
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
   assert(_lock->owned_by_self(), "Required.");
 
   // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
+  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
   // have the same rank and we may get the "possible deadlock" message
   _lock->unlock();
 
@@ -151,7 +151,7 @@
 
       // The current PtrQ may be the shared dirty card queue and
       // may be being manipulated by more than one worker thread
-      // during a pause. Since the enqueuing of the completed
+      // during a pause. Since the enqueueing of the completed
       // buffer unlocks the Shared_DirtyCardQ_lock more than one
       // worker thread can 'race' on reading the shared queue attributes
       // (_buf and _index) and multiple threads can call into this
@@ -170,7 +170,7 @@
 
       locking_enqueue_completed_buffer(buf);  // enqueue completed buffer
 
-      // While the current thread was enqueuing the buffer another thread
+      // While the current thread was enqueueing the buffer another thread
      // may have allocated a new buffer and inserted it into this pointer
       // queue. If that happens then we just return so that the current
       // thread doesn't overwrite the buffer allocated by the other thread
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -91,7 +91,7 @@
       assert(new_index > 0, "we should not have already filled up the buffer");
       new_index -= oopSize;
       assert(new_index >= i,
-             "new_index should never be below i, as we alwaysr compact 'up'");
+             "new_index should never be below i, as we always compact 'up'");
       oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
       assert(new_p >= p, "the destination location should never be below "
              "the source as we always compact 'up'");
@@ -219,58 +219,52 @@
 }
 
 #ifdef ASSERT
-void SATBMarkQueueSet::dump_active_values(JavaThread* first,
-                                          bool expected_active) {
-  gclog_or_tty->print_cr("SATB queue active values for Java Threads");
-  gclog_or_tty->print_cr(" SATB queue set: active is %s",
-                         (is_active()) ? "TRUE" : "FALSE");
-  gclog_or_tty->print_cr(" expected_active is %s",
-                         (expected_active) ? "TRUE" : "FALSE");
-  for (JavaThread* t = first; t; t = t->next()) {
-    bool active = t->satb_mark_queue().is_active();
-    gclog_or_tty->print_cr("  thread %s, active is %s",
-                           t->name(), (active) ? "TRUE" : "FALSE");
+void SATBMarkQueueSet::dump_active_states(bool expected_active) {
+  gclog_or_tty->print_cr("Expected SATB active state: %s",
+                         expected_active ? "ACTIVE" : "INACTIVE");
+  gclog_or_tty->print_cr("Actual SATB active states:");
+  gclog_or_tty->print_cr("  Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE");
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    gclog_or_tty->print_cr("  Thread \"%s\" queue: %s", t->name(),
+                           t->satb_mark_queue().is_active() ? "ACTIVE" : "INACTIVE");
+  }
+  gclog_or_tty->print_cr("  Shared queue: %s",
+                         shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE");
+}
+
+void SATBMarkQueueSet::verify_active_states(bool expected_active) {
+  // Verify queue set state
+  if (is_active() != expected_active) {
+    dump_active_states(expected_active);
+    guarantee(false, "SATB queue set has an unexpected active state");
+  }
+
+  // Verify thread queue states
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    if (t->satb_mark_queue().is_active() != expected_active) {
+      dump_active_states(expected_active);
+      guarantee(false, "Thread SATB queue has an unexpected active state");
+    }
+  }
+
+  // Verify shared queue state
+  if (shared_satb_queue()->is_active() != expected_active) {
+    dump_active_states(expected_active);
+    guarantee(false, "Shared SATB queue has an unexpected active state");
   }
 }
 #endif // ASSERT
 
-void SATBMarkQueueSet::set_active_all_threads(bool b,
-                                              bool expected_active) {
+void SATBMarkQueueSet::set_active_all_threads(bool active, bool expected_active) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  JavaThread* first = Threads::first();
-
 #ifdef ASSERT
-  if (_all_active != expected_active) {
-    dump_active_values(first, expected_active);
-
-    // I leave this here as a guarantee, instead of an assert, so
-    // that it will still be compiled in if we choose to uncomment
-    // the #ifdef ASSERT in a product build. The whole block is
-    // within an #ifdef ASSERT so the guarantee will not be compiled
-    // in a product build anyway.
-    guarantee(false,
-              "SATB queue set has an unexpected active value");
-  }
+  verify_active_states(expected_active);
 #endif // ASSERT
-  _all_active = b;
-
-  for (JavaThread* t = first; t; t = t->next()) {
-#ifdef ASSERT
-    bool active = t->satb_mark_queue().is_active();
-    if (active != expected_active) {
-      dump_active_values(first, expected_active);
-
-      // I leave this here as a guarantee, instead of an assert, so
-      // that it will still be compiled in if we choose to uncomment
-      // the #ifdef ASSERT in a product build. The whole block is
-      // within an #ifdef ASSERT so the guarantee will not be compiled
-      // in a product build anyway.
-      guarantee(false,
-                "thread has an unexpected active value in its SATB queue");
-    }
-#endif // ASSERT
-    t->satb_mark_queue().set_active(b);
+  _all_active = active;
+  for (JavaThread* t = Threads::first(); t; t = t->next()) {
+    t->satb_mark_queue().set_active(active);
   }
+  shared_satb_queue()->set_active(active);
 }
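
The refactor splits the old inline checks into dump_active_states() plus
verify_active_states(), keeping one diagnostic pattern: on a mismatch, dump
the complete state of every queue before failing, so the log shows more than
just the first bad queue. A sketch of that pattern; dump_all_states_sketch is
an assumed helper:

  void check_state_sketch(bool actual, bool expected) {
    if (actual != expected) {
      dump_all_states_sketch(expected);   // print everything first
      guarantee(false, "unexpected SATB active state");
    }
  }
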
 
 void SATBMarkQueueSet::filter_thread_buffers() {
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -87,7 +87,8 @@
   bool apply_closure_to_completed_buffer_work(bool par, int worker);
 
 #ifdef ASSERT
-  void dump_active_values(JavaThread* first, bool expected_active);
+  void dump_active_states(bool expected_active);
+  void verify_active_states(bool expected_active);
 #endif // ASSERT
 
 public:
@@ -99,11 +100,11 @@
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
-  // Apply "set_active(b)" to all Java threads' SATB queues. It should be
+  // Apply "set_active(active)" to all SATB queues in the set. It should be
   // called only with the world stopped. The method will assert that the
   // SATB queues of all threads it visits, as well as the SATB queue
  // set itself, have an active state equal to expected_active.
-  void set_active_all_threads(bool b, bool expected_active);
+  void set_active_all_threads(bool active, bool expected_active);
 
   // Filter all the currently-active SATB buffers.
   void filter_thread_buffers();
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,7 +144,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
@@ -201,8 +201,7 @@
   bool has_next(size_t& card_index);
 };
 
-// Concurrent accesss to a SparsePRT must be serialized by some external
-// mutex.
+// Concurrent access to a SparsePRT must be serialized by some external mutex.
 
 class SparsePRTIter;
 class SparsePRTCleanupTask;
@@ -248,7 +247,7 @@
 
   // Attempts to ensure that the given card_index in the given region is in
   // the sparse table.  If successful (because the card was already
-  // present, or because it was successfullly added) returns "true".
+  // present, or because it was successfully added) returns "true".
   // Otherwise, returns "false" to indicate that the addition would
   // overflow the entry for the region.  The caller must transfer these
   // entries to a larger-capacity representation.
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -154,7 +154,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -213,7 +213,7 @@
       && sp->block_is_obj(first_block)      // first block is an object
       && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
            || oop(first_block)->is_typeArray())) {
-    // Find our least non-clean card, so that a left neighbour
+    // Find our least non-clean card, so that a left neighbor
     // does not scan an object straddling the mutual boundary
     // too far to the right, and attempt to scan a portion of
     // that object twice.
@@ -247,14 +247,14 @@
     } NOISY(else {
       tty->print_cr(" LNC: Found no dirty card in current chunk; leaving LNC entry NULL");
       // In the future, we could have this thread look for a non-NULL value to copy from its
-      // right neighbour (up to the end of the first object).
+      // right neighbor (up to the end of the first object).
       if (last_card_of_cur_chunk < last_card_of_first_obj) {
         tty->print_cr(" LNC: BEWARE!!! first obj straddles past right end of chunk:\n"
-                      "   might be efficient to get value from right neighbour?");
+                      "   might be efficient to get value from right neighbor?");
       }
     })
   } else {
-    // In this case we can help our neighbour by just asking them
+    // In this case we can help our neighbor by just asking them
     // to stop at our first card (even though it may not be dirty).
     NOISY(tty->print_cr(" LNC: first block is not a non-array object; setting LNC to first card of current chunk");)
     assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -612,17 +612,15 @@
   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
                                       gch->rem_set()->klass_rem_set());
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
 
   par_scan_state.start_strong_roots();
   gch->gen_process_strong_roots(_gen->level(),
                                 true,  // Process younger gens, if any,
                                        // as strong roots.
                                 false, // no scope; this is parallel code
-                                true,  // is scavenging
                                 SharedHeap::ScanningOption(so),
                                 &par_scan_state.to_space_root_closure(),
-                                true,   // walk *all* scavengable nmethods
                                 &par_scan_state.older_gen_closure(),
                                 &klass_scan_closure);
   par_scan_state.end_strong_roots();
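
ScanningOption values are bit flags, so root-scanning categories compose with
'|'; the change above swaps the whole code cache for just its scavengable
subset. A sketch of the composition with illustrative enum values (the real
ones live in SharedHeap::ScanningOption):

  enum ScanOptSketch {
    SO_AllClasses_S        = 0x1,
    SO_Strings_S           = 0x2,
    SO_ScavengeCodeCache_S = 0x4
  };

  int so_sketch = SO_AllClasses_S | SO_Strings_S | SO_ScavengeCodeCache_S;
  // Each scanner then tests its own bit, e.g.:
  //   if (so_sketch & SO_Strings_S) { /* scan the string table */ }
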
@@ -1071,7 +1069,7 @@
     size_policy->avg_survived()->sample(from()->used());
   }
 
-  // We need to use a monotonically non-deccreasing time in ms
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
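
This is why the code derives milliseconds from os::javaTimeNanos() instead of
calling os::javaTimeMillis(): the nanosecond source is monotonic, the wall
clock is not. A minimal sketch of the pattern, wrapped in an illustrative
helper:

  jlong monotonic_millis_sketch() {
    // Never goes backwards, unlike os::javaTimeMillis().
    return os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  }
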
@@ -1403,7 +1401,7 @@
 #ifndef PRODUCT
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
-// spaced simulated oveflows, but that's OK, in fact
+// spaced simulated overflows, but that's OK, in fact
 // probably good as it would exercise the overflow code
 // under contention.
 bool ParNewGeneration::should_simulate_overflow() {
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,8 +118,8 @@
 
 
 // Make checks on the current sizes of the generations and
-// the contraints on the sizes of the generations.  Push
-// up the boundary within the contraints.  A partial
+// the constraints on the sizes of the generations.  Push
+// up the boundary within the constraints.  A partial
 // push can occur.
 void AdjoiningGenerations::request_old_gen_expansion(size_t expand_in_bytes) {
   assert(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary, "runtime check");
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
   // the available space and attempt to move the boundary if more space
   // is needed.  The growth is not guaranteed to occur.
   void adjust_boundary_for_old_gen_needs(size_t desired_change_in_bytes);
-  // Similary for a growth of the young generation.
+  // Similarly for a growth of the young generation.
   void adjust_boundary_for_young_gen_needs(size_t eden_size, size_t survivor_size);
 
   // Return the total byte size of the reserved space
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/asPSYoungGen.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -65,7 +65,7 @@
   }
 };
 
-// Checks all objects for the existance of some type of mark,
+// Checks all objects for the existence of some type of mark,
 // precise or imprecise, dirty or newgen.
 class CheckForUnmarkedObjects : public ObjectClosure {
  private:
@@ -84,7 +84,7 @@
   }
 
   // Card marks are not precise. The current system can leave us with
-  // a mismash of precise marks and beginning of object marks. This means
+  // a mismatch of precise marks and beginning of object marks. This means
   // we test for missing precise marks first. If any are found, we don't
   // fail unless the object head is also unmarked.
   virtual void do_object(oop obj) {
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -202,12 +202,12 @@
     list->print("list:");
   }
   if (list->is_empty()) {
-    // Enqueuing the empty list: nothing to do.
+    // Enqueueing the empty list: nothing to do.
     return;
   }
   uint list_length = list->length();
   if (is_empty()) {
-    // Enqueuing to empty list: just acquire elements.
+    // Enqueueing to empty list: just acquire elements.
     set_insert_end(list->insert_end());
     set_remove_end(list->remove_end());
     set_length(list_length);
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -303,7 +303,7 @@
 // load balancing (i.e., over partitioning).  The last task to be
 // executed by a GC thread in a job is a work stealing task.  A
 // GC  thread that gets a work stealing task continues to execute
-// that task until the job is done.  In the static number of GC theads
+// that task until the job is done.  In the static number of GC threads
 // case, tasks are added to a queue (FIFO).  The work stealing tasks are
 // the last to be added.  Once the tasks are added, the GC threads grab
 // a task and go.  A single thread can do all the non-work stealing tasks
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -139,11 +139,6 @@
       return true;
     }
   }
-  // No object starts in this slice; verify this using
-  // more traditional methods:  Note that no object can
-  // start before the start_addr.
-  assert(end_addr == start_addr ||
-         object_start(end_addr - 1) <= start_addr,
-         "Oops an object does start in this slice?");
+
   return false;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,6 +38,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -484,12 +484,12 @@
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
+size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
+  return young_gen()->eden_space()->tlab_capacity(thr);
 }
 
-size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
-  return young_gen()->eden_space()->tlab_capacity(thr);
+size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
+  return young_gen()->eden_space()->tlab_used(thr);
 }
 
 size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
@@ -677,7 +677,7 @@
 
 // Before delegating the resize to the young generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
     size_t survivor_size) {
   if (UseAdaptiveGCBoundary) {
@@ -694,7 +694,7 @@
 
 // Before delegating the resize to the old generation,
 // the reserved space for the young and old generations
-// may be changed to accomodate the desired resize.
+// may be changed to accommodate the desired resize.
 void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
   if (UseAdaptiveGCBoundary) {
     if (size_policy()->bytes_absorbed_from_eden() != 0) {
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -184,11 +184,10 @@
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
+  size_t tlab_used(Thread* thr) const;
   size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
   // Can a compiler initialize a new object without store barriers?
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
 // the do_it() method of a ThreadRootsMarkingTask is executed, it
 // starts marking from the thread's roots.
 //
-// The enqueuing of the MarkFromRootsTask and ThreadRootsMarkingTask
+// The enqueueing of the MarkFromRootsTask and ThreadRootsMarkingTask
 // do little more than create the task and put it on a queue.  The
 // queue is a GCTaskQueue and threads steal tasks from this GCTaskQueue.
 //
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
@@ -76,6 +77,38 @@
   _old_gen_policy_is_ready = false;
 }
 
+size_t PSAdaptiveSizePolicy::calculate_free_based_on_live(size_t live, uintx ratio_as_percentage) {
+  // We want to calculate how much free memory there can be based on the
+  // amount of live data currently in the old gen. Using the formula:
+  // ratio * (free + live) = free
+  // Some equation solving later we get:
+  // free = (live * ratio) / (1 - ratio)
+
+  const double ratio = ratio_as_percentage / 100.0;
+  const double ratio_inverse = 1.0 - ratio;
+  const double tmp = live * ratio;
+  size_t free = (size_t)(tmp / ratio_inverse);
+
+  return free;
+}
+
+size_t PSAdaptiveSizePolicy::calculated_old_free_size_in_bytes() const {
+  size_t free_size = (size_t)(_promo_size + avg_promoted()->padded_average());
+  size_t live = ParallelScavengeHeap::heap()->old_gen()->used_in_bytes();
+
+  if (MinHeapFreeRatio != 0) {
+    size_t min_free = calculate_free_based_on_live(live, MinHeapFreeRatio);
+    free_size = MAX2(free_size, min_free);
+  }
+
+  if (MaxHeapFreeRatio != 100) {
+    size_t max_free = calculate_free_based_on_live(live, MaxHeapFreeRatio);
+    free_size = MIN2(max_free, free_size);
+  }
+
+  return free_size;
+}
+
 void PSAdaptiveSizePolicy::major_collection_begin() {
   // Update the interval time
   _major_timer.stop();
@@ -482,7 +515,7 @@
   //   adjust down the total heap size.  Adjust down the larger of the
   //   generations.
 
-  // Add some checks for a threshhold for a change.  For example,
+  // Add some checks for a threshold for a change.  For example,
   // a change less than the necessary alignment is probably not worth
   // attempting.
 
@@ -1161,7 +1194,7 @@
     // We use the tenuring threshold to equalize the cost of major
     // and minor collections.
     // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost betweent the
+    // tenuring threshold is to differences in cost between the
     // collection types.
 
     // Get the times of interest. This involves a little work, so
@@ -1292,3 +1325,18 @@
                           st,
                           PSScavenge::tenuring_threshold());
 }
+
+#ifndef PRODUCT
+
+void TestOldFreeSpaceCalculation_test() {
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 20) == 25, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 50) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 60) == 150, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(100, 75) == 300, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 20) == 100, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 50) == 400, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 60) == 600, "Calculation of free memory failed");
+  assert(PSAdaptiveSizePolicy::calculate_free_based_on_live(400, 75) == 1200, "Calculation of free memory failed");
+}
+
+#endif /* !PRODUCT */
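
The calculate_free_based_on_live() helper above is the free-ratio identity ratio * (free + live) = free solved for free, and TestOldFreeSpaceCalculation_test() pins down its data points. A minimal standalone sketch of the same arithmetic, together with the MIN2/MAX2 clamping done in calculated_old_free_size_in_bytes(), assuming hypothetical ratio and live values (this is not HotSpot code):

#include <algorithm>
#include <cassert>
#include <cstddef>

// free = live * ratio / (1 - ratio), with the ratio given as a percentage,
// mirroring calculate_free_based_on_live() above.
static size_t free_based_on_live(size_t live, unsigned ratio_as_percentage) {
  const double ratio = ratio_as_percentage / 100.0;
  return static_cast<size_t>((live * ratio) / (1.0 - ratio));
}

int main() {
  // Two of the data points from TestOldFreeSpaceCalculation_test().
  assert(free_based_on_live(100, 20) == 25);
  assert(free_based_on_live(400, 75) == 1200);

  // Clamping as in calculated_old_free_size_in_bytes(), with hypothetical
  // stand-ins for MinHeapFreeRatio (10), MaxHeapFreeRatio (70) and live bytes.
  size_t free_size = 100;
  free_size = std::max(free_size, free_based_on_live(1000, 10));  // lower bound
  free_size = std::min(free_based_on_live(1000, 70), free_size);  // upper bound
  assert(free_size == free_based_on_live(1000, 10));
  return 0;
}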
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,7 +37,7 @@
 //
 // It also computes an optimal tenuring threshold between the young
 // and old generations, so as to equalize the cost of collections
-// of those generations, as well as optimial survivor space sizes
+// of those generations, as well as optimal survivor space sizes
 // for the young generation.
 //
 // While this class is specifically intended for a generational system
@@ -113,7 +113,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_major_pause_count;
 
@@ -240,7 +240,6 @@
   void major_collection_begin();
   void major_collection_end(size_t amount_live, GCCause::Cause gc_cause);
 
-  //
   void tenured_allocation(size_t size) {
     _avg_pretenured->sample(size);
   }
@@ -248,9 +247,9 @@
   // Accessors
   // NEEDS_CLEANUP   should use sizes.hpp
 
-  size_t calculated_old_free_size_in_bytes() const {
-    return (size_t)(_promo_size + avg_promoted()->padded_average());
-  }
+  static size_t calculate_free_based_on_live(size_t live, uintx ratio_as_percentage);
+
+  size_t calculated_old_free_size_in_bytes() const;
 
   size_t average_old_live_in_bytes() const {
     return (size_t) avg_old_live()->average();
--- a/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -195,7 +195,7 @@
 
   // Update all the counters that can be updated from the size policy.
   // This should be called after all policy changes have been made
-  // and reflected internall in the size policy.
+  // and reflected internally in the size policy.
   void update_counters_from_policy();
 
   // Update counters that can be updated from fields internal to the
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -661,7 +661,7 @@
 }
 
 jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -674,7 +674,7 @@
 }
 
 void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,7 +280,7 @@
         "Should be true before post_resize()");
       MemRegion mangle_region(object_space()->end(), virtual_space_high);
       // Note that the object space has not yet been updated to
-      // coincede with the new underlying virtual space.
+      // coincide with the new underlying virtual space.
       SpaceMangler::mangle_region(mangle_region);
     }
     post_resize();
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -187,7 +187,7 @@
 
   void space_invariants() PRODUCT_RETURN;
 
-  // Performace Counter support
+  // Performance Counter support
   void update_counters();
 
   // Printing support
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2176,7 +2176,7 @@
 
     heap->resize_all_tlabs();
 
-    // Resize the metaspace capactiy after a collection
+    // Resize the metaspace capacity after a collection
     MetaspaceGC::compute_new_size();
 
     if (TraceGen1Time) accumulated_time()->stop();
@@ -3285,7 +3285,7 @@
 }
 
 jlong PSParallelCompact::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong ret_val = now - _time_of_last_gc;
@@ -3298,7 +3298,7 @@
 }
 
 void PSParallelCompact::reset_millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -877,7 +877,7 @@
 // The summary phase calculates the total live data to the left of each region
 // XXX.  Based on that total and the bottom of the space, it can calculate the
 // starting location of the live data in XXX.  The summary phase calculates for
-// each region XXX quantites such as
+// each region XXX quantities such as
 //
 //      - the amount of live data at the beginning of a region from an object
 //        entering the region.
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -78,7 +78,7 @@
   // Returns a subregion containing all objects in this space.
   MemRegion used_region()            { return MemRegion(bottom(), top()); }
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used() == 0; }
   bool not_empty() const             { return used() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
+#include "oops/oop.psgc.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -466,10 +466,12 @@
       }
     }
 
-    GCTraceTime tm("StringTable", false, false, &_gc_timer);
-    // Unlink any dead interned Strings and process the remaining live ones.
-    PSScavengeRootsClosure root_closure(promotion_manager);
-    StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
+    {
+      GCTraceTime tm("StringTable", false, false, &_gc_timer);
+      // Unlink any dead interned Strings and process the remaining live ones.
+      PSScavengeRootsClosure root_closure(promotion_manager);
+      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
+    }
 
     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
     promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
@@ -527,8 +529,19 @@
           counters->update_survivor_overflowed(_survivor_overflow);
         }
 
+        size_t max_young_size = young_gen->max_size();
+
+        // Deciding a free ratio in the young generation is tricky, so if
+        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implying that
+        // the old generation size may have been limited because of them), we
+        // should then limit our young generation size using NewRatio to have it
+        // follow the old generation size.
+        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
+          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
+        }
+
         size_t survivor_limit =
-          size_policy->max_survivor_size(young_gen->max_size());
+          size_policy->max_survivor_size(max_young_size);
         _tenuring_threshold =
           size_policy->compute_survivor_space_size_and_threshold(
                                                            _survivor_overflow,
@@ -551,12 +564,11 @@
         // Do call at minor collections?
         // Don't check if the size_policy is ready at this
         // level.  Let the size_policy check that internally.
-        if (UseAdaptiveSizePolicy &&
-            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
+        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
             ((gc_cause != GCCause::_java_lang_system_gc) ||
               UseAdaptiveSizePolicyWithSystemGC)) {
 
-          // Calculate optimial free space amounts
+          // Calculate optimal free space amounts
           assert(young_gen->max_size() >
             young_gen->from_space()->capacity_in_bytes() +
             young_gen->to_space()->capacity_in_bytes(),
@@ -566,7 +578,7 @@
           size_t eden_live = young_gen->eden_space()->used_in_bytes();
           size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
           size_t max_old_gen_size = old_gen->max_gen_size();
-          size_t max_eden_size = young_gen->max_size() -
+          size_t max_eden_size = max_young_size -
             young_gen->from_space()->capacity_in_bytes() -
             young_gen->to_space()->capacity_in_bytes();
 
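
For the max_young_size cap introduced above: a minimal sketch, not HotSpot code, of the rule the hunk applies; the parameter names are hypothetical stand-ins for the flag and generation values used in the patch.

#include <algorithm>
#include <cstddef>

// When MinHeapFreeRatio/MaxHeapFreeRatio may have limited the old generation,
// cap the young generation at old_capacity / NewRatio so it follows the old
// generation's size; otherwise keep the young generation's own maximum.
size_t capped_young_size(size_t young_max, size_t old_capacity,
                         size_t new_ratio, bool free_ratios_in_use) {
  if (!free_ratios_in_use) {
    return young_max;
  }
  return std::min(old_capacity / new_ratio, young_max);
}

The capped value then feeds max_survivor_size() and the max_eden_size computation, as in the hunk above.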
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
     case threads:
     {
       ResourceMark rm;
-      CLDToOopClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
+      CLDClosure* cld_closure = NULL; // Not needed. All CLDs are already visited.
       Threads::oops_do(&roots_closure, cld_closure, NULL);
     }
     break;
@@ -122,7 +122,7 @@
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
-  CLDToOopClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
+  CLDClosure* roots_from_clds = NULL;  // Not needed. All CLDs are already visited.
   CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
 
   if (_java_thread != NULL)
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -35,6 +35,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -35,7 +35,7 @@
 class PSVirtualSpace : public CHeapObj<mtGC> {
   friend class VMStructs;
  protected:
-  // The space is committed/uncommited in chunks of size _alignment.  The
+  // The space is committed/uncommitted in chunks of size _alignment.  The
   // ReservedSpace passed to initialize() must be aligned to this value.
   const size_t _alignment;
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@
     // generation - the less space committed, the smaller the survivor
     // space, possibly as small as an alignment. However, we are interested
     // in the case where the young generation is 100% committed, as this
-    // is the point where eden reachs its maximum size. At this point,
+    // is the point where eden reaches its maximum size. At this point,
     // the size of a survivor space is max_survivor_size.
     max_eden_size = size - 2 * max_survivor_size;
   }
@@ -288,7 +288,7 @@
   // There used to be this guarantee there.
   // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
   // Code below forces this requirement.  In addition the desired eden
-  // size and disired survivor sizes are desired goals and may
+  // size and desired survivor sizes are desired goals and may
   // exceed the total generation size.
 
   assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking");
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -127,7 +127,7 @@
   void adjust_pointers();
   void compact();
 
-  // Called during/after gc
+  // Called during/after GC
   void swap_spaces();
 
   // Resize generation using suggested free space size and survivor size
@@ -146,14 +146,14 @@
   size_t free_in_words() const;
 
   // The max this generation can grow to
-  size_t max_size() const            { return _reserved.byte_size(); }
+  size_t max_size() const { return _reserved.byte_size(); }
 
   // The max this generation can grow to if the boundary between
   // the generations are allowed to move.
   size_t gen_size_limit() const { return _max_gen_size; }
 
   bool is_maximal_no_gc() const {
-    return true;  // never expands except at a GC
+    return true;  // Never expands except at a GC
   }
 
   // Allocation
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -121,7 +121,7 @@
 
   // Choose a number of GC threads based on the current size
   // of the heap.  This may be complicated because the size of
-  // the heap depends on factors such as the thoughput goal.
+  // the heap depends on factors such as the throughput goal.
   // Still a large heap should be collected by more GC threads.
   active_workers_by_heap_size =
       MAX2((size_t) 2U, Universe::heap()->capacity() / HeapSizePerGCThread);
@@ -168,7 +168,7 @@
 
   if (TraceDynamicGCThreads) {
      gclog_or_tty->print_cr("GCTaskManager::calc_default_active_workers() : "
-       "active_workers(): %d  new_acitve_workers: %d  "
+       "active_workers(): %d  new_active_workers: %d  "
        "prev_active_workers: %d\n"
        " active_workers_by_JT: %d  active_workers_by_heap_size: %d",
        active_workers, new_active_workers, prev_active_workers,
@@ -445,7 +445,7 @@
   // into account (i.e., don't trigger if the amount of free
   // space has suddenly jumped up).  If the current is much
   // higher than the average, use the average since it represents
-  // the longer term behavor.
+  // the longer term behavior.
   const size_t live_in_eden =
     MIN2(eden_live, (size_t) avg_eden_live()->average());
   const size_t free_in_eden = max_eden_size > live_in_eden ?
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -74,7 +74,7 @@
   };
 
   // Goal for the fraction of the total time during which application
-  // threads run.
+  // threads run
   const double _throughput_goal;
 
   // Last calculated sizes, in bytes, and aligned
@@ -83,21 +83,21 @@
 
   size_t _survivor_size;    // calculated survivor size in bytes
 
-  // This is a hint for the heap:  we've detected that gc times
+  // This is a hint for the heap:  we've detected that GC times
   // are taking longer than GCTimeLimit allows.
   bool _gc_overhead_limit_exceeded;
   // Use for diagnostics only.  If UseGCOverheadLimit is false,
   // this variable is still set.
   bool _print_gc_overhead_limit_would_be_exceeded;
   // Count of consecutive GC that have exceeded the
-  // GC time limit criterion.
+  // GC time limit criterion
   uint _gc_overhead_limit_count;
   // This flag signals that GCTimeLimit is being exceeded
-  // but may not have done so for the required number of consequetive
-  // collections.
+  // but may not have done so for the required number of consecutive
+  // collections
 
   // Minor collection timers used to determine both
-  // pause and interval times for collections.
+  // pause and interval times for collections
   static elapsedTimer _minor_timer;
 
   // Major collection timers, used to determine both
@@ -120,7 +120,7 @@
   // Statistics for survivor space calculation for young generation
   AdaptivePaddedAverage*   _avg_survived;
 
-  // Objects that have been directly allocated in the old generation.
+  // Objects that have been directly allocated in the old generation
   AdaptivePaddedNoZeroDevAverage*   _avg_pretenured;
 
   // Variable for estimating the major and minor pause times.
@@ -142,33 +142,33 @@
   // for making ergonomic decisions.
   double _latest_minor_mutator_interval_seconds;
 
-  // Allowed difference between major and minor gc times, used
-  // for computing tenuring_threshold.
+  // Allowed difference between major and minor GC times, used
+  // for computing tenuring_threshold
   const double _threshold_tolerance_percent;
 
-  const double _gc_pause_goal_sec; // goal for maximum gc pause
+  const double _gc_pause_goal_sec; // Goal for maximum GC pause
 
   // Flag indicating that the adaptive policy is ready to use
   bool _young_gen_policy_is_ready;
 
-  // decrease/increase the young generation for minor pause time
+  // Decrease/increase the young generation for minor pause time
   int _change_young_gen_for_min_pauses;
 
-  // decrease/increase the old generation for major pause time
+  // Decrease/increase the old generation for major pause time
   int _change_old_gen_for_maj_pauses;
 
-  //   change old geneneration for throughput
+  //   change old generation for throughput
   int _change_old_gen_for_throughput;
 
   //   change young generation for throughput
   int _change_young_gen_for_throughput;
 
   // Flag indicating that the policy would
-  //   increase the tenuring threshold because of the total major gc cost
-  //   is greater than the total minor gc cost
+  //   increase the tenuring threshold because the total major GC cost
+  //   is greater than the total minor GC cost
   bool _increment_tenuring_threshold_for_gc_cost;
-  //   decrease the tenuring threshold because of the the total minor gc
-  //   cost is greater than the total major gc cost
+  //   decrease the tenuring threshold because the total minor GC
+  //   cost is greater than the total major GC cost
   bool _decrement_tenuring_threshold_for_gc_cost;
   //   decrease due to survivor size limit
   bool _decrement_tenuring_threshold_for_survivor_limit;
@@ -182,7 +182,7 @@
   // Changing the generation sizing depends on the data that is
   // gathered about the effects of changes on the pause times and
   // throughput.  These variable count the number of data points
-  // gathered.  The policy may use these counters as a threshhold
+  // gathered.  The policy may use these counters as a threshold
   // for reliable data.
   julong _young_gen_change_for_minor_throughput;
   julong _old_gen_change_for_major_throughput;
@@ -225,7 +225,7 @@
   // larger than 1.0 if just the sum of the minor cost the
   // the major cost is used.  Worse than that is the
   // fact that the minor cost and the major cost each
-  // tend toward 1.0 in the extreme of high gc costs.
+  // tend toward 1.0 in the extreme of high GC costs.
   // Limit the value of gc_cost to 1.0 so that the mutator
   // cost stays non-negative.
   virtual double gc_cost() const {
@@ -238,23 +238,23 @@
   virtual double time_since_major_gc() const;
 
   // Average interval between major collections to be used
-  // in calculating the decaying major gc cost.  An overestimate
+  // in calculating the decaying major GC cost.  An overestimate
   // of this time would be a conservative estimate because
   // this time is used to decide if the major GC cost
   // should be decayed (i.e., if the time since the last
-  // major gc is long compared to the time returned here,
+  // major GC is long compared to the time returned here,
   // then the major GC cost will be decayed).  See the
   // implementations for the specifics.
   virtual double major_gc_interval_average_for_decay() const {
     return _avg_major_interval->average();
   }
 
-  // Return the cost of the GC where the major gc cost
+  // Return the cost of the GC where the major GC cost
   // has been decayed based on the time since the last
   // major collection.
   double decaying_gc_cost() const;
 
-  // Decay the major gc cost.  Use this only for decisions on
+  // Decay the major GC cost.  Use this only for decisions on
   // whether to adjust, not to determine by how much to adjust.
   // This approximation is crude and may not be good enough for the
   // latter.
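
For the gc_cost() cap discussed above: a minimal sketch, not HotSpot code, of why clamping at 1.0 matters; the function names and the derivation of mutator cost as 1.0 - gc_cost are stand-ins for the surrounding policy code.

// The decayed major cost plus the minor cost can exceed 1.0, and each tends
// toward 1.0 when GC costs are high, so clamp before deriving mutator cost.
double clamped_gc_cost(double minor_cost, double decayed_major_cost) {
  double cost = minor_cost + decayed_major_cost;
  return cost > 1.0 ? 1.0 : cost;
}

double mutator_cost(double gc_cost) {
  return 1.0 - gc_cost;  // non-negative thanks to the clamp
}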
--- a/src/share/vm/gc_implementation/shared/allocationStats.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/allocationStats.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/allocationStats.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/allocationStats.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -49,11 +49,11 @@
   // estimates.
   AdaptivePaddedAverage _demand_rate_estimate;
 
-  ssize_t     _desired;         // Demand stimate computed as described above
+  ssize_t     _desired;          // Demand estimate computed as described above
   ssize_t     _coal_desired;     // desired +/- small-percent for tuning coalescing
 
-  ssize_t     _surplus;         // count - (desired +/- small-percent),
-                                // used to tune splitting in best fit
+  ssize_t     _surplus;          // count - (desired +/- small-percent),
+                                 // used to tune splitting in best fit
   ssize_t     _bfr_surp;         // surplus at start of current sweep
   ssize_t     _prev_sweep;       // count from end of previous sweep
   ssize_t     _before_sweep;     // count from before current sweep
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,6 +31,7 @@
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 
 // CopyrightVersion 1.2
 
@@ -54,7 +55,7 @@
 void ConcurrentGCThread::create_and_start() {
   if (os::create_thread(this, os::cgc_thread)) {
     // XXX: need to set this to low priority
-    // unless "agressive mode" set; priority
+    // unless "aggressive mode" set; priority
     // should be just less than that of VMThread.
     os::set_priority(this, NearMaxPriority);
     if (!_should_terminate && !DisableStartThread) {
@@ -206,7 +207,7 @@
     // exceptions anyway, check and abort if this fails.
     if (res == NULL || res->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
     java_lang_Thread::set_thread(thread_oop(), res);
     java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/gcUtil.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gcUtil.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -159,7 +159,7 @@
 // that no calculation of the slope has yet been done.  Returning true
 // for a slope equal to 0 reflects the intuitive expectation of the
 // dependence on the slope.  Don't use the complement of these functions
-// since that untuitive expectation is not built into the complement.
+// since that intuitive expectation is not built into the complement.
 bool LinearLeastSquareFit::decrement_will_decrease() {
   return (_slope >= 0.00);
 }
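
Since the two slope queries above are deliberately not complements of each other: a minimal sketch, not HotSpot code, of the convention the comment describes. A slope of exactly 0 answers true in both directions, which is the intuitive expectation the comment says is not built into the complement.

// With a fitted slope s for y as a function of x:
//   decrementing x decreases y iff s >= 0 (shown above);
//   incrementing x decreases y iff s <= 0.
bool decrement_will_decrease(double slope) { return slope >= 0.0; }
bool increment_will_decrease(double slope) { return slope <= 0.0; }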
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -210,7 +210,7 @@
   double y(double x);
   double slope() { return _slope; }
   // Methods to decide if a change in the dependent variable will
-  // achive a desired goal.  Note that these methods are not
+  // achieve a desired goal.  Note that these methods are not
   // complementary and both are needed.
   bool decrement_will_decrease();
   bool increment_will_decrease();
--- a/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/immutableSpace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/isGCActiveMark.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -47,7 +47,6 @@
 SerialOldTracer*        MarkSweep::_gc_tracer       = NULL;
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
-CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
 void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -143,7 +143,6 @@
   // Public closures
   static IsAliveClosure       is_alive;
   static FollowRootClosure    follow_root_closure;
-  static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@
 #endif  // NOT_PRODUCT
 
 // There may be unallocated holes in the middle chunks
-// that should be filled with dead objects to ensure parseability.
+// that should be filled with dead objects to ensure parsability.
 void MutableNUMASpace::ensure_parsability() {
   for (int i = 0; i < lgrp_spaces()->length(); i++) {
     LGRPSpace *ls = lgrp_spaces()->at(i);
@@ -173,6 +173,26 @@
   return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
 }
 
+size_t MutableNUMASpace::tlab_used(Thread *thr) const {
+  // Please see the comments for tlab_capacity().
+  guarantee(thr != NULL, "No thread");
+  int lgrp_id = thr->lgrp_id();
+  if (lgrp_id == -1) {
+    if (lgrp_spaces()->length() > 0) {
+      return used_in_bytes() / lgrp_spaces()->length();
+    } else {
+      assert(false, "There should be at least one locality group");
+      return 0;
+    }
+  }
+  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
+  if (i == -1) {
+    return 0;
+  }
+  return lgrp_spaces()->at(i)->space()->used_in_bytes();
+}
+
+
 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
   // Please see the comments for tlab_capacity().
   guarantee(thr != NULL, "No thread");
@@ -539,7 +559,7 @@
                                   bool clear_space,
                                   bool mangle_space,
                                   bool setup_pages) {
-  assert(clear_space, "Reallocation will destory data!");
+  assert(clear_space, "Reallocation will destroy data!");
   assert(lgrp_spaces()->length() > 0, "There should be at least one space");
 
   MemRegion old_region = region(), new_region;
@@ -880,8 +900,8 @@
 }
 
 void MutableNUMASpace::verify() {
-  // This can be called after setting an arbitary value to the space's top,
-  // so an object can cross the chunk boundary. We ensure the parsablity
+  // This can be called after setting an arbitrary value to the space's top,
+  // so an object can cross the chunk boundary. We ensure the parsability
   // of the space and just walk the objects in linear fashion.
   ensure_parsability();
   MutableSpace::verify();
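
For the new MutableNUMASpace::tlab_used() above: a minimal standalone sketch, not HotSpot code, of the estimate it makes. The vector of per-group used byte counts is a hypothetical stand-in for lgrp_spaces(), and a negative index stands in for a thread whose lgrp_id is still -1.

#include <cstddef>
#include <vector>

// A thread not yet bound to a locality group gets an even share of the total
// used bytes; a bound thread gets the used bytes of its own group's space.
size_t tlab_used_estimate(int lgrp_index, const std::vector<size_t>& used_per_group) {
  if (used_per_group.empty()) {
    return 0;  // the patch asserts at least one locality group exists
  }
  if (lgrp_index < 0) {
    size_t total = 0;
    for (size_t used : used_per_group) {
      total += used;
    }
    return total / used_per_group.size();
  }
  return used_per_group[static_cast<size_t>(lgrp_index)];
}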
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -217,6 +217,7 @@
   using MutableSpace::capacity_in_words;
   virtual size_t capacity_in_words(Thread* thr) const;
   virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
   // Allocation (return NULL if full)
--- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,7 +31,7 @@
 
 // A MutableSpace is a subtype of ImmutableSpace that supports the
 // concept of allocation. This includes the concepts that a space may
-// be only partially full, and the querry methods that go with such
+// be only partially full, and the query methods that go with such
 // an assumption. MutableSpace is also responsible for minimizing the
 // page allocation time by having the memory pretouched (with
 // AlwaysPretouch) and for optimizing page placement on NUMA systems
@@ -111,7 +111,7 @@
 
   virtual void mangle_region(MemRegion mr) PRODUCT_RETURN;
 
-  // Boolean querries.
+  // Boolean queries.
   bool is_empty() const              { return used_in_words() == 0; }
   bool not_empty() const             { return used_in_words() > 0; }
   bool contains(const void* p) const { return _bottom <= p && p < _end; }
@@ -124,6 +124,7 @@
   virtual size_t used_in_words() const                    { return pointer_delta(top(), bottom()); }
   virtual size_t free_in_words() const                    { return pointer_delta(end(),    top()); }
   virtual size_t tlab_capacity(Thread* thr) const         { return capacity_in_bytes();            }
+  virtual size_t tlab_used(Thread* thr) const             { return used_in_bytes();                }
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes();                }
 
   // Allocation (return NULL if full)
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -89,6 +89,10 @@
 // scavenge; it clears the sensor accumulators.
 void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
   assert(ResizePLAB, "Not set");
+
+  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
+         "PLAB clipping computation may be incorrect");
+
   if (_allocated == 0) {
     assert(_unused == 0,
            err_msg("Inconsistency in PLAB stats: "
@@ -152,7 +156,7 @@
 
 // The buffer comes with its own BOT, with a shared (obviously) underlying
 // BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on accasion we
+// as we would for any contiguous space. However, on occasion we
 // need to do some buffer surgery at the extremities before we
 // start using the body of the buffer for allocations. Such surgery
 // (as explained elsewhere) is to prevent allocation on a card that
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,7 +92,7 @@
   }
 
   // The total (word) size of the buffer, including both allocated and
-  // unallocted space.
+  // unallocated space.
   size_t word_sz() { return _word_sz; }
 
   // Should only be done if we are about to reset with a new buffer of the
@@ -158,7 +158,7 @@
   // Fills in the unallocated portion of the buffer with a garbage object.
   // If "end_of_gc" is TRUE, is after the last use in the GC.  IF "retain"
   // is true, attempt to re-use the unused portion in the next GC.
-  virtual void retire(bool end_of_gc, bool retain);
+  void retire(bool end_of_gc, bool retain);
 
   void print() PRODUCT_RETURN;
 };
@@ -181,16 +181,7 @@
     _used(0),
     _desired_plab_sz(desired_plab_sz_),
     _filter(wt)
-  {
-    size_t min_sz = min_size();
-    size_t max_sz = max_size();
-    size_t aligned_min_sz = align_object_size(min_sz);
-    size_t aligned_max_sz = align_object_size(max_sz);
-    assert(min_sz <= aligned_min_sz && max_sz >= aligned_max_sz &&
-           min_sz <= max_sz,
-           "PLAB clipping computation in adjust_desired_plab_sz()"
-           " may be incorrect");
-  }
+  { }
 
   static const size_t min_size() {
     return ParGCAllocBuffer::min_size();
--- a/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/spaceCounters.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/spaceCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -75,7 +75,7 @@
 
   // High water mark for allocations.  Typically, the space above
   // this point have been mangle previously and don't need to be
-  // touched again.  Space belows this point has been allocated
+  // touched again.  Space below this point has been allocated
   // and remangling is needed between the current top and this
   // high water mark.
   HeapWord* _top_for_allocations;
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -41,32 +41,18 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
-HS_DTRACE_PROBE_DECL(hotspot, gc__end);
-#endif /* !USDT2 */
-
 // The same dtrace probe can't be inserted in two different files, so we
 // have to call it here, so it's only in one file.  Can't create new probes
 // for the other file anymore.   The dtrace probes have to remain stable.
 void VM_GC_Operation::notify_gc_begin(bool full) {
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
-  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#else /* USDT2 */
   HOTSPOT_GC_BEGIN(
                    full);
-#endif /* USDT2 */
+  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
 }
 
 void VM_GC_Operation::notify_gc_end() {
-#ifndef USDT2
-  HS_DTRACE_PROBE(hotspot, gc__end);
+  HOTSPOT_GC_END();
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#else /* USDT2 */
-  HOTSPOT_GC_END(
-);
-#endif /* USDT2 */
 }
 
 void VM_GC_Operation::acquire_pending_list_lock() {
@@ -82,7 +68,7 @@
 
 // Allocations may fail in several threads at about the same time,
 // resulting in multiple gc requests.  We only want to do one of them.
-// In case a GC locker is active and the need for a GC is already signalled,
+// In case a GC locker is active and the need for a GC is already signaled,
 // we want to skip this GC attempt altogether, without doing a futile
 // safepoint operation.
 bool VM_GC_Operation::skip_operation() const {
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -323,6 +323,21 @@
   assert(thread->deferred_card_mark().is_empty(), "invariant");
 }
 
+size_t CollectedHeap::max_tlab_size() const {
+  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
+  // This restriction could be removed by enabling filling with multiple arrays.
+  // If we compute that the reasonable way as
+  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
+  // we'll overflow on the multiply, so we do the divide first.
+  // We actually lose a little by dividing first,
+  // but that just makes the TLAB somewhat smaller than the biggest array,
+  // which is fine, since we'll be able to fill that.
+  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
+              sizeof(jint) *
+              ((juint) max_jint / (size_t) HeapWordSize);
+  return align_size_down(max_int_size, MinObjAlignment);
+}
+
 // Helper for ReduceInitialCardMarks. For performance,
 // compiled code may elide card-marks for initializing stores
 // to a newly allocated object along the fast-path. We
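
The divide-before-multiply trick in max_tlab_size() can be seen in isolation
below; the 32-bit wrap is forced with explicit casts, and the word size and
header size are assumed values rather than HotSpot's:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint     = 0x7fffffff;
  const uint32_t HeapWordSize = 8;  // assumption: 64-bit heap words
  const uint32_t header_words = 2;  // assumption: int[] header size in words

  // Multiplying first wraps in 32 bits: 4 * 0x7fffffff overflows.
  uint32_t mul_first = (uint32_t)sizeof(int32_t) * max_jint / HeapWordSize;

  // Dividing first keeps every intermediate in range, at the cost of a
  // slightly smaller result -- exactly the trade-off the comment describes.
  uint32_t div_first = header_words +
                       (uint32_t)sizeof(int32_t) * (max_jint / HeapWordSize);

  printf("multiply first: %u words, divide first: %u words\n",
         mul_first, div_first);  // 536870911 vs. 1073741822
  return 0;
}
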
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -389,28 +389,21 @@
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity.  In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation.  It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
   // Returns "true" iff the heap supports thread-local allocation buffers.
   // The default is "no".
-  virtual bool supports_tlab_allocation() const {
-    return false;
-  }
+  virtual bool supports_tlab_allocation() const = 0;
+
   // The amount of space available for thread-local allocation buffers.
-  virtual size_t tlab_capacity(Thread *thr) const {
-    guarantee(false, "thread-local allocation buffers not supported");
-    return 0;
-  }
+  virtual size_t tlab_capacity(Thread *thr) const = 0;
+
+  // The amount of used space for thread-local allocation buffers for the given thread.
+  virtual size_t tlab_used(Thread *thr) const = 0;
+
+  virtual size_t max_tlab_size() const;
+
   // An estimate of the maximum allocation that could be performed
   // for thread-local allocation buffers without triggering any
   // collection or expansion activity.
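
With supports_tlab_allocation(), tlab_capacity() and the new tlab_used() now
pure virtual, every concrete heap has to answer them explicitly instead of
inheriting a guarantee-failure default. A toy illustration of the obligation
(ToyHeap and its numbers are hypothetical, not a real collector):

#include <cstddef>

class ToyHeap /* : public CollectedHeap */ {
public:
  bool   supports_tlab_allocation() const       { return true; }
  size_t tlab_capacity(/* Thread* thr */) const { return 64 * 1024; }
  size_t tlab_used(/* Thread* thr */) const     { return 16 * 1024; }
};
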
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_interface/gcCause.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_interface/gcCause.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/gc_interface/gcCause.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/gc_interface/gcCause.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
 // This class exposes implementation details of the various
 // collector(s), and we need to be very careful with it. If
 // use of this class grows, we should split it into public
-// and implemenation-private "causes".
+// and implementation-private "causes".
 //
 
 class GCCause : public AllStatic {
--- a/src/share/vm/interpreter/abstractInterpreter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/abstractInterpreter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -42,8 +42,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "interp_masm_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "interp_masm_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "interp_masm_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "interp_masm_ppc_64.hpp"
 #endif
 
 // This file contains the platform-independent parts
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,16 @@
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodeInterpreter.inline.hpp"
+#include "interpreter/bytecodeInterpreterProfiling.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
-#include "memory/cardTableModRefBS.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/methodCounters.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/biasedLocking.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -66,6 +68,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "orderAccess_linux_ppc.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "orderAccess_bsd_x86.inline.hpp"
 #endif
@@ -138,19 +143,20 @@
  * is no entry point to do the transition to vm so we just
  * do it by hand here.
  */
-#define VM_JAVA_ERROR_NO_JUMP(name, msg)                                          \
+#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                             \
     DECACHE_STATE();                                                              \
     SET_LAST_JAVA_FRAME();                                                        \
     {                                                                             \
+       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());          \
        ThreadInVMfromJava trans(THREAD);                                          \
        Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
     }                                                                             \
     RESET_LAST_JAVA_FRAME();                                                      \
     CACHE_STATE();
 
-// Normal throw of a java error
-#define VM_JAVA_ERROR(name, msg)                                                  \
-    VM_JAVA_ERROR_NO_JUMP(name, msg)                                              \
+// Normal throw of a java error.
+#define VM_JAVA_ERROR(name, msg, note_a_trap)                                     \
+    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                                 \
     goto handle_exception;
 
 #ifdef PRODUCT
@@ -197,6 +203,10 @@
               !THREAD->pop_frame_in_process()) {                                 \
             goto handle_Pop_Frame;                                               \
           }                                                                      \
+          if (THREAD->jvmti_thread_state() &&                                    \
+              THREAD->jvmti_thread_state()->is_earlyret_pending()) {             \
+            goto handle_Early_Return;                                            \
+          }                                                                      \
           opcode = *pc;                                                          \
         }                                                                        \
       }                                                                          \
@@ -332,12 +342,30 @@
       if (UseLoopCounter) {                                                                         \
         bool do_OSR = UseOnStackReplacement;                                                        \
         mcs->backedge_counter()->increment();                                                       \
-        if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit();                    \
+        if (ProfileInterpreter) {                                                                   \
+          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                                   \
+          /* Check for overflow against MDO count. */                                               \
+          do_OSR = do_OSR                                                                           \
+            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
+            /* When ProfileInterpreter is on, the backedge_count comes     */                       \
+            /* from the methodDataOop, whose value does not get reset on   */                       \
+            /* the call to frequency_counter_overflow(). To avoid          */                       \
+            /* excessive calls to the overflow routine while the method is */                       \
+            /* being compiled, add a second test to make sure the overflow */                       \
+            /* function is called only once every overflow_frequency.      */                       \
+            && (!(mdo_last_branch_taken_count & 1023));                                             \
+        } else {                                                                                    \
+          /* check for overflow of backedge counter */                                              \
+          do_OSR = do_OSR                                                                           \
+            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter());         \
+        }                                                                                           \
         if (do_OSR) {                                                                               \
-          nmethod*  osr_nmethod;                                                                    \
+          nmethod* osr_nmethod;                                                                     \
           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
           if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
-            intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD);                             \
+            intptr_t* buf;                                                                          \
+            /* Call OSR migration with last java frame only, no checks. */                          \
+            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));                      \
             istate->set_msg(do_osr);                                                                \
             istate->set_osr_buf((address)buf);                                                      \
             istate->set_osr_entry(osr_nmethod->osr_entry());                                        \
@@ -345,7 +373,6 @@
           }                                                                                         \
         }                                                                                           \
       }  /* UseCompiler ... */                                                                      \
-      mcs->invocation_counter()->increment();                                                       \
       SAFEPOINT;                                                                                    \
     }
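
The "& 1023" test above is a mask-based throttle: once the MDO branch count
passes the limit, the expensive overflow routine fires only on every 1024th
taken branch while the method is being compiled. A standalone sketch of the
same idea (the limit and the hook are stand-ins, not the real interpreter
plumbing):

#include <cstdint>
#include <cstdio>

static void frequency_counter_overflow() { puts("overflow routine called"); }

int main() {
  const uint32_t limit = 10;  // stand-in for InterpreterBackwardBranchLimit
  for (uint32_t taken = 0; taken < 5000; ++taken) {
    bool do_osr = taken >= limit      // threshold reached...
               && !(taken & 1023);    // ...but fire once per 1024 hits
    if (do_osr) frequency_counter_overflow();  // runs 4 times, not ~5000
  }
  return 0;
}
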
 
@@ -378,17 +405,21 @@
 #undef CACHE_FRAME
 #define CACHE_FRAME()
 
+// BCI() returns the current bytecode-index.
+#undef  BCI
+#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
+
 /*
  * CHECK_NULL - Macro for throwing a NullPointerException if the object
  * passed is a null ref.
  * On some architectures/platforms it should be possible to do this implicitly
  */
 #undef CHECK_NULL
-#define CHECK_NULL(obj_)                                                 \
-    if ((obj_) == NULL) {                                                \
-        VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "");  \
-    }                                                                    \
-    VERIFY_OOP(obj_)
+#define CHECK_NULL(obj_)                                                                         \
+        if ((obj_) == NULL) {                                                                    \
+          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
+        }                                                                                        \
+        VERIFY_OOP(obj_)
 
 #define VMdoubleConstZero() 0.0
 #define VMdoubleConstOne() 1.0
@@ -410,22 +441,30 @@
         CACHE_CP();     \
         CACHE_LOCALS();
 
-// Call the VM don't check for pending exceptions
-#define CALL_VM_NOCHECK(func)                                     \
-          DECACHE_STATE();                                        \
-          SET_LAST_JAVA_FRAME();                                  \
-          func;                                                   \
-          RESET_LAST_JAVA_FRAME();                                \
-          CACHE_STATE();                                          \
-          if (THREAD->pop_frame_pending() &&                      \
-              !THREAD->pop_frame_in_process()) {                  \
-            goto handle_Pop_Frame;                                \
-          }
+// Call the VM with last java frame only.
+#define CALL_VM_NAKED_LJF(func)                                    \
+        DECACHE_STATE();                                           \
+        SET_LAST_JAVA_FRAME();                                     \
+        func;                                                      \
+        RESET_LAST_JAVA_FRAME();                                   \
+        CACHE_STATE();
+
+// Call the VM. Don't check for pending exceptions.
+#define CALL_VM_NOCHECK(func)                                      \
+        CALL_VM_NAKED_LJF(func)                                    \
+        if (THREAD->pop_frame_pending() &&                         \
+            !THREAD->pop_frame_in_process()) {                     \
+          goto handle_Pop_Frame;                                   \
+        }                                                          \
+        if (THREAD->jvmti_thread_state() &&                        \
+            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
+          goto handle_Early_Return;                                \
+        }
 
 // Call the VM and check for pending exceptions
-#define CALL_VM(func, label) {                                    \
-          CALL_VM_NOCHECK(func);                                  \
-          if (THREAD->has_pending_exception()) goto label;        \
+#define CALL_VM(func, label) {                                     \
+          CALL_VM_NOCHECK(func);                                   \
+          if (THREAD->has_pending_exception()) goto label;         \
         }
 
 /*
@@ -502,8 +541,6 @@
   interpreterState orig = istate;
 #endif
 
-  static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
-
   register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
   register address          pc = istate->bcp();
   register jubyte opcode;
@@ -511,12 +548,9 @@
   register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
 #ifdef LOTS_OF_REGS
   register JavaThread*      THREAD = istate->thread();
-  register volatile jbyte*  BYTE_MAP_BASE = _byte_map_base;
 #else
 #undef THREAD
 #define THREAD istate->thread()
-#undef BYTE_MAP_BASE
-#define BYTE_MAP_BASE _byte_map_base
 #endif
 
 #ifdef USELABELS
@@ -622,16 +656,20 @@
          topOfStack < istate->stack_base(),
          "Stack top out of range");
 
+#ifdef CC_INTERP_PROFILE
+  // MethodData's last branch taken count.
+  uint mdo_last_branch_taken_count = 0;
+#else
+  const uint mdo_last_branch_taken_count = 0;
+#endif
+
   switch (istate->msg()) {
     case initialize: {
-      if (initialized++) ShouldNotReachHere(); // Only one initialize call
+      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
       _compiling = (UseCompiler || CountCompiledCalls);
 #ifdef VM_JVMTI
       _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
 #endif
-      BarrierSet* bs = Universe::heap()->barrier_set();
-      assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
-      _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
       return;
     }
     break;
@@ -646,15 +684,12 @@
           METHOD->increment_interpreter_invocation_count(THREAD);
         }
         mcs->invocation_counter()->increment();
-        if (mcs->invocation_counter()->reached_InvocationLimit()) {
-            CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
-
-            // We no longer retry on a counter overflow
-
-            // istate->set_msg(retry_method);
-            // THREAD->clr_do_not_unlock();
-            // return;
+        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
+          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
+          // We no longer retry on a counter overflow.
         }
+        // Get or create profile data. Check for pending (async) exceptions.
+        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
         SAFEPOINT;
       }
 
@@ -676,117 +711,99 @@
       }
 #endif // HACK
 
-
-      // lock method if synchronized
+      // Lock method if synchronized.
       if (METHOD->is_synchronized()) {
-          // oop rcvr = locals[0].j.r;
-          oop rcvr;
-          if (METHOD->is_static()) {
-            rcvr = METHOD->constants()->pool_holder()->java_mirror();
-          } else {
-            rcvr = LOCALS_OBJECT(0);
-            VERIFY_OOP(rcvr);
-          }
-          // The initial monitor is ours for the taking
-          BasicObjectLock* mon = &istate->monitor_base()[-1];
-          oop monobj = mon->obj();
-          assert(mon->obj() == rcvr, "method monitor mis-initialized");
-
-          bool success = UseBiasedLocking;
-          if (UseBiasedLocking) {
-            markOop mark = rcvr->mark();
-            if (mark->has_bias_pattern()) {
-              // The bias pattern is present in the object's header. Need to check
-              // whether the bias owner and the epoch are both still current.
-              intptr_t xx = ((intptr_t) THREAD) ^ (intptr_t) mark;
-              xx = (intptr_t) rcvr->klass()->prototype_header() ^ xx;
-              intptr_t yy = (xx & ~((int) markOopDesc::age_mask_in_place));
-              if (yy != 0 ) {
-                // At this point we know that the header has the bias pattern and
-                // that we are not the bias owner in the current epoch. We need to
-                // figure out more details about the state of the header in order to
-                // know what operations can be legally performed on the object's
-                // header.
-
-                // If the low three bits in the xor result aren't clear, that means
-                // the prototype header is no longer biased and we have to revoke
-                // the bias on this object.
-
-                if (yy & markOopDesc::biased_lock_mask_in_place == 0 ) {
-                  // Biasing is still enabled for this data type. See whether the
-                  // epoch of the current bias is still valid, meaning that the epoch
-                  // bits of the mark word are equal to the epoch bits of the
-                  // prototype header. (Note that the prototype header's epoch bits
-                  // only change at a safepoint.) If not, attempt to rebias the object
-                  // toward the current thread. Note that we must be absolutely sure
-                  // that the current epoch is invalid in order to do this because
-                  // otherwise the manipulations it performs on the mark word are
-                  // illegal.
-                  if (yy & markOopDesc::epoch_mask_in_place == 0) {
-                    // The epoch of the current bias is still valid but we know nothing
-                    // about the owner; it might be set or it might be clear. Try to
-                    // acquire the bias of the object using an atomic operation. If this
-                    // fails we will go in to the runtime to revoke the object's bias.
-                    // Note that we first construct the presumed unbiased header so we
-                    // don't accidentally blow away another thread's valid bias.
-                    intptr_t unbiased = (intptr_t) mark & (markOopDesc::biased_lock_mask_in_place |
-                                                           markOopDesc::age_mask_in_place |
-                                                           markOopDesc::epoch_mask_in_place);
-                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | unbiased, (intptr_t*) rcvr->mark_addr(), unbiased) != unbiased) {
-                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-                    }
-                  } else {
-                    try_rebias:
-                    // At this point we know the epoch has expired, meaning that the
-                    // current "bias owner", if any, is actually invalid. Under these
-                    // circumstances _only_, we are allowed to use the current header's
-                    // value as the comparison value when doing the cas to acquire the
-                    // bias in the current epoch. In other words, we allow transfer of
-                    // the bias from one thread to another directly in this situation.
-                    xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
-                    if (Atomic::cmpxchg_ptr((intptr_t)THREAD | (intptr_t) rcvr->klass()->prototype_header(),
-                                            (intptr_t*) rcvr->mark_addr(),
-                                            (intptr_t) mark) != (intptr_t) mark) {
-                      CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-                    }
-                  }
-                } else {
-                  try_revoke_bias:
-                  // The prototype mark in the klass doesn't have the bias bit set any
-                  // more, indicating that objects of this data type are not supposed
-                  // to be biased any more. We are going to try to reset the mark of
-                  // this object to the prototype value and fall through to the
-                  // CAS-based locking scheme. Note that if our CAS fails, it means
-                  // that another thread raced us for the privilege of revoking the
-                  // bias of this particular object, so it's okay to continue in the
-                  // normal locking code.
-                  //
-                  xx = (intptr_t) rcvr->klass()->prototype_header() | (intptr_t) THREAD;
-                  if (Atomic::cmpxchg_ptr(rcvr->klass()->prototype_header(),
-                                          (intptr_t*) rcvr->mark_addr(),
-                                          mark) == mark) {
-                    // (*counters->revoked_lock_entry_count_addr())++;
-                  success = false;
-                  }
-                }
+        // oop rcvr = locals[0].j.r;
+        oop rcvr;
+        if (METHOD->is_static()) {
+          rcvr = METHOD->constants()->pool_holder()->java_mirror();
+        } else {
+          rcvr = LOCALS_OBJECT(0);
+          VERIFY_OOP(rcvr);
+        }
+        // The initial monitor is ours for the taking.
+        // Monitor not filled in frame manager any longer, as this caused a race condition with biased locking.
+        BasicObjectLock* mon = &istate->monitor_base()[-1];
+        mon->set_obj(rcvr);
+        bool success = false;
+        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+        markOop mark = rcvr->mark();
+        intptr_t hash = (intptr_t) markOopDesc::no_hash;
+        // Implies UseBiasedLocking.
+        if (mark->has_bias_pattern()) {
+          uintptr_t thread_ident;
+          uintptr_t anticipated_bias_locking_value;
+          thread_ident = (uintptr_t)istate->thread();
+          anticipated_bias_locking_value =
+            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+            ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+          if (anticipated_bias_locking_value == 0) {
+            // Already biased towards this thread, nothing to do.
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::biased_lock_entry_count_addr())++;
+            }
+            success = true;
+          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+            // Try to revoke bias.
+            markOop header = rcvr->klass()->prototype_header();
+            if (hash != markOopDesc::no_hash) {
+              header = header->copy_set_hash(hash);
+            }
+            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
+              if (PrintBiasedLockingStatistics)
+                (*BiasedLocking::revoked_lock_entry_count_addr())++;
+            }
+          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
+            // Try to rebias.
+            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
+            if (hash != markOopDesc::no_hash) {
+              new_header = new_header->copy_set_hash(hash);
+            }
+            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
             } else {
-              cas_label:
-              success = false;
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
+            }
+            success = true;
+          } else {
+            // Try to bias towards thread in case object is anonymously biased.
+            markOop header = (markOop) ((uintptr_t) mark &
+                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
+            if (hash != markOopDesc::no_hash) {
+              header = header->copy_set_hash(hash);
+            }
+            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+            // Debugging hint.
+            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+              }
+            } else {
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
+            }
+            success = true;
+          }
+        }
+
+        // Traditional lightweight locking.
+        if (!success) {
+          markOop displaced = rcvr->mark()->set_unlocked();
+          mon->lock()->set_displaced_header(displaced);
+          bool call_vm = UseHeavyMonitors;
+          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+            // Is it simple recursive case?
+            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+              mon->lock()->set_displaced_header(NULL);
+            } else {
+              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
             }
           }
-          if (!success) {
-            markOop displaced = rcvr->mark()->set_unlocked();
-            mon->lock()->set_displaced_header(displaced);
-            if (Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
-              // Is it simple recursive case?
-              if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-                mon->lock()->set_displaced_header(NULL);
-              } else {
-                CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
-              }
-            }
-          }
+        }
       }
       THREAD->clr_do_not_unlock();
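
The anticipated_bias_locking_value computation folds the whole "is this
already biased to us, in the current epoch?" question into one XOR: mask off
the age bits, and a zero result means the mark word already carries this
thread's bias. A toy model with made-up bit positions (HotSpot's real mark
word layout differs):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t age_mask  = 0x78;      // made-up age bit field
  const uintptr_t prototype = 0x5;       // made-up bias-pattern bits
  const uintptr_t thread_id = 0x100000;  // made-up thread identity bits

  // Mark biased to this thread, with an age bit set.
  uintptr_t mark = prototype | thread_id | 0x8;
  uintptr_t anticipated = ((prototype | thread_id) ^ mark) & ~age_mask;
  printf("biased to us: %s\n", anticipated == 0 ? "yes" : "no");  // yes

  // Mark biased to some other thread: a nonzero result, so the slower
  // revoke/rebias arms are taken instead.
  uintptr_t other = prototype | 0x200000;
  anticipated = ((prototype | thread_id) ^ other) & ~age_mask;
  printf("biased to us: %s\n", anticipated == 0 ? "yes" : "no");  // no
  return 0;
}
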
 
@@ -808,9 +825,14 @@
     case popping_frame: {
       // returned from a java call to pop the frame, restart the call
       // clear the message so we don't confuse ourselves later
-      ShouldNotReachHere();  // we don't return this.
       assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
       istate->set_msg(no_request);
+      if (_compiling) {
+        // Set MDX back to the ProfileData of the invoke bytecode that will be
+        // restarted.
+        SET_MDX(NULL);
+        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
+      }
       THREAD->clr_pop_frame_in_process();
       goto run;
     }
@@ -836,10 +858,19 @@
       if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
         goto handle_Pop_Frame;
       }
+      if (THREAD->jvmti_thread_state() &&
+          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
+        goto handle_Early_Return;
+      }
 
       if (THREAD->has_pending_exception()) goto handle_exception;
       // Update the pc by the saved amount of the invoke bytecode size
       UPDATE_PC(istate->bcp_advance());
+
+      if (_compiling) {
+        // Get or create profile data. Check for pending (async) exceptions.
+        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
+      }
       goto run;
     }
 
@@ -847,6 +878,11 @@
       // Returned from an opcode that will reexecute. Deopt was
       // a result of a PopFrame request.
       //
+
+      if (_compiling) {
+        // Get or create profile data. Check for pending (async) exceptions.
+        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
+      }
       goto run;
     }
 
@@ -869,6 +905,11 @@
       }
       UPDATE_PC(Bytecodes::length_at(METHOD, pc));
       if (THREAD->has_pending_exception()) goto handle_exception;
+
+      if (_compiling) {
+        // Get or create profile data. Check for pending (async) exceptions.
+        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
+      }
       goto run;
     }
     case got_monitors: {
@@ -881,15 +922,84 @@
       BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
       assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
       entry->set_obj(lockee);
-
-      markOop displaced = lockee->mark()->set_unlocked();
-      entry->lock()->set_displaced_header(displaced);
-      if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
-        // Is it simple recursive case?
-        if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-          entry->lock()->set_displaced_header(NULL);
+      bool success = false;
+      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+
+      markOop mark = lockee->mark();
+      intptr_t hash = (intptr_t) markOopDesc::no_hash;
+      // implies UseBiasedLocking
+      if (mark->has_bias_pattern()) {
+        uintptr_t thread_ident;
+        uintptr_t anticipated_bias_locking_value;
+        thread_ident = (uintptr_t)istate->thread();
+        anticipated_bias_locking_value =
+          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+          ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+        if (anticipated_bias_locking_value == 0) {
+          // already biased towards this thread, nothing to do
+          if (PrintBiasedLockingStatistics) {
+            (* BiasedLocking::biased_lock_entry_count_addr())++;
+          }
+          success = true;
+        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+          // try revoke bias
+          markOop header = lockee->klass()->prototype_header();
+          if (hash != markOopDesc::no_hash) {
+            header = header->copy_set_hash(hash);
+          }
+          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+            if (PrintBiasedLockingStatistics) {
+              (*BiasedLocking::revoked_lock_entry_count_addr())++;
+            }
+          }
+        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
+          // try rebias
+          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
+          if (hash != markOopDesc::no_hash) {
+            new_header = new_header->copy_set_hash(hash);
+          }
+          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
+            }
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
+          success = true;
         } else {
-          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          // try to bias towards thread in case object is anonymously biased
+          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
+          if (hash != markOopDesc::no_hash) {
+            header = header->copy_set_hash(hash);
+          }
+          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+          // debugging hint
+          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+            if (PrintBiasedLockingStatistics) {
+              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+            }
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
+          success = true;
+        }
+      }
+
+      // traditional lightweight locking
+      if (!success) {
+        markOop displaced = lockee->mark()->set_unlocked();
+        entry->lock()->set_displaced_header(displaced);
+        bool call_vm = UseHeavyMonitors;
+        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+          // Is it simple recursive case?
+          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+            entry->lock()->set_displaced_header(NULL);
+          } else {
+            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          }
         }
       }
       UPDATE_PC_AND_TOS(1, -1);
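
The "traditional lightweight locking" fallback is the classic displaced-header
stack lock: save the unlocked mark word into the BasicLock slot on the stack,
CAS the object's mark to point at that slot, and record a recursive acquire
with a NULL displaced header. A simplified single-object model in C++11
atomics (the layout and helpers are assumptions, not HotSpot code):

#include <atomic>
#include <cstdint>

struct BasicLock { uintptr_t displaced; };  // stand-in for the stack lock slot
std::atomic<uintptr_t> mark_word{0x1};      // 0x1 = assumed "unlocked" pattern

static bool points_into_our_stack(uintptr_t p) { return false; }  // ~ is_lock_owned()

bool fast_lock(BasicLock* slot) {
  uintptr_t displaced = mark_word.load() | 0x1;  // presumed unlocked value
  slot->displaced = displaced;
  uintptr_t expected = displaced;
  if (mark_word.compare_exchange_strong(expected, (uintptr_t)slot))
    return true;                          // mark now points at our stack slot
  if (points_into_our_stack(expected)) {  // recursive case: we already own it
    slot->displaced = 0;                  // NULL displaced header marks reentry
    return true;
  }
  return false;                           // contended: take the monitorenter path
}
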
@@ -1050,6 +1160,11 @@
           uint16_t reg = Bytes::get_Java_u2(pc + 2);
 
           opcode = pc[1];
+
+          // Wide and its sub-bytecode are counted as separate instructions. If we
+          // don't account for this here, the bytecode trace skips the next bytecode.
+          DO_UPDATE_INSTRUCTION_COUNT(opcode);
+
           switch(opcode) {
               case Bytecodes::_aload:
                   VERIFY_OOP(LOCALS_OBJECT(reg));
@@ -1093,10 +1208,13 @@
                   UPDATE_PC_AND_CONTINUE(6);
               }
               case Bytecodes::_ret:
+                  // Profile ret.
+                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
+                  // Now, update the pc.
                   pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                   UPDATE_PC_AND_CONTINUE(0);
               default:
-                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
+                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
           }
       }
 
@@ -1177,7 +1295,7 @@
       CASE(_i##opcname):                                                \
           if (test && (STACK_INT(-1) == 0)) {                           \
               VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
-                            "/ by zero");                               \
+                            "/ by zero", note_div0Check_trap);          \
           }                                                             \
           SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                       STACK_INT(-1)),                   \
@@ -1189,7 +1307,7 @@
             jlong l1 = STACK_LONG(-1);                                  \
             if (VMlongEqz(l1)) {                                        \
               VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
-                            "/ by long zero");                          \
+                            "/ by long zero", note_div0Check_trap);     \
             }                                                           \
           }                                                             \
           /* First long at (-1,-2) next long at (-3,-4) */              \
@@ -1402,17 +1520,23 @@
 
 #define COMPARISON_OP(name, comparison)                                      \
       CASE(_if_icmp##name): {                                                \
-          int skip = (STACK_INT(-2) comparison STACK_INT(-1))                \
+          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1));         \
+          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
           address branch_pc = pc;                                            \
+          /* Profile branch. */                                              \
+          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
           UPDATE_PC_AND_TOS(skip, -2);                                       \
           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
           CONTINUE;                                                          \
       }                                                                      \
       CASE(_if##name): {                                                     \
-          int skip = (STACK_INT(-1) comparison 0)                            \
+          const bool cmp = (STACK_INT(-1) comparison 0);                     \
+          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
           address branch_pc = pc;                                            \
+          /* Profile branch. */                                              \
+          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
           UPDATE_PC_AND_TOS(skip, -1);                                       \
           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
           CONTINUE;                                                          \
@@ -1421,9 +1545,12 @@
 #define COMPARISON_OP2(name, comparison)                                     \
       COMPARISON_OP(name, comparison)                                        \
       CASE(_if_acmp##name): {                                                \
-          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))          \
+          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1));   \
+          int skip = cmp                                                     \
                        ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
           address branch_pc = pc;                                            \
+          /* Profile branch. */                                              \
+          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
           UPDATE_PC_AND_TOS(skip, -2);                                       \
           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
           CONTINUE;                                                          \
@@ -1431,9 +1558,12 @@
 
 #define NULL_COMPARISON_NOT_OP(name)                                         \
       CASE(_if##name): {                                                     \
-          int skip = (!(STACK_OBJECT(-1) == NULL))                           \
+          const bool cmp = (!(STACK_OBJECT(-1) == NULL));                    \
+          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
           address branch_pc = pc;                                            \
+          /* Profile branch. */                                              \
+          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
           UPDATE_PC_AND_TOS(skip, -1);                                       \
           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
           CONTINUE;                                                          \
@@ -1441,9 +1571,12 @@
 
 #define NULL_COMPARISON_OP(name)                                             \
       CASE(_if##name): {                                                     \
-          int skip = ((STACK_OBJECT(-1) == NULL))                            \
+          const bool cmp = ((STACK_OBJECT(-1) == NULL));                     \
+          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
           address branch_pc = pc;                                            \
+          /* Profile branch. */                                              \
+          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
           UPDATE_PC_AND_TOS(skip, -1);                                       \
           DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
           CONTINUE;                                                          \
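
In all four macro families the taken/not-taken decision reduces to the same
skip computation: a taken branch advances the pc by the signed 16-bit
big-endian offset stored after the opcode, while a not-taken branch steps over
the 3-byte instruction. A small sketch (the bytecode bytes are hypothetical):

#include <cstdint>
#include <cstdio>

// Big-endian signed 16-bit read, in the spirit of Bytes::get_Java_u2().
static int16_t get_java_u2(const uint8_t* p) {
  return (int16_t)((p[0] << 8) | p[1]);
}

int main() {
  uint8_t bc[] = { 0x9f, 0xff, 0xfa };       // if_icmpeq with offset -6
  bool cmp = true;                           // pretend the comparison held
  int skip = cmp ? get_java_u2(bc + 1) : 3;  // taken: offset; not taken: 3
  printf("pc advances by %d\n", skip);       // -6, i.e. a backward branch
  return 0;
}
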
@@ -1466,30 +1599,42 @@
           int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
           int32_t  skip;
           key -= low;
-          skip = ((uint32_t) key > (uint32_t)(high - low))
-                      ? Bytes::get_Java_u4((address)&lpc[0])
-                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
-          // Does this really need a full backedge check (osr?)
+          if (((uint32_t) key > (uint32_t)(high - low))) {
+            key = -1;
+            skip = Bytes::get_Java_u4((address)&lpc[0]);
+          } else {
+            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
+          }
+          // Profile switch.
+          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
+          // Does this really need a full backedge check (osr)?
           address branch_pc = pc;
           UPDATE_PC_AND_TOS(skip, -1);
           DO_BACKEDGE_CHECKS(skip, branch_pc);
           CONTINUE;
       }
 
-      /* Goto pc whose table entry matches specified key */
+      /* Goto pc whose table entry matches specified key. */
 
       CASE(_lookupswitch): {
           jint* lpc  = (jint*)VMalignWordUp(pc+1);
           int32_t  key  = STACK_INT(-1);
           int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
+          // Remember index.
+          int      index = -1;
+          int      newindex = 0;
           int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
           while (--npairs >= 0) {
-              lpc += 2;
-              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
-                  skip = Bytes::get_Java_u4((address)&lpc[1]);
-                  break;
-              }
+            lpc += 2;
+            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
+              skip = Bytes::get_Java_u4((address)&lpc[1]);
+              index = newindex;
+              break;
+            }
+            newindex += 1;
           }
+          // Profile switch.
+          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
           address branch_pc = pc;
           UPDATE_PC_AND_TOS(skip, -1);
           DO_BACKEDGE_CHECKS(skip, branch_pc);
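
The lookupswitch loop above is a linear scan over (match, offset) pairs; the
new index bookkeeping only exists so the profile can record which pair (or
the default, as -1) was taken. The same shape in isolation (pair values are
hypothetical):

#include <cstdio>

// Return the matching pair's offset or the default, and report which pair
// matched so a profiler could count it (-1 for the default).
static int lookup(const int* pairs, int npairs, int key,
                  int default_skip, int* index_out) {
  *index_out = -1;
  for (int i = 0; i < npairs; ++i) {
    if (pairs[2 * i] == key) {
      *index_out = i;
      return pairs[2 * i + 1];
    }
  }
  return default_skip;
}

int main() {
  int pairs[] = { 10, 100, 20, 200, 30, 300 };
  int index;
  int skip = lookup(pairs, 3, 20, 1, &index);
  printf("skip=%d index=%d\n", skip, index);  // skip=200 index=1
  return 0;
}
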
@@ -1574,7 +1719,7 @@
       if ((uint32_t)index >= (uint32_t)arrObj->length()) {                     \
           sprintf(message, "%d", index);                                       \
           VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
-                        message);                                              \
+                        message, note_rangeCheck_trap);                        \
       }
 
       /* 32-bit loads. These handle conversion from < 32-bit types */
@@ -1600,8 +1745,11 @@
           ARRAY_LOADTO32(T_INT, jint,   "%d",   STACK_INT, 0);
       CASE(_faload):
           ARRAY_LOADTO32(T_FLOAT, jfloat, "%f",   STACK_FLOAT, 0);
-      CASE(_aaload):
-          ARRAY_LOADTO32(T_OBJECT, oop,   INTPTR_FORMAT, STACK_OBJECT, 0);
+      CASE(_aaload): {
+          ARRAY_INTRO(-2);
+          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
+          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
+      }
       CASE(_baload):
           ARRAY_LOADTO32(T_BYTE, jbyte,  "%d",   STACK_INT, 0);
       CASE(_caload):
@@ -1645,21 +1793,24 @@
           // arrObj, index are set
           if (rhsObject != NULL) {
             /* Check assignability of rhsObject into arrObj */
-            Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass)
-            Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
+            Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
+            Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
             //
             // Check for compatibility. This check must not GC!!
             // Seems way more expensive now that we must dispatch
             //
-            if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is...
-              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
+            if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
+              // Decrement counter if subtype check failed.
+              BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
+              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
             }
+            // Profile checkcast with null_seen and receiver.
+            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
+          } else {
+            // Profile checkcast with null_seen and receiver.
+            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
           }
-          oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
-          // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
-          *elem_loc = rhsObject;
-          // Mark the card
-          OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
+          ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
       }
       CASE(_bastore):
@@ -1700,14 +1851,87 @@
         }
         if (entry != NULL) {
           entry->set_obj(lockee);
-          markOop displaced = lockee->mark()->set_unlocked();
-          entry->lock()->set_displaced_header(displaced);
-          if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
-            // Is it simple recursive case?
-            if (THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
-              entry->lock()->set_displaced_header(NULL);
-            } else {
-              CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+          bool success = false;
+          uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
+
+          markOop mark = lockee->mark();
+          intptr_t hash = (intptr_t) markOopDesc::no_hash;
+          // implies UseBiasedLocking
+          if (mark->has_bias_pattern()) {
+            uintptr_t thread_ident;
+            uintptr_t anticipated_bias_locking_value;
+            thread_ident = (uintptr_t)istate->thread();
+            anticipated_bias_locking_value =
+              (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
+              ~((uintptr_t) markOopDesc::age_mask_in_place);
+
+            if (anticipated_bias_locking_value == 0) {
+              // already biased towards this thread, nothing to do
+              if (PrintBiasedLockingStatistics) {
+                (* BiasedLocking::biased_lock_entry_count_addr())++;
+              }
+              success = true;
+            }
+            else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
+              // try revoke bias
+              markOop header = lockee->klass()->prototype_header();
+              if (hash != markOopDesc::no_hash) {
+                header = header->copy_set_hash(hash);
+              }
+              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+                if (PrintBiasedLockingStatistics)
+                  (*BiasedLocking::revoked_lock_entry_count_addr())++;
+              }
+            }
+            else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
+              // try rebias
+              markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
+              if (hash != markOopDesc::no_hash) {
+                new_header = new_header->copy_set_hash(hash);
+              }
+              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+                if (PrintBiasedLockingStatistics)
+                  (* BiasedLocking::rebiased_lock_entry_count_addr())++;
+              }
+              else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
+              success = true;
+            }
+            else {
+              // try to bias towards thread in case object is anonymously biased
+              markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
+                                                              (uintptr_t)markOopDesc::age_mask_in_place |
+                                                              epoch_mask_in_place));
+              if (hash != markOopDesc::no_hash) {
+                header = header->copy_set_hash(hash);
+              }
+              markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
+              // debugging hint
+              DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
+              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+                if (PrintBiasedLockingStatistics)
+                  (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
+              }
+              else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
+              success = true;
+            }
+          }
+
+          // traditional lightweight locking
+          if (!success) {
+            markOop displaced = lockee->mark()->set_unlocked();
+            entry->lock()->set_displaced_header(displaced);
+            bool call_vm = UseHeavyMonitors;
+            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+              // Is it simple recursive case?
+              if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
+                entry->lock()->set_displaced_header(NULL);
+              } else {
+                CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
+              }
             }
           }
           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
@@ -1729,12 +1953,15 @@
             BasicLock* lock = most_recent->lock();
             markOop header = lock->displaced_header();
             most_recent->set_obj(NULL);
-            // If it isn't recursive we either must swap old header or call the runtime
-            if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
-                // restore object for the slow case
-                most_recent->set_obj(lockee);
-                CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
+            if (!lockee->mark()->has_bias_pattern()) {
+              bool call_vm = UseHeavyMonitors;
+              // If it isn't recursive we either must swap old header or call the runtime
+              if (header != NULL || call_vm) {
+                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                  // restore object for the slow case
+                  most_recent->set_obj(lockee);
+                  CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
+                }
               }
             }
             UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
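
The monitorexit fast path mirrors the locking one: a NULL displaced header
means a recursive exit (nothing to write back), and a biased mark is now left
alone entirely; otherwise the saved header is CASed back into the mark word,
with the runtime call as the fallback. A simplified model (assumed layout,
not HotSpot code):

#include <atomic>
#include <cstdint>

struct BasicLock { uintptr_t displaced; };
std::atomic<uintptr_t> mark_word;

// True if the fast path sufficed; false means "restore the object and call
// InterpreterRuntime::monitorexit", as in the slow case above.
bool fast_unlock(BasicLock* lock) {
  uintptr_t header = lock->displaced;
  if (header == 0) return true;          // recursive exit: nothing to undo
  uintptr_t expected = (uintptr_t)lock;  // mark should still point at us
  return mark_word.compare_exchange_strong(expected, header);
}
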
@@ -1807,6 +2034,9 @@
           TosState tos_type = cache->flag_state();
           int field_offset = cache->f2_as_index();
           if (cache->is_volatile()) {
+            if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+              OrderAccess::fence();
+            }
             if (tos_type == atos) {
               VERIFY_OOP(obj->obj_field_acquire(field_offset));
               SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
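
The new fence covers the IRIW (independent reads of independent writes) case:
on CPUs that are not multiple-copy atomic, such as PPC, a volatile read needs
a full fence before the load as well, so that writes by different threads are
observed in a single global order. A C++11 analogue of the pattern (a sketch,
not HotSpot's OrderAccess implementation):

#include <atomic>

std::atomic<int> field{0};  // stand-in for a volatile Java field

int volatile_read() {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // ~ OrderAccess::fence()
  return field.load(std::memory_order_acquire);         // ~ obj_field_acquire()
}
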
@@ -1923,7 +2153,6 @@
             } else if (tos_type == atos) {
               VERIFY_OOP(STACK_OBJECT(-1));
               obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
             } else if (tos_type == btos) {
               obj->release_byte_field_put(field_offset, STACK_INT(-1));
             } else if (tos_type == ltos) {
@@ -1944,7 +2173,6 @@
             } else if (tos_type == atos) {
               VERIFY_OOP(STACK_OBJECT(-1));
               obj->obj_field_put(field_offset, STACK_OBJECT(-1));
-              OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
             } else if (tos_type == btos) {
               obj->byte_field_put(field_offset, STACK_INT(-1));
             } else if (tos_type == ltos) {
@@ -1981,10 +2209,14 @@
             if (UseTLAB) {
               result = (oop) THREAD->tlab().allocate(obj_size);
             }
+            // Disable non-TLAB-based fast-path, because profiling requires that all
+            // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
+            // returns NULL.
+#ifndef CC_INTERP_PROFILE
             if (result == NULL) {
               need_zero = true;
               // Try allocate in shared eden
-        retry:
+            retry:
               HeapWord* compare_to = *Universe::heap()->top_addr();
               HeapWord* new_top = compare_to + obj_size;
               if (new_top <= *Universe::heap()->end_addr()) {
@@ -1994,6 +2226,7 @@
                 result = (oop) compare_to;
               }
             }
+#endif
             if (result != NULL) {
               // Initialize object (if nonzero size and need) and then the header
               if (need_zero ) {
@@ -2010,6 +2243,9 @@
               }
               result->set_klass_gap(0);
               result->set_klass(k_entry);
+              // Must prevent reordering of stores for object initialization
+              // with stores that publish the new object.
+              OrderAccess::storestore();
               SET_STACK_OBJECT(result, 0);
               UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
             }
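
The storestore barriers added throughout this hunk order the object-initialization stores (fields, klass, mark) before any store that publishes the new reference. A reduced model of the race being prevented, using a release fence as a conservative stand-in for OrderAccess::storestore():

    #include <atomic>

    struct Obj { int field; };
    std::atomic<Obj*> published{nullptr};

    void writer(Obj* raw) {
      raw->field = 42;                      // initialization store
      // Without this fence the publishing store below may become visible
      // first, letting a racing reader see a half-initialized object.
      std::atomic_thread_fence(std::memory_order_release);
      published.store(raw, std::memory_order_relaxed);
    }

    void reader() {
      Obj* o = published.load(std::memory_order_acquire);
      if (o != nullptr) {
        int v = o->field;                   // guaranteed to observe 42
        (void)v;
      }
    }
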
@@ -2018,6 +2254,9 @@
         // Slow case allocation
         CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
                 handle_exception);
+        // Must prevent reordering of stores for object initialization
+        // with stores that publish the new object.
+        OrderAccess::storestore();
         SET_STACK_OBJECT(THREAD->vm_result(), 0);
         THREAD->set_vm_result(NULL);
         UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
@@ -2027,6 +2266,9 @@
         jint size = STACK_INT(-1);
         CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                 handle_exception);
+        // Must prevent reordering of stores for object initialization
+        // with stores that publish the new object.
+        OrderAccess::storestore();
         SET_STACK_OBJECT(THREAD->vm_result(), -1);
         THREAD->set_vm_result(NULL);
         UPDATE_PC_AND_CONTINUE(3);
@@ -2041,6 +2283,9 @@
         //adjust pointer to start of stack element
         CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
                 handle_exception);
+        // Must prevent reordering of stores for object initialization
+        // with stores that publish the new object.
+        OrderAccess::storestore();
         SET_STACK_OBJECT(THREAD->vm_result(), -dims);
         THREAD->set_vm_result(NULL);
         UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
@@ -2049,61 +2294,63 @@
           if (STACK_OBJECT(-1) != NULL) {
             VERIFY_OOP(STACK_OBJECT(-1));
             u2 index = Bytes::get_Java_u2(pc+1);
-            if (ProfileInterpreter) {
-              // needs Profile_checkcast QQQ
-              ShouldNotReachHere();
-            }
             // Constant pool may have actual klass or unresolved klass. If it is
-            // unresolved we must resolve it
+            // unresolved we must resolve it.
             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
             }
             Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
-            Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
+            Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
             //
             // Check for compatibilty. This check must not GC!!
-            // Seems way more expensive now that we must dispatch
+            // Seems way more expensive now that we must dispatch.
             //
-            if (objKlassOop != klassOf &&
-                !objKlassOop->is_subtype_of(klassOf)) {
+            if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
+              // Decrement counter at checkcast.
+              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
               ResourceMark rm(THREAD);
-              const char* objName = objKlassOop->external_name();
+              const char* objName = objKlass->external_name();
               const char* klassName = klassOf->external_name();
               char* message = SharedRuntime::generate_class_cast_message(
                 objName, klassName);
-              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
+              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
             }
+            // Profile checkcast with null_seen and receiver.
+            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
           } else {
-            if (UncommonNullCast) {
-//              istate->method()->set_null_cast_seen();
-// [RGV] Not sure what to do here!
-
-            }
+            // Profile checkcast with null_seen and receiver.
+            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
           }
           UPDATE_PC_AND_CONTINUE(3);
 
       CASE(_instanceof):
           if (STACK_OBJECT(-1) == NULL) {
             SET_STACK_INT(0, -1);
+            // Profile instanceof with null_seen and receiver.
+            BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
           } else {
             VERIFY_OOP(STACK_OBJECT(-1));
             u2 index = Bytes::get_Java_u2(pc+1);
             // Constant pool may have actual klass or unresolved klass. If it is
-            // unresolved we must resolve it
+            // unresolved we must resolve it.
             if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
               CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
             }
             Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
-            Klass* objKlassOop = STACK_OBJECT(-1)->klass();
+            Klass* objKlass = STACK_OBJECT(-1)->klass();
             //
             // Check for compatibilty. This check must not GC!!
-            // Seems way more expensive now that we must dispatch
+            // Seems way more expensive now that we must dispatch.
             //
-            if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) {
+            if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
               SET_STACK_INT(1, -1);
             } else {
               SET_STACK_INT(0, -1);
+              // Decrement counter, mirroring checkcast.
+              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
             }
+            // Profile instanceof with null_seen and receiver.
+            BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
           }
           UPDATE_PC_AND_CONTINUE(3);
 
@@ -2246,6 +2493,9 @@
         istate->set_callee_entry_point(method->from_interpreted_entry());
         istate->set_bcp_advance(5);
 
+        // Invokedynamic has a call counter, just like an invokestatic -> increment!
+        BI_PROFILE_UPDATE_CALL();
+
         UPDATE_PC_AND_RETURN(0); // I'll be back...
       }
 
@@ -2278,6 +2528,9 @@
         istate->set_callee_entry_point(method->from_interpreted_entry());
         istate->set_bcp_advance(3);
 
+        // Invokehandle has a call counter, just like a final call -> increment!
+        BI_PROFILE_UPDATE_FINALCALL();
+
         UPDATE_PC_AND_RETURN(0); // I'll be back...
       }
 
@@ -2305,14 +2558,18 @@
           CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
           if (cache->is_vfinal()) {
             callee = cache->f2_as_vfinal_method();
+            // Profile 'special case of invokeinterface' final call.
+            BI_PROFILE_UPDATE_FINALCALL();
           } else {
-            // get receiver
+            // Get receiver.
             int parms = cache->parameter_size();
-            // Same comments as invokevirtual apply here
-            VERIFY_OOP(STACK_OBJECT(-parms));
-            InstanceKlass* rcvrKlass = (InstanceKlass*)
-                                 STACK_OBJECT(-parms)->klass();
+            // Same comments as invokevirtual apply here.
+            oop rcvr = STACK_OBJECT(-parms);
+            VERIFY_OOP(rcvr);
+            InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
             callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
+            // Profile 'special case of invokeinterface' virtual call.
+            BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
           }
           istate->set_callee(callee);
           istate->set_callee_entry_point(callee->from_interpreted_entry());
@@ -2343,15 +2600,18 @@
         // interface.  The link resolver checks this but only for the first
         // time this interface is called.
         if (i == int2->itable_length()) {
-          VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
+          VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
         }
         int mindex = cache->f2_as_index();
         itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
         callee = im[mindex].method();
         if (callee == NULL) {
-          VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "");
+          VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
         }
 
+        // Profile virtual call.
+        BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
+
         istate->set_callee(callee);
         istate->set_callee_entry_point(callee->from_interpreted_entry());
 #ifdef VM_JVMTI
@@ -2383,8 +2643,11 @@
           Method* callee;
           if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
             CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
-            if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method();
-            else {
+            if (cache->is_vfinal()) {
+              callee = cache->f2_as_vfinal_method();
+              // Profile final call.
+              BI_PROFILE_UPDATE_FINALCALL();
+            } else {
               // get receiver
               int parms = cache->parameter_size();
               // this works but needs a resourcemark and seems to create a vtable on every call:
@@ -2393,8 +2656,9 @@
               // this fails with an assert
               // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
               // but this works
-              VERIFY_OOP(STACK_OBJECT(-parms));
-              InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass();
+              oop rcvr = STACK_OBJECT(-parms);
+              VERIFY_OOP(rcvr);
+              InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
               /*
                 Executing this code in java.lang.String:
                     public String(char value[]) {
@@ -2412,12 +2676,17 @@
 
               */
               callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
+              // Profile virtual call.
+              BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
             }
           } else {
             if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
               CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
             }
             callee = cache->f1_as_method();
+
+            // Profile call.
+            BI_PROFILE_UPDATE_CALL();
           }
 
           istate->set_callee(callee);
@@ -2439,6 +2708,9 @@
         jint size = STACK_INT(-1);
         CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                 handle_exception);
+        // Must prevent reordering of stores for object initialization
+        // with stores that publish the new object.
+        OrderAccess::storestore();
         SET_STACK_OBJECT(THREAD->vm_result(), -1);
         THREAD->set_vm_result(NULL);
 
@@ -2469,6 +2741,8 @@
       CASE(_goto):
       {
           int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
+          // Profile jump.
+          BI_PROFILE_UPDATE_JUMP();
           address branch_pc = pc;
           UPDATE_PC(offset);
           DO_BACKEDGE_CHECKS(offset, branch_pc);
@@ -2485,6 +2759,8 @@
       CASE(_goto_w):
       {
           int32_t offset = Bytes::get_Java_u4(pc + 1);
+          // Profile jump.
+          BI_PROFILE_UPDATE_JUMP();
           address branch_pc = pc;
           UPDATE_PC(offset);
           DO_BACKEDGE_CHECKS(offset, branch_pc);
@@ -2494,6 +2770,9 @@
       /* return from a jsr or jsr_w */
 
       CASE(_ret): {
+          // Profile ret.
+          BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
+          // Now, update the pc.
           pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
           UPDATE_PC_AND_CONTINUE(0);
       }
@@ -2567,23 +2846,26 @@
       if (TraceExceptions) {
         ttyLocker ttyl;
         ResourceMark rm;
-        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
+        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
         tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
         tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
-                      pc - (intptr_t)METHOD->code_base(),
+                      istate->bcp() - (intptr_t)METHOD->code_base(),
                       continuation_bci, THREAD);
       }
       // for AbortVMOnException flag
       NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+
+      // Update profiling data.
+      BI_PROFILE_ALIGN_TO_CURRENT_BCI();
       goto run;
     }
     if (TraceExceptions) {
       ttyLocker ttyl;
       ResourceMark rm;
-      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
+      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
       tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
       tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
-                    pc  - (intptr_t) METHOD->code_base(),
+                    istate->bcp() - (intptr_t)METHOD->code_base(),
                     THREAD);
     }
     // for AbortVMOnException flag
@@ -2591,32 +2873,87 @@
     // No handler in this activation, unwind and try again
     THREAD->set_pending_exception(except_oop(), NULL, 0);
     goto handle_return;
-  }  /* handle_exception: */
-
-
+  }  // handle_exception:
 
   // Return from an interpreter invocation with the result of the interpretation
   // on the top of the Java Stack (or a pending exception)
 
-handle_Pop_Frame:
-
-  // We don't really do anything special here except we must be aware
-  // that we can get here without ever locking the method (if sync).
-  // Also we skip the notification of the exit.
-
-  istate->set_msg(popping_frame);
-  // Clear pending so while the pop is in process
-  // we don't start another one if a call_vm is done.
-  THREAD->clr_pop_frame_pending();
-  // Let interpreter (only) see the we're in the process of popping a frame
-  THREAD->set_pop_frame_in_process();
-
-handle_return:
-  {
+  handle_Pop_Frame: {
+
+    // We don't really do anything special here except that we must be aware
+    // that we can get here without ever locking the method (if sync).
+    // We also skip the notification of the exit.
+
+    istate->set_msg(popping_frame);
+    // Clear pending so while the pop is in process
+    // we don't start another one if a call_vm is done.
+    THREAD->clr_pop_frame_pending();
+    // Let the interpreter (only) see that we're in the process of popping a frame.
+    THREAD->set_pop_frame_in_process();
+
+    goto handle_return;
+
+  } // handle_Pop_Frame
+
+  // ForceEarlyReturn ends a method and returns to the caller with a return
+  // value given by the invoker of the early return.
+  handle_Early_Return: {
+
+    istate->set_msg(early_return);
+
+    // Clear expression stack.
+    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
+
+    JvmtiThreadState *ts = THREAD->jvmti_thread_state();
+
+    // Push the value to be returned.
+    switch (istate->method()->result_type()) {
+      case T_BOOLEAN:
+      case T_SHORT:
+      case T_BYTE:
+      case T_CHAR:
+      case T_INT:
+        SET_STACK_INT(ts->earlyret_value().i, 0);
+        MORE_STACK(1);
+        break;
+      case T_LONG:
+        SET_STACK_LONG(ts->earlyret_value().j, 1);
+        MORE_STACK(2);
+        break;
+      case T_FLOAT:
+        SET_STACK_FLOAT(ts->earlyret_value().f, 0);
+        MORE_STACK(1);
+        break;
+      case T_DOUBLE:
+        SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
+        MORE_STACK(2);
+        break;
+      case T_ARRAY:
+      case T_OBJECT:
+        SET_STACK_OBJECT(ts->earlyret_oop(), 0);
+        MORE_STACK(1);
+        break;
+    }
+
+    ts->clr_earlyret_value();
+    ts->set_earlyret_oop(NULL);
+    ts->clr_earlyret_pending();
+
+    // Fall through to handle_return.
+
+  } // handle_Early_Return
+
+  handle_return: {
+    // A storestore barrier is required to order initialization of
+    // final fields with publishing the reference to the object that
+    // holds the field. Without the barrier the value of final fields
+    // can be observed to change.
+    OrderAccess::storestore();
+
     DECACHE_STATE();
 
-    bool suppress_error = istate->msg() == popping_frame;
-    bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
+    bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
+    bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
     Handle original_exception(THREAD, THREAD->pending_exception());
     Handle illegal_state_oop(THREAD, NULL);
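
The new handle_Early_Return label services JVMTI's ForceEarlyReturn family: the agent stores the forced value in the JvmtiThreadState, and the interpreter clears the expression stack and pushes it as the method result. For context, the agent-side trigger looks roughly like this (standard JVMTI API; capability setup and error handling elided):

    #include <jvmti.h>

    // Force the topmost frame of a suspended 'thread' to return 42 at once.
    // Requires the can_force_early_return capability to have been added.
    void force_int_return(jvmtiEnv* jvmti, jthread thread) {
      jvmtiError err = jvmti->ForceEarlyReturnInt(thread, 42);
      if (err != JVMTI_ERROR_NONE) {
        // e.g. thread not suspended, or a native/opaque topmost frame
      }
    }
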
 
@@ -2677,15 +3014,18 @@
           BasicLock* lock = end->lock();
           markOop header = lock->displaced_header();
           end->set_obj(NULL);
-          // If it isn't recursive we either must swap old header or call the runtime
-          if (header != NULL) {
-            if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
-              // restore object for the slow case
-              end->set_obj(lockee);
-              {
-                // Prevent any HandleMarkCleaner from freeing our live handles
-                HandleMark __hm(THREAD);
-                CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
+
+          if (!lockee->mark()->has_bias_pattern()) {
+            // If it isn't recursive we either must swap old header or call the runtime
+            if (header != NULL) {
+              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                // restore object for the slow case
+                end->set_obj(lockee);
+                {
+                  // Prevent any HandleMarkCleaner from freeing our live handles
+                  HandleMark __hm(THREAD);
+                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
+                }
               }
             }
           }
@@ -2730,27 +3070,41 @@
           oop rcvr = base->obj();
           if (rcvr == NULL) {
             if (!suppress_error) {
-              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
+              VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
               illegal_state_oop = THREAD->pending_exception();
               THREAD->clear_pending_exception();
             }
+          } else if (UseHeavyMonitors) {
+            {
+              // Prevent any HandleMarkCleaner from freeing our live handles.
+              HandleMark __hm(THREAD);
+              CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
+            }
+            if (THREAD->has_pending_exception()) {
+              if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
+              THREAD->clear_pending_exception();
+            }
           } else {
             BasicLock* lock = base->lock();
             markOop header = lock->displaced_header();
             base->set_obj(NULL);
-            // If it isn't recursive we either must swap old header or call the runtime
-            if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
-                // restore object for the slow case
-                base->set_obj(rcvr);
-                {
-                  // Prevent any HandleMarkCleaner from freeing our live handles
-                  HandleMark __hm(THREAD);
-                  CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
-                }
-                if (THREAD->has_pending_exception()) {
-                  if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
-                  THREAD->clear_pending_exception();
+
+            if (!rcvr->mark()->has_bias_pattern()) {
+              base->set_obj(NULL);
+              // If it isn't recursive we either must swap old header or call the runtime
+              if (header != NULL) {
+                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+                  // restore object for the slow case
+                  base->set_obj(rcvr);
+                  {
+                    // Prevent any HandleMarkCleaner from freeing our live handles
+                    HandleMark __hm(THREAD);
+                    CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
+                  }
+                  if (THREAD->has_pending_exception()) {
+                    if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
+                    THREAD->clear_pending_exception();
+                  }
                 }
               }
             }
@@ -2758,6 +3112,8 @@
         }
       }
     }
+    // Clear the do_not_unlock flag now.
+    THREAD->clr_do_not_unlock();
 
     //
     // Notify jvmti/jvmdi
@@ -2802,15 +3158,14 @@
     // A pending exception that was pending prior to a possible popping frame
     // overrides the popping frame.
     //
-    assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
+    assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
     if (illegal_state_oop() != NULL || original_exception() != NULL) {
-      // inform the frame manager we have no result
+      // Inform the frame manager we have no result.
       istate->set_msg(throwing_exception);
       if (illegal_state_oop() != NULL)
         THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
       else
         THREAD->set_pending_exception(original_exception(), NULL, 0);
-      istate->set_return_kind((Bytecodes::Code)opcode);
       UPDATE_PC_AND_RETURN(0);
     }
 
@@ -2829,13 +3184,12 @@
                                 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
         THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
       }
-      THREAD->clr_pop_frame_in_process();
+    } else {
+      istate->set_msg(return_from_method);
     }
 
     // Normal return
     // Advance the pc and return to frame manager
-    istate->set_msg(return_from_method);
-    istate->set_return_kind((Bytecodes::Code)opcode);
     UPDATE_PC_AND_RETURN(1);
   } /* handle_return: */
 
@@ -2883,7 +3237,7 @@
 }
 
 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
-  return (oop)tos [Interpreter::expr_index_at(-offset)];
+  return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
 }
 
 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
@@ -2952,7 +3306,7 @@
   return (jfloat)locals[Interpreter::local_index_at(-offset)];
 }
 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
-  return (oop)locals[Interpreter::local_index_at(-offset)];
+  return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
 }
 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
   return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
@@ -3110,9 +3464,8 @@
   tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
   tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
   tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
-  tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
   tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
-  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
+  tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
   tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
   tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
   tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
@@ -3129,9 +3482,9 @@
 }
 
 extern "C" {
-    void PI(uintptr_t arg) {
-        ((BytecodeInterpreter*)arg)->print();
-    }
+  void PI(uintptr_t arg) {
+    ((BytecodeInterpreter*)arg)->print();
+  }
 }
 #endif // PRODUCT
 
--- a/src/share/vm/interpreter/bytecodeInterpreter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/bytecodeInterpreter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -66,27 +66,26 @@
 typedef class BytecodeInterpreter* interpreterState;
 
 struct call_message {
-    class Method* _callee;    /* method to call during call_method request */
-    address   _callee_entry_point;   /* address to jump to for call_method request */
-    int       _bcp_advance;          /* size of the invoke bytecode operation */
+  class Method* _callee;           // method to call during call_method request
+  address _callee_entry_point;     // address to jump to for call_method request
+  int _bcp_advance;                // size of the invoke bytecode operation
 };
 
 struct osr_message {
-    address _osr_buf;                 /* the osr buffer */
-    address _osr_entry;               /* the entry to the osr method */
+  address _osr_buf;                 // the osr buffer
+  address _osr_entry;               // the entry to the osr method
 };
 
 struct osr_result {
-  nmethod* nm;                       /* osr nmethod */
-  address return_addr;               /* osr blob return address */
+  nmethod* nm;                      // osr nmethod
+  address return_addr;              // osr blob return address
 };
 
 // Result returned to frame manager
 union frame_manager_message {
-    call_message _to_call;            /* describes callee */
-    Bytecodes::Code _return_kind;     /* i_return, a_return, ... */
-    osr_message _osr;                 /* describes the osr */
-    osr_result _osr_result;           /* result of OSR request */
+  call_message _to_call;            // describes callee
+  osr_message _osr;                 // describes the osr
+  osr_result _osr_result;           // result of OSR request
 };
 
 class BytecodeInterpreter : StackObj {
@@ -115,7 +114,8 @@
          more_monitors,             // need a new monitor
          throwing_exception,        // unwind stack and rethrow
          popping_frame,             // unwind call and retry call
-         do_osr                     // request this invocation be OSR's
+         do_osr,                    // request this invocation be OSR's
+         early_return               // early return as commanded by jvmti
     };
 
 private:
@@ -216,8 +216,6 @@
 inline int bcp_advance() { return _result._to_call._bcp_advance; }
 inline void set_bcp_advance(int count) { _result._to_call._bcp_advance = count; }
 
-inline void set_return_kind(Bytecodes::Code kind) { _result._return_kind = kind; }
-
 inline interpreterState prev() { return _prev_link; }
 
 inline intptr_t* stack() { return _stack; }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/interpreter/bytecodeInterpreterProfiling.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// This file defines a set of macros which are used by the c++-interpreter
+// for updating a method's methodData object.
+
+
+#ifndef SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
+#define SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
+
+
+// Global settings /////////////////////////////////////////////////////////////
+
+
+// Enables profiling support.
+#if defined(COMPILER2)
+#define CC_INTERP_PROFILE
+#endif
+
+// Enables assertions for profiling code (also works in product-builds).
+// #define CC_INTERP_PROFILE_WITH_ASSERTIONS
+
+
+#ifdef CC_INTERP
+
+// Empty dummy implementations if profiling code is switched off. //////////////
+
+#ifndef CC_INTERP_PROFILE
+
+#define SET_MDX(mdx)
+
+#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler)                \
+  if (ProfileInterpreter) {                                                    \
+    ShouldNotReachHere();                                                      \
+  }
+
+#define BI_PROFILE_ALIGN_TO_CURRENT_BCI()
+
+#define BI_PROFILE_UPDATE_JUMP()
+#define BI_PROFILE_UPDATE_BRANCH(is_taken)
+#define BI_PROFILE_UPDATE_RET(bci)
+#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver)
+#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver)
+#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver)
+#define BI_PROFILE_UPDATE_CALL()
+#define BI_PROFILE_UPDATE_FINALCALL()
+#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver)
+#define BI_PROFILE_UPDATE_SWITCH(switch_index)
+
+
+#else
+
+
+// Non-dummy implementations ///////////////////////////////////////////////////
+
+// Accessors for the current method data pointer 'mdx'.
+#define MDX()        (istate->mdx())
+#define SET_MDX(mdx)                                                           \
+  if (TraceProfileInterpreter) {                                               \
+    /* Let it look like TraceBytecodes' format. */                             \
+    tty->print_cr("[%d]           %4d  "                                       \
+                  "mdx " PTR_FORMAT "(%d)"                                     \
+                  "  "                                                         \
+                  " \t-> " PTR_FORMAT "(%d)",                                  \
+                (int) THREAD->osthread()->thread_id(),                         \
+                BCI(),                                                         \
+                MDX(),                                                         \
+                (MDX() == NULL                                                 \
+                 ? 0                                                           \
+                 : istate->method()->method_data()->dp_to_di((address)MDX())), \
+                mdx,                                                           \
+                istate->method()->method_data()->dp_to_di((address)mdx)        \
+                );                                                             \
+  };                                                                           \
+  istate->set_mdx(mdx);
+
+
+// Dumps the profiling method data for the current method.
+#ifdef PRODUCT
+#define BI_PROFILE_PRINT_METHOD_DATA()
+#else  // PRODUCT
+#define BI_PROFILE_PRINT_METHOD_DATA()                                         \
+  {                                                                            \
+    ttyLocker ttyl;                                                            \
+    MethodData *md = istate->method()->method_data();                          \
+    tty->cr();                                                                 \
+    if (md != NULL) {                                                          \
+      tty->print("method data at mdx " PTR_FORMAT "(0) for",                   \
+                 md->data_layout_at(md->bci_to_di(0)));                        \
+      istate->method()->print_short_name(tty);                                 \
+      tty->cr();                                                               \
+      md->print_data_on(tty);                                                  \
+      address mdx = (address) MDX();                                           \
+      if (mdx != NULL) {                                                       \
+        tty->print_cr("current mdx " PTR_FORMAT "(%d)",                        \
+                      mdx,                                                     \
+                      istate->method()->method_data()->dp_to_di(mdx));         \
+      }                                                                        \
+    } else {                                                                   \
+      tty->print_cr("no method data");                                         \
+    }                                                                          \
+  }
+#endif // PRODUCT
+
+
+// Gets or creates the profiling method data and initializes mdx.
+#define BI_PROFILE_GET_OR_CREATE_METHOD_DATA(exception_handler)                \
+  if (ProfileInterpreter && MDX() == NULL) {                                   \
+    /* Mdx is not yet initialized for this activation. */                      \
+    MethodData *md = istate->method()->method_data();                          \
+    if (md == NULL) {                                                          \
+      MethodCounters* mcs;                                                     \
+      GET_METHOD_COUNTERS(mcs);                                                \
+      /* The profiling method data doesn't exist for this method, */           \
+      /* create it if the counters have overflowed. */                         \
+      if (mcs->invocation_counter()                                            \
+                         ->reached_ProfileLimit(mcs->backedge_counter())) {    \
+        /* Must use CALL_VM, because an async exception may be pending. */     \
+        CALL_VM((InterpreterRuntime::profile_method(THREAD)),                  \
+                exception_handler);                                            \
+        md = istate->method()->method_data();                                  \
+        if (md != NULL) {                                                      \
+          if (TraceProfileInterpreter) {                                       \
+            BI_PROFILE_PRINT_METHOD_DATA();                                    \
+          }                                                                    \
+          Method *m = istate->method();                                        \
+          int bci = m->bci_from(pc);                                           \
+          jint di = md->bci_to_di(bci);                                        \
+          SET_MDX(md->data_layout_at(di));                                     \
+        }                                                                      \
+      }                                                                        \
+    } else {                                                                   \
+      /* The profiling method data exists, align the method data pointer */    \
+      /* mdx to the current bytecode index. */                                 \
+      if (TraceProfileInterpreter) {                                           \
+        BI_PROFILE_PRINT_METHOD_DATA();                                        \
+      }                                                                        \
+      SET_MDX(md->data_layout_at(md->bci_to_di(BCI())));                       \
+    }                                                                          \
+  }
+
+
+// Asserts that the current method data pointer mdx corresponds
+// to the current bytecode.
+#if defined(CC_INTERP_PROFILE_WITH_ASSERTIONS)
+#define BI_PROFILE_CHECK_MDX()                                                 \
+  {                                                                            \
+    MethodData *md = istate->method()->method_data();                          \
+    address mdx  = (address) MDX();                                            \
+    address mdx2 = (address) md->data_layout_at(md->bci_to_di(BCI()));         \
+    guarantee(md   != NULL, "1");                                              \
+    guarantee(mdx  != NULL, "2");                                              \
+    guarantee(mdx2 != NULL, "3");                                              \
+    if (mdx != mdx2) {                                                         \
+      BI_PROFILE_PRINT_METHOD_DATA();                                          \
+      fatal3("invalid mdx at bci %d:"                                          \
+             " was " PTR_FORMAT                                                \
+             " but expected " PTR_FORMAT,                                      \
+             BCI(),                                                            \
+             mdx,                                                              \
+             mdx2);                                                            \
+    }                                                                          \
+  }
+#else
+#define BI_PROFILE_CHECK_MDX()
+#endif
+
+
+// Aligns the method data pointer mdx to the current bytecode index.
+#define BI_PROFILE_ALIGN_TO_CURRENT_BCI()                                      \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    MethodData *md = istate->method()->method_data();                          \
+    SET_MDX(md->data_layout_at(md->bci_to_di(BCI())));                         \
+  }
+
+
+// Updates profiling data for a jump.
+#define BI_PROFILE_UPDATE_JUMP()                                               \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    JumpData::increment_taken_count_no_overflow(MDX());                        \
+    /* Remember last branch taken count. */                                    \
+    mdo_last_branch_taken_count = JumpData::taken_count(MDX());                \
+    SET_MDX(JumpData::advance_taken(MDX()));                                   \
+  }
+
+
+// Updates profiling data for a taken/not taken branch.
+#define BI_PROFILE_UPDATE_BRANCH(is_taken)                                     \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    if (is_taken) {                                                            \
+      BranchData::increment_taken_count_no_overflow(MDX());                    \
+      /* Remember last branch taken count. */                                  \
+      mdo_last_branch_taken_count = BranchData::taken_count(MDX());            \
+      SET_MDX(BranchData::advance_taken(MDX()));                               \
+    } else {                                                                   \
+      BranchData::increment_not_taken_count_no_overflow(MDX());                \
+      SET_MDX(BranchData::advance_not_taken(MDX()));                           \
+    }                                                                          \
+  }
+
+
+// Updates profiling data for a ret with given bci.
+#define BI_PROFILE_UPDATE_RET(bci)                                             \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    MethodData *md = istate->method()->method_data();                          \
+/* FIXME: there is more to do here than increment and advance(mdx)! */         \
+    CounterData::increment_count_no_overflow(MDX());                           \
+    SET_MDX(RetData::advance(md, bci));                                        \
+  }
+
+// Decrement counter at checkcast if the subtype check fails (as the template
+// interpreter does).
+#define BI_PROFILE_SUBTYPECHECK_FAILED(receiver)                               \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver);   \
+    ReceiverTypeData::decrement_count(MDX());                                  \
+  }
+
+// Updates profiling data for a checkcast (was a null seen? which receiver?).
+#define BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver)                       \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    if (null_seen) {                                                           \
+      ReceiverTypeData::set_null_seen(MDX());                                  \
+    } else {                                                                   \
+      /* Template interpreter doesn't increment count. */                      \
+      /* ReceiverTypeData::increment_count_no_overflow(MDX()); */              \
+      ReceiverTypeData::increment_receiver_count_no_overflow(MDX(), receiver); \
+    }                                                                          \
+    SET_MDX(ReceiverTypeData::advance(MDX()));                                 \
+  }
+
+
+// Updates profiling data for an instanceof (was a null seen? which receiver?).
+#define BI_PROFILE_UPDATE_INSTANCEOF(null_seen, receiver)                      \
+  BI_PROFILE_UPDATE_CHECKCAST(null_seen, receiver)
+
+
+// Updates profiling data for a call.
+#define BI_PROFILE_UPDATE_CALL()                                               \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    CounterData::increment_count_no_overflow(MDX());                           \
+    SET_MDX(CounterData::advance(MDX()));                                      \
+  }
+
+
+// Updates profiling data for a final call.
+#define BI_PROFILE_UPDATE_FINALCALL()                                          \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    VirtualCallData::increment_count_no_overflow(MDX());                       \
+    SET_MDX(VirtualCallData::advance(MDX()));                                  \
+  }
+
+
+// Updates profiling data for a virtual call with given receiver Klass.
+#define BI_PROFILE_UPDATE_VIRTUALCALL(receiver)                                \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    VirtualCallData::increment_receiver_count_no_overflow(MDX(), receiver);    \
+    SET_MDX(VirtualCallData::advance(MDX()));                                  \
+  }
+
+
+// Updates profiling data for a switch (tableswitch or lookupswitch) with
+// given taken index (-1 means default case was taken).
+#define BI_PROFILE_UPDATE_SWITCH(switch_index)                                 \
+  if (ProfileInterpreter && MDX() != NULL) {                                   \
+    BI_PROFILE_CHECK_MDX();                                                    \
+    MultiBranchData::increment_count_no_overflow(MDX(), switch_index);         \
+    SET_MDX(MultiBranchData::advance(MDX(), switch_index));                    \
+  }
+
+
+// The end /////////////////////////////////////////////////////////////////////
+
+#endif // CC_INTERP_PROFILE
+
+#endif // CC_INTERP
+
+#endif // SHARE_VM_INTERPRETER_BYTECODEINTERPRETERPROFILING_HPP
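
The header's two halves follow the usual conditional-profiling idiom: call sites in the interpreter loop are written unconditionally, and CC_INTERP_PROFILE decides whether each macro expands to real profiling code or to nothing, so the non-profiling build pays no cost. A self-contained illustration of the pattern (names invented, not HotSpot's):

    // Toggled by the build system, like CC_INTERP_PROFILE above.
    // #define WITH_PROFILING

    static long taken_count = 0, not_taken_count = 0;

    #ifdef WITH_PROFILING
    #define PROFILE_BRANCH(is_taken)                                  \
      do {                                                            \
        if (is_taken) ++taken_count; else ++not_taken_count;          \
      } while (0)
    #else
    #define PROFILE_BRANCH(is_taken)      // compiles away entirely
    #endif

    int branch_example(int a, int b) {
      bool taken = (a < b);
      PROFILE_BRANCH(taken);              // free when profiling is off
      return taken ? a : b;
    }
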
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -596,7 +596,7 @@
     if (data != NULL) {
       st->print("  %d", mdo->dp_to_di(data->dp()));
       st->fill_to(6);
-      data->print_data_on(st);
+      data->print_data_on(st, mdo);
     }
   }
 }
--- a/src/share/vm/interpreter/cppInterpreter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/cppInterpreter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/interpreter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -241,18 +241,15 @@
 //------------------------------------------------------------------------------------------------------------------------
 // Exceptions
 
-// Assume the compiler is (or will be) interested in this event.
-// If necessary, create an MDO to hold the information, and record it.
-void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
-  assert(ProfileTraps, "call me only if profiling");
-  methodHandle trap_method(thread, method(thread));
-
+void InterpreterRuntime::note_trap_inner(JavaThread* thread, int reason,
+                                         methodHandle trap_method, int trap_bci, TRAPS) {
   if (trap_method.not_null()) {
     MethodData* trap_mdo = trap_method->method_data();
     if (trap_mdo == NULL) {
       Method::build_interpreter_method_data(trap_method, THREAD);
       if (HAS_PENDING_EXCEPTION) {
-        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
+        assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())),
+               "we expect only an OOM error here");
         CLEAR_PENDING_EXCEPTION;
       }
       trap_mdo = trap_method->method_data();
@@ -261,12 +258,42 @@
     if (trap_mdo != NULL) {
       // Update per-method count of trap events.  The interpreter
       // is updating the MDO to simulate the effect of compiler traps.
-      int trap_bci = trap_method->bci_from(bcp(thread));
       Deoptimization::update_method_data_from_interpreter(trap_mdo, trap_bci, reason);
     }
   }
 }
 
+// Assume the compiler is (or will be) interested in this event.
+// If necessary, create an MDO to hold the information, and record it.
+void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
+  assert(ProfileTraps, "call me only if profiling");
+  methodHandle trap_method(thread, method(thread));
+  int trap_bci = trap_method->bci_from(bcp(thread));
+  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
+}
+
+#ifdef CC_INTERP
+// As legacy note_trap, but we have more arguments.
+IRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
+  methodHandle trap_method(method);
+  note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
+IRT_END
+
+// Class Deoptimization is not visible in BytecodeInterpreter, so we need a wrapper
+// for each exception.
+void InterpreterRuntime::note_nullCheck_trap(JavaThread* thread, Method *method, int trap_bci)
+  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_null_check, method, trap_bci); }
+void InterpreterRuntime::note_div0Check_trap(JavaThread* thread, Method *method, int trap_bci)
+  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_div0_check, method, trap_bci); }
+void InterpreterRuntime::note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci)
+  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_range_check, method, trap_bci); }
+void InterpreterRuntime::note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci)
+  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_class_check, method, trap_bci); }
+void InterpreterRuntime::note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci)
+  { if (ProfileTraps) note_trap(thread, Deoptimization::Reason_array_check, method, trap_bci); }
+#endif // CC_INTERP
+
+
 static Handle get_preinitialized_exception(Klass* k, TRAPS) {
   // get klass
   InstanceKlass* klass = InstanceKlass::cast(k);
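
These wrappers exist so the interpreter's error macros can select a trap-note function by name, with note_no_trap as the deliberate no-op. The wiring presumably relies on the macro argument being an identifier substituted into a qualified call (the consuming VM_JAVA_ERROR definition lives in bytecodeInterpreter.cpp and is not part of this diff); a self-contained demonstration of that token-substitution trick:

    #include <cstdio>

    struct Runtime {
      static void note_classCheck_trap(int bci) { std::printf("class-check trap at bci %d\n", bci); }
      static void note_no_trap(int bci)         { /* deliberately empty */ }
    };

    // The macro argument lands after the '::' and selects which static
    // member is called -- the same shape as passing note_classCheck_trap
    // or note_no_trap to VM_JAVA_ERROR.
    #define NOTE_TRAP(note_a_trap, bci)  Runtime::note_a_trap(bci)

    int main() {
      NOTE_TRAP(note_classCheck_trap, 17);   // records the trap
      NOTE_TRAP(note_no_trap, 17);           // records nothing
      return 0;
    }
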
@@ -437,7 +464,7 @@
 #ifdef GRAAL
   if (h_method->method_data() != NULL) {
     ResourceMark rm(thread);
-    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci);
+    ProfileData* pdata = h_method->method_data()->allocate_bci_to_data(current_bci, NULL);
     if (pdata != NULL && pdata->is_BitData()) {
       BitData* bit_data = (BitData*) pdata;
       bit_data->set_exception_seen();
--- a/src/share/vm/interpreter/interpreterRuntime.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -66,9 +66,15 @@
 
   static ConstantPoolCacheEntry* cache_entry_at(JavaThread *thread, int i)  { return method(thread)->constants()->cache()->entry_at(i); }
   static ConstantPoolCacheEntry* cache_entry(JavaThread *thread)            { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
+  static void      note_trap_inner(JavaThread* thread, int reason,
+                                   methodHandle trap_method, int trap_bci, TRAPS);
   static void      note_trap(JavaThread *thread, int reason, TRAPS);
+#ifdef CC_INTERP
+  // Profile traps in C++ interpreter.
+  static void      note_trap(JavaThread* thread, int reason, Method *method, int trap_bci);
+#endif // CC_INTERP
 
-  // Inner work method for Interpreter's frequency counter overflow
+  // Inner work method for Interpreter's frequency counter overflow.
   static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);
 
  public:
@@ -101,6 +107,17 @@
 #endif
   static void    throw_pending_exception(JavaThread* thread);
 
+#ifdef CC_INTERP
+  // Profile traps in C++ interpreter.
+  static void    note_nullCheck_trap (JavaThread* thread, Method *method, int trap_bci);
+  static void    note_div0Check_trap (JavaThread* thread, Method *method, int trap_bci);
+  static void    note_rangeCheck_trap(JavaThread* thread, Method *method, int trap_bci);
+  static void    note_classCheck_trap(JavaThread* thread, Method *method, int trap_bci);
+  static void    note_arrayCheck_trap(JavaThread* thread, Method *method, int trap_bci);
+  // A dummy for macros that shall not profile traps.
+  static void    note_no_trap(JavaThread* thread, Method *method, int trap_bci) {}
+#endif // CC_INTERP
+
   // Statics & fields
   static void    resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode);
 
--- a/src/share/vm/interpreter/invocationCounter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/invocationCounter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -99,16 +99,24 @@
   int   get_BackwardBranchLimit() const          { return InterpreterBackwardBranchLimit >> number_of_noncount_bits; }
   int   get_ProfileLimit() const                 { return InterpreterProfileLimit >> number_of_noncount_bits; }
 
+#ifdef CC_INTERP
   // Test counter using scaled limits like the asm interpreter would do rather than doing
   // the shifts to normalize the counter.
-
-  bool   reached_InvocationLimit() const         { return _counter >= (unsigned int) InterpreterInvocationLimit; }
-  bool   reached_BackwardBranchLimit() const     { return _counter >= (unsigned int) InterpreterBackwardBranchLimit; }
-
-  // Do this just like asm interpreter does for max speed
-  bool   reached_ProfileLimit(InvocationCounter *back_edge_count) const {
-    return (_counter && count_mask) + back_edge_count->_counter >= (unsigned int) InterpreterProfileLimit;
+  // Checks sum of invocation_counter and backedge_counter as the template interpreter does.
+  bool reached_InvocationLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterInvocationLimit;
   }
+  bool reached_BackwardBranchLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterBackwardBranchLimit;
+  }
+  // Do this just like asm interpreter does for max speed.
+  bool reached_ProfileLimit(InvocationCounter *back_edge_count) const {
+    return (_counter & count_mask) + (back_edge_count->_counter & count_mask) >=
+           (unsigned int) InterpreterProfileLimit;
+  }
+#endif // CC_INTERP
 
   void increment()                               { _counter += count_increment; }
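
The rewritten limit checks replace the old broken '(_counter && count_mask)' logical-and with a proper mask, and sum the invocation and backedge counts as the template interpreter does. A standalone model of the arithmetic, assuming the low number_of_noncount_bits of each counter hold state/carry flags and the limits arrive pre-scaled:

    #include <cstdint>

    const int      noncount_bits = 3;   // assumed flag-bit width for this model
    const uint32_t count_mask    = ~((1u << noncount_bits) - 1);

    bool reached_profile_limit(uint32_t invocation_counter,
                               uint32_t backedge_counter,
                               uint32_t scaled_limit) {
      // Mask out the flag bits of both counters and compare their sum
      // against a limit that is already shifted by noncount_bits.
      return (invocation_counter & count_mask) +
             (backedge_counter   & count_mask) >= scaled_limit;
    }
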
 
--- a/src/share/vm/interpreter/linkResolver.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -300,7 +300,7 @@
   Symbol* signature = resolved_method->signature();
 
   // First check in default method array
-  if (!resolved_method->is_abstract()  &&
+  if (!resolved_method->is_abstract() &&
     (InstanceKlass::cast(klass())->default_methods() != NULL)) {
     int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
     if (index >= 0 ) {
@@ -318,7 +318,11 @@
 
 void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   InstanceKlass *ik = InstanceKlass::cast(klass());
-  result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature));
+
+  // Specify 'true' in order to skip default methods when searching the
+  // interfaces.  Function lookup_method_in_klasses() already looked for
+  // the method in the default methods table.
+  result = methodHandle(THREAD, ik->lookup_method_in_all_interfaces(name, signature, true));
 }
 
 void LinkResolver::lookup_polymorphic_method(methodHandle& result,
@@ -560,16 +564,7 @@
     }
   }
 
-  // 5. check if method is concrete
-  if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
-    ResourceMark rm(THREAD);
-    THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
-              Method::name_and_sig_as_C_string(resolved_klass(),
-                                                      method_name,
-                                                      method_signature));
-  }
-
-  // 6. access checks, access checking may be turned off when calling from within the VM.
+  // 5. access checks, access checking may be turned off when calling from within the VM.
   if (check_access) {
     assert(current_klass.not_null() , "current_klass should not be null");
 
@@ -620,7 +615,7 @@
                                             bool check_access,
                                             bool nostatics, TRAPS) {
 
- // check if klass is interface
+  // check if klass is interface
   if (!resolved_klass->is_interface()) {
     ResourceMark rm(THREAD);
     char buf[200];
@@ -645,16 +640,6 @@
     }
   }
 
-  if (nostatics && resolved_method->is_static()) {
-    ResourceMark rm(THREAD);
-    char buf[200];
-    jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s", Method::name_and_sig_as_C_string(resolved_klass(),
-                                                      resolved_method->name(),
-                                                      resolved_method->signature()));
-    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
-  }
-
-
   if (check_access) {
    // JDK8 adds non-public interface methods, and an accessibility check requirement
     assert(current_klass.not_null() , "current_klass should not be null");
@@ -698,6 +683,15 @@
     }
   }
 
+  if (nostatics && resolved_method->is_static()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "Expected instance not static method %s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                 resolved_method->name(), resolved_method->signature()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   if (TraceItables && Verbose) {
     ResourceMark rm(THREAD);
     tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
@@ -1291,8 +1285,11 @@
                  resolved_klass()->external_name());
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
   // do lookup based on receiver klass
   methodHandle sel_method;
+  // This search must match the linktime preparation search for itable initialization
+  // to correctly enforce loader constraints for interface method inheritance
   lookup_instance_method_in_klasses(sel_method, recv_klass,
             resolved_method->name(),
             resolved_method->signature(), CHECK);
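
Two behavioural points in this file are easy to miss in the hunks above: method lookup consults the class's local default-methods table before walking the interfaces (and the interface walk now passes 'true' to skip defaults, so the same method is never found twice), and the invokeinterface rejection of static methods now happens only after the method has been resolved and access-checked. A compilable sketch of that ordering, using hypothetical stand-in types rather than HotSpot's Method*/InstanceKlass:

#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-ins for HotSpot's method/klass types, not real ones.
struct Method { std::string name; bool is_static; bool is_default; };

struct Klass {
  std::vector<Method> default_methods;    // locally-copied default methods
  std::vector<Method> interface_methods;  // methods declared on interfaces
};

// Lookup order mirroring lookup_method_in_klasses() followed by
// lookup_method_in_interfaces(): the default-methods table wins, and the
// interface walk skips defaults.
const Method* lookup(const Klass& k, const std::string& name) {
  for (std::size_t i = 0; i < k.default_methods.size(); i++)
    if (k.default_methods[i].name == name) return &k.default_methods[i];
  for (std::size_t i = 0; i < k.interface_methods.size(); i++)
    if (k.interface_methods[i].name == name && !k.interface_methods[i].is_default)
      return &k.interface_methods[i];     // the 'true' argument == skip defaults
  return 0;
}

// The reordered invokeinterface check: statics are rejected only after the
// method has been resolved (and, in the real code, access-checked).
void check_instance(const Method* m) {
  if (m != 0 && m->is_static)
    throw std::runtime_error("Expected instance not static method " + m->name);
}

int main() {
  Klass k;
  Method size_dflt  = { "size", false, true };
  Method close_ifc  = { "close", false, false };
  k.default_methods.push_back(size_dflt);
  k.interface_methods.push_back(size_dflt);  // also visible via the interface
  k.interface_methods.push_back(close_ifc);
  check_instance(lookup(k, "size"));   // resolves to the default-methods copy
  check_instance(lookup(k, "close"));  // found in the interface walk
  return 0;
}
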
--- a/src/share/vm/interpreter/rewriter.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,8 +250,8 @@
     // We will reverse the bytecode rewriting _after_ adjusting them.
     // Adjust the cache index by offset to the invokedynamic entries in the
     // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
-    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
-    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
+    int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
     assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
     // zero out 4 bytes
     Bytes::put_Java_u4(p, 0);
@@ -453,18 +453,7 @@
   return method;
 }
 
-void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
-  ResourceMark rm(THREAD);
-  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
-  // (That's all, folks.)
-}
-
-
-Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
-  : _klass(klass),
-    _pool(cpool),
-    _methods(methods)
-{
+void Rewriter::rewrite_bytecodes(TRAPS) {
   assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
 
   // determine index maps for Method* rewriting
@@ -508,6 +497,29 @@
   // May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
   // entries had to be added.
   patch_invokedynamic_bytecodes();
+}
+
+void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
+  ResourceMark rm(THREAD);
+  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
+  // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
+  : _klass(klass),
+    _pool(cpool),
+    _methods(methods)
+{
+
+  // Rewrite bytecodes - an exception here exits via CHECK.
+  rewrite_bytecodes(CHECK);
+
+  // Stress restoring bytecodes
+  if (StressRewriter) {
+    restore_bytecodes();
+    rewrite_bytecodes(CHECK);
+  }
 
   // allocate constant pool cache, now that we've seen all the bytecodes
   make_constant_pool_cache(THREAD);
@@ -523,6 +535,7 @@
  // so methods with jsrs in custom class lists aren't attempted to be
   // rewritten in the RO section of the shared archive.
   // Relocated bytecodes don't have to be restored, only the cp cache entries
+  int len = _methods->length();
   for (int i = len-1; i >= 0; i--) {
     methodHandle m(THREAD, _methods->at(i));
 
--- a/src/share/vm/interpreter/rewriter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/rewriter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -199,6 +199,9 @@
 
   void patch_invokedynamic_bytecodes();
 
+  // Do all the work.
+  void rewrite_bytecodes(TRAPS);
+
   // Revert bytecodes in case of an exception.
   void restore_bytecodes();
 
--- a/src/share/vm/interpreter/templateInterpreter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/templateInterpreter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/interpreter/templateTable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/interpreter/templateTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "interp_masm_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "interp_masm_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "interp_masm_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "interp_masm_ppc_64.hpp"
 #endif
 
 #ifndef CC_INTERP
@@ -370,8 +373,8 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "templateTable_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "templateTable_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "templateTable_ppc_32.hpp"
 #endif
 
 };
--- a/src/share/vm/libadt/port.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/libadt/port.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -91,8 +91,6 @@
 #define IMPLEMENTATION
 #include <stdlib.h>
 #include <memory.h>
-inline int min( int a, int b) { return a < b ? a : b; }
-inline int max( int a, int b) { return a > b ? a : b; }
 
 #elif defined(_MSC_VER)
 // Microsoft Visual C++
--- a/src/share/vm/memory/allocation.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/allocation.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -45,6 +45,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -71,9 +74,8 @@
   return MetaspaceShared::is_in_shared_space(this);
 }
 
-
 bool MetaspaceObj::is_metaspace_object() const {
-  return Metaspace::contains((void*)this);
+  return ClassLoaderDataGraph::contains((void*)this);
 }
 
 void MetaspaceObj::print_address_on(outputStream* st) const {
@@ -140,7 +142,7 @@
 void ResourceObj::set_allocation_type(address res, allocation_type type) {
     // Set allocation type in the resource object
     uintptr_t allocation = (uintptr_t)res;
-    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
+    assert((allocation & allocation_mask) == 0, err_msg("address should be aligned to 4 bytes at least: " PTR_FORMAT, res));
     assert(type <= allocation_mask, "incorrect allocation type");
     ResourceObj* resobj = (ResourceObj *)res;
     resobj->_allocation_t[0] = ~(allocation + type);
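
The assertion change above replaces a fixed message with an err_msg()-formatted one so the offending address is printed with the failure. A rough equivalent of that formatted-assert idea using only standard C facilities (the macro name below is a stand-in; HotSpot's err_msg/PTR_FORMAT machinery is more elaborate):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-in for assert(cond, err_msg(...)): print a formatted message,
// including the failing address, before aborting.
#define CHECK_ALIGNED(p, mask)                                             \
  do {                                                                     \
    if ((reinterpret_cast<std::uintptr_t>(p) & (mask)) != 0) {             \
      std::fprintf(stderr,                                                 \
                   "address should be aligned to 4 bytes at least: %p\n",  \
                   static_cast<const void*>(p));                           \
      std::abort();                                                        \
    }                                                                      \
  } while (0)

int main() {
  static int words[2];                               // 4-byte aligned storage
  const char* p = reinterpret_cast<const char*>(words);
  CHECK_ALIGNED(p, 0x3);         // passes
  // CHECK_ALIGNED(p + 1, 0x3);  // would print the bad address and abort
  std::printf("aligned\n");
  return 0;
}
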
--- a/src/share/vm/memory/allocation.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/allocation.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -220,8 +220,11 @@
 class StackObj ALLOCATION_SUPER_CLASS_SPEC {
  private:
   void* operator new(size_t size) throw();
+  void* operator new [](size_t size) throw();
+#ifdef __IBMCPP__
+ public:
+#endif
   void  operator delete(void* p);
-  void* operator new [](size_t size) throw();
   void  operator delete [](void* p);
 };
 
@@ -264,7 +267,7 @@
 
 class MetaspaceObj {
  public:
-  bool is_metaspace_object() const;  // more specific test but slower
+  bool is_metaspace_object() const;
   bool is_shared() const;
   void print_address_on(outputStream* st) const;  // nonvirtual address printing
 
@@ -576,8 +579,8 @@
   bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
   bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
   bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
-  ResourceObj(); // default construtor
-  ResourceObj(const ResourceObj& r); // default copy construtor
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
   ResourceObj& operator=(const ResourceObj& r); // default copy assignment
   ~ResourceObj();
 #endif // ASSERT
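
The StackObj hunk keeps operator new/new[] private while making operator delete public only for IBM's xlC compiler (the __IBMCPP__ guard), preserving the stack-only-class idiom. The same idiom in modern form, sketched with C++11 '= delete' instead of HotSpot's private declarations:

#include <cstddef>

// A class whose instances can live only on the stack (or embedded in
// another object): heap allocation is rejected at compile time.
class StackOnly {
 public:
  void* operator new(std::size_t) = delete;
  void* operator new[](std::size_t) = delete;
  void operator delete(void*) = delete;
  void operator delete[](void*) = delete;
  int value = 0;
};

int main() {
  StackOnly ok;            // fine: automatic storage
  ok.value = 42;
  // StackOnly* bad = new StackOnly;  // would not compile: operator new deleted
  return ok.value == 42 ? 0 : 1;
}
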
--- a/src/share/vm/memory/barrierSet.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/barrierSet.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -100,9 +100,9 @@
 public:
 
   // ...then the post-write version.
-  inline void write_ref_field(void* field, oop new_val);
+  inline void write_ref_field(void* field, oop new_val, bool release = false);
 protected:
-  virtual void write_ref_field_work(void* field, oop new_val) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
 public:
 
   // Invoke the barrier, if any, necessary when writing the "bytes"-byte
@@ -124,7 +124,7 @@
   virtual bool has_read_region_opt() = 0;
   virtual bool has_write_region_opt() = 0;
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.  Otherwise, they should perform an appropriate
   // barrier for an array whose elements are all in the given memory region.
   virtual void read_ref_array(MemRegion mr) = 0;
@@ -165,7 +165,7 @@
   // normally reserve space for such tables, and commit parts of the table
   // "covering" parts of the heap that are committed.  The constructor is
   // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covoered_region" function allows the
+  // be covered, and the "resize_covered_region" function allows the
   // sub-parts of the heap to inform the barrier set of changes of their
   // sizes.
   BarrierSet(int max_covered_regions) :
--- a/src/share/vm/memory/barrierSet.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/barrierSet.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -40,11 +40,11 @@
   }
 }
 
-void BarrierSet::write_ref_field(void* field, oop new_val) {
+void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
   if (kind() == CardTableModRef) {
-    ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
+    ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val, release);
   } else {
-    write_ref_field_work(field, new_val);
+    write_ref_field_work(field, new_val, release);
   }
 }
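
write_ref_field() above is a hand-devirtualized dispatch: the common CardTableModRef case is detected with a cheap kind() test and routed to an inlinable body, and only other barrier sets pay for the virtual call. A self-contained sketch of that pattern with illustrative types:

#include <cstdio>

struct Barrier {
  enum Kind { CardTable, Other };
  virtual ~Barrier() {}
  virtual Kind kind() const = 0;
  virtual void write_ref_field_work(void* field, bool release) = 0;  // slow path
  inline void write_ref_field(void* field, bool release = false);    // fast path
};

struct CardTableBarrier : Barrier {
  Kind kind() const { return CardTable; }
  void inline_write_ref_field(void*, bool) { std::puts("inline card mark"); }
  void write_ref_field_work(void* f, bool r) { inline_write_ref_field(f, r); }
};

inline void Barrier::write_ref_field(void* field, bool release) {
  if (kind() == CardTable) {
    // Concrete type is known here, so the call can be inlined: no vtable hop.
    static_cast<CardTableBarrier*>(this)->inline_write_ref_field(field, release);
  } else {
    write_ref_field_work(field, release);  // virtual dispatch for the rest
  }
}

int main() {
  CardTableBarrier bs;
  int slot = 0;
  bs.write_ref_field(&slot);  // takes the devirtualized fast path
  return 0;
}
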
 
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -44,19 +44,19 @@
 // This is currently used in the Concurrent Mark&Sweep implementation.
 ////////////////////////////////////////////////////////////////////////////////
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t,  FreeList_t>)/HeapWordSize;
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
   // Do some assertion checking here.
   return (TreeChunk<Chunk_t, FreeList_t>*) fc;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
   TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
-  if (prev() != NULL) { // interior list node shouldn'r have tree fields
+  if (prev() != NULL) { // interior list node shouldn't have tree fields
     guarantee(embedded_list()->parent() == NULL && embedded_list()->left() == NULL &&
               embedded_list()->right()  == NULL, "should be clear");
   }
@@ -67,11 +67,11 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>::TreeList() : _parent(NULL),
   _left(NULL), _right(NULL) {}
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
   // This first free chunk in the list will be the tree list.
@@ -88,20 +88,7 @@
   return tl;
 }
 
-
-template <class Chunk_t, template <class> class FreeList_t>
-TreeList<Chunk_t, FreeList_t>*
-get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
-  FreeBlockDictionary<Chunk_t>::verify_par_locked();
-  Chunk_t* res = get_chunk_from_tree(size, dither);
-  assert(res == NULL || res->is_free(),
-         "Should be returning a free chunk");
-  assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
-         res->size() == size, "Not correct size");
-  return res;
-}
-
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
   TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
@@ -125,17 +112,17 @@
 // an over populated size.  The general get_better_list() just returns
 // the current list.
 template <>
-TreeList<FreeChunk, AdaptiveFreeList>*
-TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
-  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
+TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
+TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
+  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
   // A candidate chunk has been found.  If it is already under
   // populated, get a chunk associated with the hint for this
   // chunk.
 
-  TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
+  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
   if (surplus() <= 0) {
     /* Use the hint to find a size with a surplus, and reset the hint. */
-    TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
+    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
     while (hintTL->hint() != 0) {
       assert(hintTL->hint() > hintTL->size(),
         "hint points in the wrong direction");
@@ -163,14 +150,14 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::get_better_list(
   BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
   return this;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
 
   TreeList<Chunk_t, FreeList_t>* retTL = this;
@@ -247,7 +234,7 @@
     prevFC->link_after(nextTC);
   }
 
-  // Below this point the embeded TreeList<Chunk_t, FreeList_t> being used for the
+  // Below this point the embedded TreeList<Chunk_t, FreeList_t> being used for the
   // tree node may have changed. Don't use "this"
   // TreeList<Chunk_t, FreeList_t>*.
   // chunk should still be a free chunk (bit set in _prev)
@@ -286,7 +273,7 @@
   return retTL;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk != NULL, "returning NULL chunk");
   assert(chunk->list() == this, "list should be set for chunk");
@@ -301,7 +288,7 @@
   this->link_tail(chunk);
 
   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  FreeList_t<Chunk_t>::increment_count();
+  FreeList_t::increment_count();
   debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -311,7 +298,7 @@
 // is defined to be after the chunk pointed to by head().  This is
 // because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
 // list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk->list() == this, "list should be set for chunk");
   assert(head() != NULL, "The tree list is embedded in the first chunk");
@@ -329,13 +316,13 @@
   }
   head()->link_after(chunk);
   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList_t<Chunk_t>::increment_count();
+  FreeList_t::increment_count();
   debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
   assert((ZapUnusedHeapArea &&
           SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
@@ -345,14 +332,14 @@
     "Space should be clear or mangled");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
   assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
     "Wrong type of chunk?");
   return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
   assert(head() != NULL, "The head of the list cannot be NULL");
   Chunk_t* fc = head()->next();
@@ -369,7 +356,7 @@
 // Returns the block with the largest heap address amongst
 // those in the list for this size; potentially slow and expensive,
 // use with caution!
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
   assert(head() != NULL, "The head of the list cannot be NULL");
   Chunk_t* fc = head()->next();
@@ -392,7 +379,7 @@
   return retTC;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
   assert((mr.byte_size() > min_size()), "minimum chunk size");
 
@@ -405,17 +392,17 @@
   assert(total_free_blocks() == 1, "reset check failed");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
   _total_size = _total_size + inc;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
   _total_size = _total_size - dec;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
   assert((mr.byte_size() > min_size()), "minimum chunk size");
   set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
@@ -423,13 +410,13 @@
   set_total_free_blocks(1);
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
   MemRegion mr(addr, heap_word_size(byte_size));
   reset(mr);
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
   set_root(NULL);
   set_total_size(0);
@@ -437,7 +424,7 @@
 }
 
 // Get a free block of size at least size from tree, or NULL.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>*
 BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
                               size_t size,
@@ -496,7 +483,7 @@
   return retTC;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
   TreeList<Chunk_t, FreeList_t>* curTL;
   for (curTL = root(); curTL != NULL;) {
@@ -515,7 +502,7 @@
 }
 
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
   size_t size = tc->size();
   TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
@@ -526,7 +513,7 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
   TreeList<Chunk_t, FreeList_t> *curTL = root();
   if (curTL != NULL) {
@@ -541,7 +528,7 @@
 // chunk in a list on a tree node, just unlink it.
 // If it is the last chunk in the list (the next link is NULL),
 // remove the node and repair the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>*
 BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
   assert(tc != NULL, "Should not call with a NULL chunk");
@@ -682,7 +669,7 @@
 // Remove the leftmost node (lm) in the tree and return it.
 // If lm has a right child, link it to the left node of
 // the parent of lm.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
   assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
   // locate the subtree minimum by walking down left branches
@@ -703,7 +690,7 @@
     // The only use of this method would not pass the root of the
     // tree (as indicated by the assertion above that the tree list
     // has a parent) but the specification does not explicitly exclude the
-    // passing of the root so accomodate it.
+    // passing of the root so accommodate it.
     set_root(NULL);
   }
   debug_only(
@@ -717,7 +704,7 @@
   return curTL;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
   TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
   size_t size = fc->size();
@@ -783,7 +770,7 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
   FreeBlockDictionary<Chunk_t>::verify_par_locked();
   TreeList<Chunk_t, FreeList_t>* tc = root();
@@ -792,7 +779,7 @@
   return tc->size();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
   size_t res;
   res = tl->count();
@@ -805,7 +792,7 @@
   return res;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -814,7 +801,7 @@
          total_size_in_tree(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const {
   if (tl == NULL) {
     return 0.0;
@@ -826,7 +813,7 @@
   return curr;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -835,14 +822,14 @@
          total_free_blocks_in_tree(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::num_free_blocks() const {
   assert(total_free_blocks_in_tree(root()) == total_free_blocks(),
          "_total_free_blocks inconsistency");
   return total_free_blocks();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -850,12 +837,12 @@
                   tree_height_helper(tl->right()));
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height() const {
   return tree_height_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL) {
     return 0;
@@ -864,18 +851,18 @@
     total_nodes_helper(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   return total_nodes_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}
 
 #if INCLUDE_ALL_GCS
 template <>
-void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth){
-  TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
+void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
+  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
   if (nd) {
     if (split) {
       if (birth) {
@@ -903,7 +890,7 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
   // For the general type of freelists, encourage coalescing by
   // returning true.
@@ -915,7 +902,7 @@
 bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
   if (FLSAlwaysCoalesceLarge) return true;
 
-  TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
+  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
   // None of requested size implies overpopulated.
   return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
          list_of_size->count() > list_of_size->coal_desired();
@@ -928,15 +915,15 @@
 //   do_tree() walks the nodes in the binary tree applying do_list()
 //     to each list at each node.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeCensusClosure : public StackObj {
  protected:
-  virtual void do_list(FreeList_t<Chunk_t>* fl) = 0;
+  virtual void do_list(FreeList_t* fl) = 0;
  public:
   virtual void do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -948,7 +935,7 @@
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -962,7 +949,7 @@
 
 // For each list in the tree, calculate the desired, desired
 // coalesce, count before sweep, and surplus before sweep.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   double _percentage;
   float _inter_sweep_current;
@@ -995,16 +982,16 @@
 // Similar to TreeCensusClosure but searches the
 // tree and returns promptly when found.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeSearchClosure : public StackObj {
  protected:
-  virtual bool do_list(FreeList_t<Chunk_t>* fl) = 0;
+  virtual bool do_list(FreeList_t* fl) = 0;
  public:
   virtual bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
 };
 
 #if 0 //  Don't need this yet but here for symmetry.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
  public:
   bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -1018,7 +1005,7 @@
 };
 #endif
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
  public:
   bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -1033,14 +1020,14 @@
 
 // Searches the tree for a chunk that ends at the
 // specified address.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t> {
   HeapWord* _target;
   Chunk_t* _found;
 
  public:
   EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
-  bool do_list(FreeList_t<Chunk_t>* fl) {
+  bool do_list(FreeList_t* fl) {
     Chunk_t* item = fl->head();
     while (item != NULL) {
       if (item->end() == (uintptr_t*) _target) {
@@ -1054,7 +1041,7 @@
   Chunk_t* found() { return _found; }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord* target) const {
   EndTreeSearchClosure<Chunk_t, FreeList_t> etsc(target);
   bool found_target = etsc.do_tree(root());
@@ -1063,7 +1050,7 @@
   return etsc.found();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double coalSurplusPercent,
   float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
   BeginSweepClosure<Chunk_t, FreeList_t> bsc(coalSurplusPercent, inter_sweep_current,
@@ -1075,32 +1062,32 @@
 // Closures and methods for calculating total bytes returned to the
 // free lists in the tree.
 #ifndef PRODUCT
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
    public:
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     fl->set_returned_bytes(0);
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::initialize_dict_returned_bytes() {
   InitializeDictReturnedBytesClosure<Chunk_t, FreeList_t> idrb;
   idrb.do_tree(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   size_t _dict_returned_bytes;
  public:
   ReturnedBytesClosure() { _dict_returned_bytes = 0; }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     _dict_returned_bytes += fl->returned_bytes();
   }
   size_t dict_returned_bytes() { return _dict_returned_bytes; }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
   ReturnedBytesClosure<Chunk_t, FreeList_t> rbc;
   rbc.do_tree(root());
@@ -1109,17 +1096,17 @@
 }
 
 // Count the number of entries in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class treeCountClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   uint count;
   treeCountClosure(uint c) { count = c; }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     count++;
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
   treeCountClosure<Chunk_t, FreeList_t> ctc(0);
   ctc.do_tree(root());
@@ -1128,7 +1115,7 @@
 #endif // PRODUCT
 
 // Calculate surpluses for the lists in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   double percentage;
  public:
@@ -1144,14 +1131,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_surplus(double splitSurplusPercent) {
   setTreeSurplusClosure<Chunk_t, FreeList_t> sts(splitSurplusPercent);
   sts.do_tree(root());
 }
 
 // Set hints for the lists in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
   size_t hint;
  public:
@@ -1170,14 +1157,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_hints(void) {
   setTreeHintsClosure<Chunk_t, FreeList_t> sth(0);
   sth.do_tree(root());
 }
 
 // Save count before previous sweep and splits and coalesces.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   void do_list(FreeList<Chunk_t>* fl) {}
 
@@ -1192,14 +1179,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::clear_tree_census(void) {
   clearTreeCensusClosure<Chunk_t, FreeList_t> ctc;
   ctc.do_tree(root());
 }
 
 // Do reporting and post sweep clean up.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double splitSurplusPercent) {
   // Does walking the tree 3 times hurt?
   set_tree_surplus(splitSurplusPercent);
@@ -1211,7 +1198,7 @@
 }
 
 // Print summary statistics
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
   FreeBlockDictionary<Chunk_t>::verify_par_locked();
   gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
@@ -1230,22 +1217,22 @@
 // Print census information - counts, births, deaths, etc.
 // for each list in the tree.  Also print some summary
 // information.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   int _print_line;
   size_t _total_free;
-  FreeList_t<Chunk_t> _total;
+  FreeList_t _total;
 
  public:
   PrintTreeCensusClosure() {
     _print_line = 0;
     _total_free = 0;
   }
-  FreeList_t<Chunk_t>* total() { return &_total; }
+  FreeList_t* total() { return &_total; }
   size_t total_free() { return _total_free; }
   void do_list(FreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+      FreeList_t::print_labels_on(gclog_or_tty, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1256,7 +1243,7 @@
 #if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+      FreeList_t::print_labels_on(gclog_or_tty, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1275,16 +1262,16 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
 
   gclog_or_tty->print("\nBinaryTree\n");
-  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+  FreeList_t::print_labels_on(gclog_or_tty, "size");
   PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
   ptc.do_tree(root());
 
-  FreeList_t<Chunk_t>* total = ptc.total();
-  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
+  FreeList_t* total = ptc.total();
+  FreeList_t::print_labels_on(gclog_or_tty, " ");
 }
 
 #if INCLUDE_ALL_GCS
@@ -1293,7 +1280,7 @@
 
   gclog_or_tty->print("\nBinaryTree\n");
   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
-  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList> ptc;
+  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > ptc;
   ptc.do_tree(root());
 
   AdaptiveFreeList<FreeChunk>* total = ptc.total();
@@ -1311,7 +1298,7 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   outputStream* _st;
   int _print_line;
@@ -1321,9 +1308,9 @@
     _st = st;
     _print_line = 0;
   }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(_st, "size");
+      FreeList_t::print_labels_on(_st, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1337,10 +1324,10 @@
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* st) const {
 
-  FreeList_t<Chunk_t>::print_labels_on(st, "size");
+  FreeList_t::print_labels_on(st, "size");
   PrintFreeListsClosure<Chunk_t, FreeList_t> pflc(st);
   pflc.do_tree(root());
 }
@@ -1349,15 +1336,15 @@
 // . _root has no parent
 // . parent and child point to each other
 // . each node's key correctly related to that of its child(ren)
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
   guarantee(root() == NULL || total_free_blocks() == 0 ||
-    total_size() != 0, "_total_size should't be 0?");
+    total_size() != 0, "_total_size shouldn't be 0?");
   guarantee(root() == NULL || root()->parent() == NULL, "_root shouldn't have parent");
   verify_tree_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl) {
   size_t ct = 0;
   for (Chunk_t* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
@@ -1371,7 +1358,7 @@
 // Note: this helper is recursive rather than iterative, so use with
 // caution on very deep trees; and watch out for stack overflow errors;
 // In general, to be used only for debugging.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return;
@@ -1400,25 +1387,25 @@
   verify_tree_helper(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify() const {
   verify_tree();
   guarantee(total_size() == total_size_in_tree(root()), "Total Size inconsistency");
 }
 
-template class TreeList<Metablock, FreeList>;
-template class BinaryTreeDictionary<Metablock, FreeList>;
-template class TreeChunk<Metablock, FreeList>;
+template class TreeList<Metablock, FreeList<Metablock> >;
+template class BinaryTreeDictionary<Metablock, FreeList<Metablock> >;
+template class TreeChunk<Metablock, FreeList<Metablock> >;
 
-template class TreeList<Metachunk, FreeList>;
-template class BinaryTreeDictionary<Metachunk, FreeList>;
-template class TreeChunk<Metachunk, FreeList>;
+template class TreeList<Metachunk, FreeList<Metachunk> >;
+template class BinaryTreeDictionary<Metachunk, FreeList<Metachunk> >;
+template class TreeChunk<Metachunk, FreeList<Metachunk> >;
 
 
 #if INCLUDE_ALL_GCS
 // Explicitly instantiate these types for FreeChunk.
-template class TreeList<FreeChunk, AdaptiveFreeList>;
-template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
-template class TreeChunk<FreeChunk, AdaptiveFreeList>;
+template class TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >;
+template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >;
+template class TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >;
 
 #endif // INCLUDE_ALL_GCS
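
The change running through this whole file swaps the template template parameter 'template <class> class FreeList_t' for a plain type parameter, so instantiations name an already-applied type such as AdaptiveFreeList<FreeChunk> and the class body says FreeList_t:: instead of FreeList_t<Chunk_t>::. A before/after sketch of the two declaration styles (DictOld/DictNew and SimpleFreeList are illustrative names, not HotSpot types):

#include <cstddef>

template <class Chunk>
struct SimpleFreeList { std::size_t count; SimpleFreeList() : count(0) {} };

// Before: the dictionary received a template and applied it itself.
template <class Chunk_t, template <class> class FreeList_tt>
struct DictOld {
  FreeList_tt<Chunk_t> list;   // must re-apply the template in the body
};

// After: the dictionary receives a finished type.  The body can say
// FreeList_t:: directly, and the free-list type is no longer forced to be
// a template taking exactly one type parameter.
template <class Chunk_t, class FreeList_t>
struct DictNew {
  FreeList_t list;             // already fully applied by the caller
};

struct Chunk {};

int main() {
  DictOld<Chunk, SimpleFreeList> a;            // pass the template itself
  DictNew<Chunk, SimpleFreeList<Chunk> > b;    // pass the applied type
  return (int)(a.list.count + b.list.count);   // both zero
}
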
--- a/src/share/vm/memory/binaryTreeDictionary.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,18 +37,18 @@
 // A TreeList is a FreeList which can be used to maintain a
 // binary tree of free lists.
 
-template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
-template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
-template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
-template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
-template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
+template <class Chunk_t, class FreeList_t> class TreeChunk;
+template <class Chunk_t, class FreeList_t> class BinaryTreeDictionary;
+template <class Chunk_t, class FreeList_t> class AscendTreeCensusClosure;
+template <class Chunk_t, class FreeList_t> class DescendTreeCensusClosure;
+template <class Chunk_t, class FreeList_t> class DescendTreeSearchClosure;
 
 class FreeChunk;
 template <class> class AdaptiveFreeList;
-typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > AFLBinaryTreeDictionary;
 
-template <class Chunk_t, template <class> class FreeList_t>
-class TreeList : public FreeList_t<Chunk_t> {
+template <class Chunk_t, class FreeList_t>
+class TreeList : public FreeList_t {
   friend class TreeChunk<Chunk_t, FreeList_t>;
   friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
   friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
@@ -66,12 +66,12 @@
   TreeList<Chunk_t, FreeList_t>* right()  const { return _right;  }
 
   // Wrapper on call to base class, to get the template to compile.
-  Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
-  Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
-  void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
-  void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
+  Chunk_t* head() const { return FreeList_t::head(); }
+  Chunk_t* tail() const { return FreeList_t::tail(); }
+  void set_head(Chunk_t* head) { FreeList_t::set_head(head); }
+  void set_tail(Chunk_t* tail) { FreeList_t::set_tail(tail); }
 
-  size_t size() const { return FreeList_t<Chunk_t>::size(); }
+  size_t size() const { return FreeList_t::size(); }
 
   // Accessors for links in tree.
 
@@ -90,7 +90,7 @@
   void clear_left()               { _left = NULL;   }
   void clear_right()              { _right = NULL;  }
   void clear_parent()             { _parent = NULL; }
-  void initialize()               { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }
+  void initialize()               { clear_left(); clear_right(), clear_parent(); FreeList_t::initialize(); }
 
   // For constructing a TreeList from a Tree chunk or
   // address and size.
@@ -139,7 +139,7 @@
 // on the free list for a node in the tree and is only removed if
 // it is the last chunk on the free list.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeChunk : public Chunk_t {
   friend class TreeList<Chunk_t, FreeList_t>;
   TreeList<Chunk_t, FreeList_t>* _list;
@@ -173,7 +173,7 @@
 };
 
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
   friend class VMStructs;
   size_t     _total_size;
@@ -322,7 +322,7 @@
   void       set_tree_hints(void);
   // Reset statistics for all the lists in the tree.
   void       clear_tree_census(void);
-  // Print the statistcis for all the lists in the tree.  Also may
+  // Print the statistics for all the lists in the tree.  Also may
   // print out summaries.
   void       print_dict_census(void) const;
   void       print_free_lists(outputStream* st) const;
--- a/src/share/vm/memory/blockOffsetTable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/blockOffsetTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -590,7 +590,7 @@
 
   // Otherwise, find the block start using the table, but taking
   // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themsleves.
+  // on the cards themselves.
   size_t index = _array->index_for(addr);
   assert(_array->address_for_index(index) == addr,
          "arg should be start of card");
--- a/src/share/vm/memory/blockOffsetTable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -424,7 +424,7 @@
     BlockOffsetArray(array, mr, false),
     _unallocated_block(_bottom) { }
 
-  // accessor
+  // Accessor
   HeapWord* unallocated_block() const {
     assert(BlockOffsetArrayUseUnallocatedBlock,
            "_unallocated_block is not being maintained");
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -54,8 +54,8 @@
 size_t CardTableModRefBS::compute_byte_map_size()
 {
   assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
-                                        "unitialized, check declaration order");
-  assert(_page_size != 0, "unitialized, check declaration order");
+                                        "uninitialized, check declaration order");
+  assert(_page_size != 0, "uninitialized, check declaration order");
   const size_t granularity = os::vm_allocation_granularity();
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
@@ -98,7 +98,7 @@
                                   "card marking array");
   }
 
-  // The assember store_check code will do an unsigned shift of the oop,
+  // The assembler store_check code will do an unsigned shift of the oop,
   // then add it to byte_map_base, i.e.
   //
   //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
@@ -243,7 +243,7 @@
   if (new_region.word_size() != old_region.word_size()) {
     // Commit new or uncommit old pages, if necessary.
     MemRegion cur_committed = _committed[ind];
-    // Extend the end of this _commited region
+    // Extend the end of this _committed region
     // to cover the end of any lower _committed regions.
     // This forms overlapping regions, but never interior regions.
     HeapWord* const max_prev_end = largest_prev_committed_end(ind);
@@ -419,8 +419,8 @@
 // Note that these versions are precise!  The scanning code has to handle the
 // fact that the write barrier may be either precise or imprecise.
 
-void CardTableModRefBS::write_ref_field_work(void* field, oop newVal) {
-  inline_write_ref_field(field, newVal);
+void CardTableModRefBS::write_ref_field_work(void* field, oop newVal, bool release) {
+  inline_write_ref_field(field, newVal, release);
 }
 
 
@@ -448,7 +448,7 @@
     // off parallelism is used, then active_workers can be used in
     // place of n_par_threads.
     //  This is an example of a path where n_par_threads is
-    // set to 0 to turn off parallism.
+    // set to 0 to turn off parallelism.
     //  [7] CardTableModRefBS::non_clean_card_iterate()
     //  [8] CardTableRS::younger_refs_in_space_iterate()
     //  [9] Generation::younger_refs_in_space_iterate()
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -292,7 +292,7 @@
   // these functions here for performance.
 protected:
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
-  virtual void write_ref_field_work(void* field, oop newVal);
+  virtual void write_ref_field_work(void* field, oop newVal, bool release = false);
 public:
 
   bool has_write_ref_array_opt() { return true; }
@@ -324,9 +324,14 @@
 
   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {}
 
-  template <class T> inline void inline_write_ref_field(T* field, oop newVal) {
+  template <class T> inline void inline_write_ref_field(T* field, oop newVal, bool release) {
     jbyte* byte = byte_for((void*)field);
-    *byte = dirty_card;
+    if (release) {
+      // Perform a releasing store if requested.
+      OrderAccess::release_store((volatile jbyte*) byte, dirty_card);
+    } else {
+      *byte = dirty_card;
+    }
   }
 
   // These are used by G1, when it uses the card table as a temporary data
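
The release flag threads all the way down to the card write: when requested, the dirty-card byte is stored with releasing semantics so it cannot become visible before the preceding reference store. The same ordering sketched with standard C++11 atomics (HotSpot uses its own OrderAccess layer, so this is an analogy, not the actual implementation):

#include <atomic>
#include <cstdio>

static int heap_slot = 0;                      // stand-in for the oop field
static std::atomic<unsigned char> card(0xff);  // 0xff = clean, 0 = dirty

void write_ref_field(int new_val, bool release) {
  heap_slot = new_val;                         // the reference store itself
  if (release) {
    // Releasing store: any thread that observes the dirty card is
    // guaranteed to also observe the heap_slot write above.
    card.store(0, std::memory_order_release);
  } else {
    card.store(0, std::memory_order_relaxed);  // plain dirty-card write
  }
}

int main() {
  write_ref_field(42, /*release=*/true);
  std::printf("slot=%d card=%u\n", heap_slot, (unsigned)card.load());
  return 0;
}
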
--- a/src/share/vm/memory/cardTableRS.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/cardTableRS.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -590,7 +590,7 @@
       // Then, the case analysis above reveals that, in the worst case,
       // any such stale card will be scanned unnecessarily at most twice.
       //
-      // It is nonethelss advisable to try and get rid of some of this
+      // It is nonetheless advisable to try and get rid of some of this
       // redundant work in a subsequent (low priority) re-design of
       // the card-scanning code, if only to simplify the underlying
       // state machine analysis/proof. ysr 1/28/2002. XXX
--- a/src/share/vm/memory/cardTableRS.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/cardTableRS.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -105,8 +105,6 @@
   ~CardTableRS();
 
   // *** GenRemSet functions.
-  GenRemSet::Name rs_kind() { return GenRemSet::CardTable; }
-
   CardTableRS* as_CardTableRS() { return this; }
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
--- a/src/share/vm/memory/collectorPolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -45,7 +45,7 @@
 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #endif // INCLUDE_ALL_GCS
 
-// CollectorPolicy methods.
+// CollectorPolicy methods
 
 CollectorPolicy::CollectorPolicy() :
     _space_alignment(0),
@@ -178,17 +178,14 @@
   // byte entry and the os page size is 4096, the maximum heap size should
   // be 512*4096 = 2MB aligned.
 
-  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
-  // is supported.
-  // Requirements of any new remembered set implementations must be added here.
-  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
+  size_t alignment = GenRemSet::max_alignment_constraint();
 
   // Parallel GC does its own alignment of the generations to avoid requiring a
   // large page (256M on some platforms) for the permanent generation.  The
   // other collectors should also be updated to do their own alignment and then
   // this use of lcm() should be removed.
   if (UseLargePages && !UseParallelGC) {
-      // in presence of large pages we have to make sure that our
+      // In presence of large pages we have to make sure that our
       // alignment is large page aware
       alignment = lcm(os::large_page_size(), alignment);
   }
@@ -196,7 +193,7 @@
   return alignment;
 }
 
-// GenCollectorPolicy methods.
+// GenCollectorPolicy methods
 
 GenCollectorPolicy::GenCollectorPolicy() :
     _min_gen0_size(0),
@@ -378,10 +375,10 @@
     _initial_heap_byte_size = InitialHeapSize;
   }
 
-  // adjust max heap size if necessary
+  // Adjust NewSize and OldSize or MaxHeapSize to match each other
   if (NewSize + OldSize > MaxHeapSize) {
     if (_max_heap_size_cmdline) {
-      // somebody set a maximum heap size with the intention that we should not
+      // Somebody has set a maximum heap size with the intention that we should not
       // exceed it. Adjust New/OldSize as necessary.
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
@@ -442,32 +439,32 @@
   // minimum gen0 sizes.
 
   if (_max_heap_byte_size == _min_heap_byte_size) {
-    // The maximum and minimum heap sizes are the same so
-    // the generations minimum and initial must be the
-    // same as its maximum.
+    // The maximum and minimum heap sizes are the same so the generations
+    // minimum and initial must be the same as its maximum.
     _min_gen0_size = max_new_size;
     _initial_gen0_size = max_new_size;
     _max_gen0_size = max_new_size;
   } else {
     size_t desired_new_size = 0;
-    if (!FLAG_IS_DEFAULT(NewSize)) {
-      // If NewSize is set ergonomically (for example by cms), it
-      // would make sense to use it.  If it is used, also use it
-      // to set the initial size.  Although there is no reason
-      // the minimum size and the initial size have to be the same,
-      // the current implementation gets into trouble during the calculation
-      // of the tenured generation sizes if they are different.
-      // Note that this makes the initial size and the minimum size
-      // generally small compared to the NewRatio calculation.
+    if (FLAG_IS_CMDLINE(NewSize)) {
+      // If NewSize is set on the command line, we must use it as
+      // the initial size and it also makes sense to use it as the
+      // lower limit.
       _min_gen0_size = NewSize;
       desired_new_size = NewSize;
       max_new_size = MAX2(max_new_size, NewSize);
+    } else if (FLAG_IS_ERGO(NewSize)) {
+      // If NewSize is set ergonomically, we should use it as a lower
+      // limit, but use NewRatio to calculate the initial size.
+      _min_gen0_size = NewSize;
+      desired_new_size =
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+      max_new_size = MAX2(max_new_size, NewSize);
     } else {
       // For the case where NewSize is the default, use NewRatio
       // to size the minimum and initial generation sizes.
       // Use the default NewSize as the floor for these values.  If
-      // NewRatio is overly large, the resulting sizes can be too
-      // small.
+      // NewRatio is overly large, the resulting sizes can be too small.
       _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
       desired_new_size =
         MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
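The rewritten branch distinguishes three origins for NewSize: set on the command line, set ergonomically, or left at its default. A condensed, self-contained sketch of that decision table (the enum, struct, and function names are illustrative; scaled_min and scaled_initial stand in for scale_by_NewRatio_aligned applied to the minimum and initial heap sizes):

    #include <algorithm>
    #include <cstddef>

    enum FlagOrigin { ORIGIN_DEFAULT, ORIGIN_CMDLINE, ORIGIN_ERGO };

    struct YoungSizes { size_t min_size; size_t desired_size; };

    // A command-line NewSize fixes both values; an ergonomic NewSize is only
    // a lower bound, with NewRatio driving the initial size; by default,
    // NewRatio drives both, with the default NewSize as the floor.
    YoungSizes size_young_gen(FlagOrigin origin, size_t new_size,
                              size_t scaled_min, size_t scaled_initial) {
      YoungSizes s;
      if (origin == ORIGIN_CMDLINE) {
        s.min_size     = new_size;
        s.desired_size = new_size;
      } else if (origin == ORIGIN_ERGO) {
        s.min_size     = new_size;
        s.desired_size = std::max(scaled_initial, new_size);
      } else {
        s.min_size     = std::max(scaled_min, new_size);
        s.desired_size = std::max(scaled_initial, new_size);
      }
      return s;
    }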
@@ -486,8 +483,7 @@
     _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
 
     // At this point all three sizes have been checked against the
-    // maximum sizes but have not been checked for consistency
-    // among the three.
+    // maximum sizes but have not been checked for consistency among the three.
 
     // Final check min <= initial <= max
     _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
@@ -495,7 +491,7 @@
     _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
-  // Write back to flags if necessary
+  // Write back to flags if necessary.
   if (NewSize != _initial_gen0_size) {
     FLAG_SET_ERGO(uintx, NewSize, _initial_gen0_size);
   }
@@ -541,7 +537,7 @@
 }
 
 // Minimum sizes of the generations may be different than
-// the initial sizes.  An inconsistently is permitted here
+// the initial sizes.  An inconsistency is permitted here
 // in the total size that can be specified explicitly by
 // command line specification of OldSize and NewSize and
 // also a command line specification of -Xms.  Issue a warning
@@ -553,12 +549,12 @@
   // At this point the minimum, initial and maximum sizes
   // of the overall heap and of gen0 have been determined.
   // The maximum gen1 size can be determined from the maximum gen0
-  // and maximum heap size since no explicit flags exits
+  // and maximum heap size since no explicit flags exist
   // for setting the gen1 maximum.
   _max_gen1_size = MAX2(_max_heap_byte_size - _max_gen0_size, _gen_alignment);
 
   // If no explicit command line flag has been set for the
-  // gen1 size, use what is left for gen1.
+  // gen1 size, use what is left for gen1.
   if (!FLAG_IS_CMDLINE(OldSize)) {
     // The user has not specified any value but the ergonomics
     // may have chosen a value (which may or may not be consistent
@@ -570,14 +566,14 @@
     // _max_gen1_size has already been made consistent above
     FLAG_SET_ERGO(uintx, OldSize, _initial_gen1_size);
   } else {
-    // It's been explicitly set on the command line.  Use the
+    // OldSize has been explicitly set on the command line. Use the
     // OldSize and then determine the consequences.
     _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
     _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
-    // The generation minimums and the overall heap mimimum should
+    // The generation minimums and the overall heap minimum should
     // be within one generation alignment.
     if ((_min_gen1_size + _min_gen0_size + _gen_alignment) < _min_heap_byte_size) {
       warning("Inconsistency between minimum heap size and minimum "
@@ -599,7 +595,7 @@
               _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
-    // Initial size
+    // The same as above for the old gen initial size.
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
                           _initial_heap_byte_size)) {
       if (PrintGCDetails && Verbose) {
@@ -609,10 +605,10 @@
       }
     }
   }
-  // Enforce the maximum gen1 size.
+
   _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
-  // Check that min gen1 <= initial gen1 <= max gen1
+  // Make sure that min gen1 <= initial gen1 <= max gen1.
   _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
   _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
 
@@ -653,10 +649,9 @@
 
   HeapWord* result = NULL;
 
-  // Loop until the allocation is satisified,
-  // or unsatisfied after GC.
+  // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
-    HandleMark hm; // discard any handles allocated in each iteration
+    HandleMark hm; // Discard any handles allocated in each iteration.
 
     // First allocation attempt is lock-free.
     Generation *gen0 = gch->get_gen(0);
@@ -669,7 +664,7 @@
         return result;
       }
     }
-    unsigned int gc_count_before;  // read inside the Heap_lock locked region
+    unsigned int gc_count_before;  // Read inside the Heap_lock locked region.
     {
       MutexLocker ml(Heap_lock);
       if (PrintGC && Verbose) {
@@ -688,19 +683,19 @@
 
       if (GC_locker::is_active_and_needs_gc()) {
         if (is_tlab) {
-          return NULL;  // Caller will retry allocating individual object
+          return NULL;  // Caller will retry allocating individual object.
         }
         if (!gch->is_maximal_no_gc()) {
-          // Try and expand heap to satisfy request
+          // Try and expand heap to satisfy request.
           result = expand_heap_and_allocate(size, is_tlab);
-          // result could be null if we are out of space
+          // Result could be null if we are out of space.
           if (result != NULL) {
             return result;
           }
         }
 
         if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
-          return NULL; // we didn't get to do a GC and we didn't get any memory
+          return NULL; // We didn't get to do a GC and we didn't get any memory.
         }
 
         // If this thread is not in a jni critical section, we stall
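The surrounding loop keeps retrying until the allocation is satisfied or a completed GC still cannot satisfy it; the GC-locker branch above either expands the heap, stalls, or gives up. A skeletal sketch of that control flow, where every callee is a hypothetical stand-in for the VM's locked allocation, expansion, stalling, and collection primitives:

    #include <cstddef>

    // Hypothetical stand-ins (declarations only).
    void* try_allocate(size_t size);
    bool  gc_locker_active();
    void* expand_heap_and_allocate(size_t size);
    int   gclocker_retry_limit();
    void  stall_until_gc_locker_clears();
    void* collect_and_allocate(size_t size);
    bool  gc_was_locked_out();

    void* satisfy_allocation(size_t size, bool is_tlab) {
      for (int stalled = 0; /* return inside */; ) {
        if (void* p = try_allocate(size)) return p;      // fast path
        if (gc_locker_active()) {
          if (is_tlab) return NULL;                      // caller retries per object
          if (void* p = expand_heap_and_allocate(size)) return p;
          if (stalled++ > gclocker_retry_limit()) return NULL;
          stall_until_gc_locker_clears();                // wait, then loop
          continue;
        }
        if (void* p = collect_and_allocate(size)) return p;
        if (gc_was_locked_out()) continue;               // retry and/or stall
        return NULL;                                     // GC ran and failed
      }
    }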
@@ -735,7 +730,7 @@
       result = op.result();
       if (op.gc_locked()) {
          assert(result == NULL, "must be NULL if gc_locked() is true");
-         continue;  // retry and/or stall as necessary
+         continue;  // Retry and/or stall as necessary.
       }
 
       // Allocation has failed and a collection
@@ -796,7 +791,7 @@
     if (!gch->is_maximal_no_gc()) {
       result = expand_heap_and_allocate(size, is_tlab);
     }
-    return result;   // could be null if we are out of space
+    return result;   // Could be null if we are out of space.
   } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
@@ -918,10 +913,8 @@
                                        GCCause::_metadata_GC_threshold);
     VMThread::execute(&op);
 
-    // If GC was locked out, try again.  Check
-    // before checking success because the prologue
-    // could have succeeded and the GC still have
-    // been locked out.
+    // If GC was locked out, try again. Check before checking success because the
+    // prologue could have succeeded and the GC still have been locked out.
     if (op.gc_locked()) {
       continue;
     }
@@ -982,10 +975,117 @@
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
-  // initialize the policy counters - 2 collectors, 3 generations
+  // Initialize the policy counters - 2 collectors, 3 generations.
   if (UseParNewGC) {
     _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
   } else {
     _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
   }
 }
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+// Testing that the NewSize flag is handled correctly is hard because it
+// depends on so many other configurable variables. This test only tries to
+// verify that there are some basic rules for NewSize honored by the policies.
+class TestGenCollectorPolicy {
+public:
+  static void test() {
+    size_t flag_value;
+
+    save_flags();
+
+    // Set some limits that make the math simple.
+    FLAG_SET_ERGO(uintx, MaxHeapSize, 180 * M);
+    FLAG_SET_ERGO(uintx, InitialHeapSize, 120 * M);
+    Arguments::set_min_heap_size(40 * M);
+
+    // If NewSize is set on the command line, it should be used
+    // for both min and initial young size if less than min heap.
+    flag_value = 20 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize is set on command line, but is larger than the min
+    // heap size, it should only be used for initial young size.
+    flag_value = 80 * M;
+    FLAG_SET_CMDLINE(uintx, NewSize, flag_value);
+    verify_initial(flag_value);
+
+    // If NewSize has been ergonomically set, the collector policy
+    // should use it for min but calculate the initial young size
+    // using NewRatio.
+    flag_value = 20 * M;
+    FLAG_SET_ERGO(uintx, NewSize, flag_value);
+    verify_min(flag_value);
+    verify_scaled_initial(InitialHeapSize);
+
+    restore_flags();
+  }
+
+  static void verify_min(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.min_gen0_size() <= expected, err_msg("%zu > %zu", msp.min_gen0_size(), expected));
+  }
+
+  static void verify_initial(size_t expected) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+  }
+
+  static void verify_scaled_initial(size_t initial_heap_size) {
+    MarkSweepPolicy msp;
+    msp.initialize_all();
+
+    size_t expected = msp.scale_by_NewRatio_aligned(initial_heap_size);
+    assert(msp.initial_gen0_size() == expected, err_msg("%zu != %zu", msp.initial_gen0_size(), expected));
+    assert(FLAG_IS_ERGO(NewSize) && NewSize == expected,
+        err_msg("NewSize should have been set ergonomically to %zu, but was %zu", expected, NewSize));
+  }
+
+private:
+  static size_t original_InitialHeapSize;
+  static size_t original_MaxHeapSize;
+  static size_t original_MaxNewSize;
+  static size_t original_MinHeapDeltaBytes;
+  static size_t original_NewSize;
+  static size_t original_OldSize;
+
+  static void save_flags() {
+    original_InitialHeapSize   = InitialHeapSize;
+    original_MaxHeapSize       = MaxHeapSize;
+    original_MaxNewSize        = MaxNewSize;
+    original_MinHeapDeltaBytes = MinHeapDeltaBytes;
+    original_NewSize           = NewSize;
+    original_OldSize           = OldSize;
+  }
+
+  static void restore_flags() {
+    InitialHeapSize   = original_InitialHeapSize;
+    MaxHeapSize       = original_MaxHeapSize;
+    MaxNewSize        = original_MaxNewSize;
+    MinHeapDeltaBytes = original_MinHeapDeltaBytes;
+    NewSize           = original_NewSize;
+    OldSize           = original_OldSize;
+  }
+};
+
+size_t TestGenCollectorPolicy::original_InitialHeapSize   = 0;
+size_t TestGenCollectorPolicy::original_MaxHeapSize       = 0;
+size_t TestGenCollectorPolicy::original_MaxNewSize        = 0;
+size_t TestGenCollectorPolicy::original_MinHeapDeltaBytes = 0;
+size_t TestGenCollectorPolicy::original_NewSize           = 0;
+size_t TestGenCollectorPolicy::original_OldSize           = 0;
+
+void TestNewSize_test() {
+  TestGenCollectorPolicy::test();
+}
+
+#endif
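The TestNewSize_test hook is presumably registered with the internal VM test runner elsewhere in the changeset, so a non-product VM would execute it via -XX:+ExecuteInternalVMTests. One fragility worth noting: save_flags()/restore_flags() must be kept in sync by hand with every flag the test touches. An RAII guard is one way to make that automatic; a hypothetical sketch, not part of the changeset (the pointer type would be adjusted to the flag's actual uintx type):

    #include <cstddef>

    // Hypothetical guard: snapshot a flag on construction, restore it on
    // scope exit, so each flag declares its own save/restore in one line.
    class ScopedSizeFlag {
      size_t* _flag;
      size_t  _saved;
     public:
      explicit ScopedSizeFlag(size_t* flag) : _flag(flag), _saved(*flag) {}
      ~ScopedSizeFlag() { *_flag = _saved; }
    };

Usage inside test() would then be e.g. ScopedSizeFlag guard_new_size(&NewSize); with no matching restore call needed.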
--- a/src/share/vm/memory/collectorPolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -76,10 +76,10 @@
   size_t _heap_alignment;
 
   // Needed to keep information if MaxHeapSize was set on the command line
-  // when the flag value is aligned etc by ergonomics
+  // when the flag value is aligned, etc., by ergonomics.
   bool _max_heap_size_cmdline;
 
-  // The sizing of the heap are controlled by a sizing policy.
+  // The sizing of the heap is controlled by a sizing policy.
   AdaptiveSizePolicy* _size_policy;
 
   // Set to true when policy wants soft refs cleared.
@@ -102,7 +102,7 @@
     initialize_size_info();
   }
 
-  // Return maximum heap alignment that may be imposed by the policy
+  // Return maximum heap alignment that may be imposed by the policy.
   static size_t compute_heap_alignment();
 
   size_t space_alignment()        { return _space_alignment; }
@@ -180,7 +180,7 @@
                                                        size_t size,
                                                        Metaspace::MetadataType mdtype);
 
-  // Performace Counter support
+  // Performance Counter support
   GCPolicyCounters* counters()     { return _gc_policy_counters; }
 
   // Create the jstat counters for the GC policy.  By default, policy's
@@ -220,6 +220,7 @@
 };
 
 class GenCollectorPolicy : public CollectorPolicy {
+friend class TestGenCollectorPolicy;
  protected:
   size_t _min_gen0_size;
   size_t _initial_gen0_size;
@@ -231,9 +232,8 @@
 
   GenerationSpec **_generations;
 
-  // Return true if an allocation should be attempted in the older
-  // generation if it fails in the younger generation.  Return
-  // false, otherwise.
+  // Return true if an allocation should be attempted in the older generation
+  // if it fails in the younger generation.  Return false, otherwise.
   virtual bool should_try_older_generation_allocation(size_t word_size) const;
 
   void initialize_flags();
@@ -245,7 +245,7 @@
   // Try to allocate space by expanding the heap.
   virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
-  // Compute max heap alignment
+  // Compute max heap alignment.
   size_t compute_max_alignment();
 
  // Scale the base_size by NewRatio according to
@@ -253,7 +253,7 @@
  // and align by min_alignment()
  size_t scale_by_NewRatio_aligned(size_t base_size);
 
- // Bound the value by the given maximum minus the min_alignment
+ // Bound the value by the given maximum minus the min_alignment.
  size_t bound_minus_alignment(size_t desired_size, size_t maximum_size);
 
  public:
--- a/src/share/vm/memory/defNewGeneration.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -61,7 +61,6 @@
 DefNewGeneration::KeepAliveClosure::
 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
   GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind.");
   _rs = (CardTableRS*)rs;
 }
 
@@ -619,16 +618,14 @@
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
 
-  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+  int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
 
   gch->gen_process_strong_roots(_level,
                                 true,  // Process younger gens, if any,
                                        // as strong roots.
                                 true,  // activate StrongRootsScope
-                                true,  // is scavenging
                                 SharedHeap::ScanningOption(so),
                                 &fsc_with_no_gc_barrier,
-                                true,   // walk *all* scavengable nmethods
                                 &fsc_with_gc_barrier,
                                 &klass_scan_closure);
 
@@ -667,9 +664,6 @@
     // for full GC's.
     AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
     size_policy->reset_gc_overhead_limit_count();
-    if (PrintGC && !PrintGCDetails) {
-      gch->print_heap_change(gch_prev_used);
-    }
     assert(!gch->incremental_collection_failed(), "Should be clear");
   } else {
     assert(_promo_failure_scan_stack.is_empty(), "post condition");
@@ -695,6 +689,9 @@
     // Reset the PromotionFailureALot counters.
     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
   }
+  if (PrintGC && !PrintGCDetails) {
+    gch->print_heap_change(gch_prev_used);
+  }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
@@ -1086,6 +1083,10 @@
   return eden()->capacity();
 }
 
+size_t DefNewGeneration::tlab_used() const {
+  return eden()->used();
+}
+
 size_t DefNewGeneration::unsafe_max_tlab_alloc() const {
   return unsafe_max_alloc_nogc();
 }
--- a/src/share/vm/memory/defNewGeneration.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/defNewGeneration.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -239,6 +239,7 @@
   // Thread-local allocation buffers
   bool supports_tlab_allocation() const { return true; }
   size_t tlab_capacity() const;
+  size_t tlab_used() const;
   size_t unsafe_max_tlab_alloc() const;
 
   // Grow the generation by the specified number of bytes.
--- a/src/share/vm/memory/freeBlockDictionary.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/freeBlockDictionary.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/freeList.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/freeList.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/freeList.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/freeList.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/gcLocker.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/gcLocker.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/gcLocker.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/gcLocker.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -126,7 +126,7 @@
                         (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // It is important to do this in a way such that concurrent readers can't
-  // temporarily think somethings in the heap.  (Seen this happen in asserts.)
+  // temporarily think something is in the heap.  (Seen this happen in asserts.)
   _reserved.set_word_size(0);
   _reserved.set_start((HeapWord*)heap_rs.base());
   size_t actual_heap_size = heap_rs.size();
@@ -592,23 +592,14 @@
 gen_process_strong_roots(int level,
                          bool younger_gens_as_roots,
                          bool activate_scope,
-                         bool is_scavenging,
                          SharedHeap::ScanningOption so,
                          OopsInGenClosure* not_older_gens,
-                         bool do_code_roots,
                          OopsInGenClosure* older_gens,
                          KlassClosure* klass_closure) {
   // General strong roots.
 
-  if (!do_code_roots) {
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
-                                     not_older_gens, NULL, klass_closure);
-  } else {
-    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
-    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
-    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
-                                     not_older_gens, &code_roots, klass_closure);
-  }
+  SharedHeap::process_strong_roots(activate_scope, so,
+                                   not_older_gens, klass_closure);
 
   if (younger_gens_as_roots) {
     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
@@ -630,9 +621,8 @@
   _gen_process_strong_tasks->all_tasks_completed();
 }
 
-void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots) {
-  SharedHeap::process_weak_roots(root_closure, code_roots);
+void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
+  SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
@@ -673,10 +663,6 @@
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
@@ -937,6 +923,16 @@
   return result;
 }
 
+size_t GenCollectedHeap::tlab_used(Thread* thr) const {
+  size_t result = 0;
+  for (int i = 0; i < _n_gens; i += 1) {
+    if (_gens[i]->supports_tlab_allocation()) {
+      result += _gens[i]->tlab_used();
+    }
+  }
+  return result;
+}
+
 size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
   size_t result = 0;
   for (int i = 0; i < _n_gens; i += 1) {
@@ -1267,7 +1263,7 @@
 };
 
 jlong GenCollectedHeap::millis_since_last_gc() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   GenTimeOfLastGCClosure tolgc_cl(now);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -166,14 +166,6 @@
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity.  In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation.  It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }
 
@@ -256,6 +248,7 @@
   // Section on TLAB's.
   virtual bool supports_tlab_allocation() const;
   virtual size_t tlab_capacity(Thread* thr) const;
+  virtual size_t tlab_used(Thread* thr) const;
   virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
   virtual HeapWord* allocate_new_tlab(size_t size);
 
@@ -323,7 +316,7 @@
   }
 
   // Update the gc statistics for each generation.
-  // "level" is the level of the lastest collection
+  // "level" is the level of the latest collection.
   void update_gc_stats(int current_level, bool full) {
     for (int i = 0; i < _n_gens; i++) {
       _gens[i]->update_gc_stats(current_level, full);
@@ -419,18 +412,15 @@
                                 // The remaining arguments are in an order
                                 // consistent with SharedHeap::process_strong_roots:
                                 bool activate_scope,
-                                bool is_scavenging,
                                 SharedHeap::ScanningOption so,
                                 OopsInGenClosure* not_older_gens,
-                                bool do_code_roots,
                                 OopsInGenClosure* older_gens,
                                 KlassClosure* klass_closure);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots);
+  // Apply "root_closure" to all the weak roots of the system.
+  // These include JNI weak roots, string table,
+  // and referents of reachable weak refs.
+  void gen_process_weak_roots(OopClosure* root_closure);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
--- a/src/share/vm/memory/genMarkSweep.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genMarkSweep.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -148,8 +148,8 @@
   Universe::update_heap_info_at_gc();
 
   // Update time of last gc for all generations we collected
-  // (which curently is all the generations in the heap).
-  // We need to use a monotonically non-deccreasing time in ms
+  // (which currently is all the generations in the heap).
+  // We need to use a monotonically non-decreasing time in ms
   // or we will see time-warp warnings and os::javaTimeMillis()
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
@@ -210,10 +210,8 @@
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
                                 true,  // activate StrongRootsScope
-                                false, // not scavenging
                                 SharedHeap::SO_SystemClasses,
                                 &follow_root_closure,
-                                true,   // walk code active on stacks
                                 &follow_root_closure,
                                 &follow_klass_closure);
 
@@ -296,19 +294,12 @@
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
                                 true,  // activate StrongRootsScope
-                                false, // not scavenging
-                                SharedHeap::SO_AllClasses,
+                                SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
                                 &adjust_pointer_closure,
-                                false, // do not walk code
                                 &adjust_pointer_closure,
                                 &adjust_klass_closure);
 
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
-                                                   /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_pointer_closure,
-                              &adjust_code_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
--- a/src/share/vm/memory/genOopClosures.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genOopClosures.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -45,7 +45,6 @@
   // Barrier set for the heap, must be set after heap is initialized
   if (_rs == NULL) {
     GenRemSet* rs = SharedHeap::heap()->rem_set();
-    assert(rs->rs_kind() == GenRemSet::CardTable, "Wrong rem set kind");
     _rs = (CardTableRS*)rs;
   }
 }
--- a/src/share/vm/memory/genRemSet.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genRemSet.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,7 @@
 // enumerate ref fields that have been modified (since the last
 // enumeration.)
 
-uintx GenRemSet::max_alignment_constraint(Name nm) {
-  assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
+uintx GenRemSet::max_alignment_constraint() {
   return CardTableRS::ct_max_alignment_constraint();
 }
 
--- a/src/share/vm/memory/genRemSet.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/genRemSet.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 
 #include "oops/oop.hpp"
 
-// A GenRemSet provides ways of iterating over pointers accross generations.
+// A GenRemSet provides ways of iterating over pointers across generations.
 // (This is especially useful for older-to-younger.)
 
 class Generation;
@@ -53,19 +53,12 @@
   KlassRemSet _klass_rem_set;
 
 public:
-  enum Name {
-    CardTable,
-    Other
-  };
-
   GenRemSet(BarrierSet * bs) : _bs(bs) {}
   GenRemSet() : _bs(NULL) {}
 
-  virtual Name rs_kind() = 0;
-
   // These are for dynamic downcasts.  Unfortunately it names the
   // possible subtypes (but not that they are subtypes!)  Return NULL if
-  // the cast is invalide.
+  // the cast is invalid.
   virtual CardTableRS* as_CardTableRS() { return NULL; }
 
   // Return the barrier set associated with "this."
@@ -106,10 +99,9 @@
   // within the heap, this function tells whether they are met.
   virtual bool is_aligned(HeapWord* addr) = 0;
 
-  // If the RS (or BS) imposes an aligment constraint on maximum heap size.
-  // (This must be static, and dispatch on "nm", because it is called
-  // before an RS is created.)
-  static uintx max_alignment_constraint(Name nm);
+  // Returns any alignment constraint that the remembered set imposes upon the
+  // heap.
+  static uintx max_alignment_constraint();
 
   virtual void verify() = 0;
 
--- a/src/share/vm/memory/generation.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/generation.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -289,7 +289,7 @@
 
   // These functions return the addresses of the fields that define the
   // boundaries of the contiguous allocation area.  (These fields should be
-  // physicall near to one another.)
+  // physically near to one another.)
   virtual HeapWord** top_addr() const { return NULL; }
   virtual HeapWord** end_addr() const { return NULL; }
 
@@ -299,6 +299,10 @@
     guarantee(false, "Generation doesn't support thread local allocation buffers");
     return 0;
   }
+  virtual size_t tlab_used() const {
+    guarantee(false, "Generation doesn't support thread local allocation buffers");
+    return 0;
+  }
   virtual size_t unsafe_max_tlab_alloc() const {
     guarantee(false, "Generation doesn't support thread local allocation buffers");
     return 0;
@@ -485,7 +489,7 @@
   // General signature...
   virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
   // ...and specializations for de-virtualization.  (The general
-  // implemention of the _nv versions call the virtual version.
+  // implementation of the _nv versions calls the virtual version.
   // Note that the _nv suffix is not really semantically necessary,
   // but it avoids some not-so-useful warnings on Solaris.)
 #define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)             \
--- a/src/share/vm/memory/generationSpec.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/generationSpec.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/heap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/heap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -183,7 +183,7 @@
   size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
   assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
 
-  // First check if we can satify request from freelist
+  // First check if we can satisfy request from freelist
   debug_only(verify());
   HeapBlock* block = search_freelist(number_of_segments, is_critical);
   debug_only(if (VerifyCodeCacheOften) verify());
@@ -372,7 +372,7 @@
   }
 
   // Scan for right place to put into list. List
-  // is sorted by increasing addresseses
+  // is sorted by increasing addresses
   FreeBlock* prev = NULL;
   FreeBlock* cur  = _freelist;
   while(cur != NULL && cur < b) {
--- a/src/share/vm/memory/heap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/heap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -127,8 +127,8 @@
   // Heap extents
   bool  reserve(size_t reserved_size, size_t committed_size, size_t segment_size);
   void  release();                               // releases all allocated memory
-  bool  expand_by(size_t size);                  // expands commited memory by size
-  void  shrink_by(size_t size);                  // shrinks commited memory by size
+  bool  expand_by(size_t size);                  // expands committed memory by size
+  void  shrink_by(size_t size);                  // shrinks committed memory by size
   void  clear();                                 // clears all heap contents
 
   // Memory allocation
--- a/src/share/vm/memory/heapInspection.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/heapInspection.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -347,7 +347,7 @@
 
 #endif // INCLUDE_SERVICES
 
-// These declarations are needed since teh declaration of KlassInfoTable and
+// These declarations are needed since the declaration of KlassInfoTable and
 // KlassInfoClosure are guarded by #if INCLUDE_SERVICES
 class KlassInfoTable;
 class KlassInfoClosure;
--- a/src/share/vm/memory/iterator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/iterator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/iterator.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/iterator.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -128,6 +128,11 @@
   virtual void do_klass(Klass* k) = 0;
 };
 
+class CLDClosure : public Closure {
+ public:
+  virtual void do_cld(ClassLoaderData* cld) = 0;
+};
+
 class KlassToOopClosure : public KlassClosure {
   OopClosure* _oop_closure;
  public:
@@ -135,7 +140,7 @@
   virtual void do_klass(Klass* k);
 };
 
-class CLDToOopClosure {
+class CLDToOopClosure : public CLDClosure {
   OopClosure* _oop_closure;
   KlassToOopClosure _klass_closure;
   bool _must_claim_cld;
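Previously CLDToOopClosure had no abstract base, so it could not be passed through an interface that only needs "some closure over class loader data". The new CLDClosure supplies that base. A minimal, self-contained sketch of the shape, with ClassLoaderData left opaque and CountingCLDClosure as an illustrative subclass:

    class ClassLoaderData;   // opaque here

    class Closure {
     public:
      virtual ~Closure() {}
    };

    class CLDClosure : public Closure {
     public:
      virtual void do_cld(ClassLoaderData* cld) = 0;
    };

    // Any walker that takes a CLDClosure* can now drive this, or
    // CLDToOopClosure, without knowing the concrete type.
    class CountingCLDClosure : public CLDClosure {
      int _count;
     public:
      CountingCLDClosure() : _count(0) {}
      virtual void do_cld(ClassLoaderData* cld) { (void)cld; ++_count; }
      int count() const { return _count; }
    };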
--- a/src/share/vm/memory/metachunk.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metachunk.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -143,6 +143,8 @@
   void set_is_tagged_free(bool v) { _is_tagged_free = v; }
 #endif
 
+  bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
+
   NOT_PRODUCT(void mangle();)
 
   void print_on(outputStream* st) const;
--- a/src/share/vm/memory/metaspace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -46,8 +46,8 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
-typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
-typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
+typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
+typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
@@ -513,8 +513,6 @@
   // Unlink empty VirtualSpaceNodes and free it.
   void purge(ChunkManager* chunk_manager);
 
-  bool contains(const void *ptr);
-
   void print_on(outputStream* st) const;
 
   class VirtualSpaceListIterator : public StackObj {
@@ -558,7 +556,7 @@
 
  private:
 
-  // protects allocations and contains.
+  // protects allocations
   Mutex* const _lock;
 
   // Type of metadata allocated.
@@ -595,7 +593,11 @@
  private:
   // Accessors
   Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
-  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
+  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
+    // ensure lock-free iteration sees fully initialized node
+    OrderAccess::storestore();
+    _chunks_in_use[index] = v;
+  }
 
   BlockFreelist* block_freelists() const {
     return (BlockFreelist*) &_block_freelists;
@@ -708,6 +710,8 @@
   void print_on(outputStream* st) const;
   void locked_print_chunks_in_use_on(outputStream* st) const;
 
+  bool contains(const void *ptr);
+
   void verify();
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
@@ -742,7 +746,7 @@
   assert_lock_strong(SpaceManager::expand_lock());
   _container_count++;
   assert(_container_count == container_count_slow(),
-         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                  " container_count_slow() " SIZE_FORMAT,
                  _container_count, container_count_slow()));
 }
@@ -755,7 +759,7 @@
 #ifdef ASSERT
 void VirtualSpaceNode::verify_container_count() {
   assert(_container_count == container_count_slow(),
-    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+    err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
             " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
 }
 #endif
@@ -786,7 +790,7 @@
     return NULL;
   }
 
-  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     // Dark matter.  Too small for dictionary.
     return NULL;
   }
@@ -806,7 +810,7 @@
   MetaWord* new_block = (MetaWord*)free_block;
   assert(block_size >= word_size, "Incorrect size of block from freelist");
   const size_t unused = block_size - word_size;
-  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     return_block(new_block + word_size, unused);
   }
 
@@ -1159,8 +1163,6 @@
   } else {
     assert(new_entry->reserved_words() == vs_word_size,
         "Reserved memory size differs from requested memory size");
-    // ensure lock-free iteration sees fully initialized node
-    OrderAccess::storestore();
     link_vs(new_entry);
     return true;
   }
@@ -1287,19 +1289,6 @@
   }
 }
 
-bool VirtualSpaceList::contains(const void *ptr) {
-  VirtualSpaceNode* list = virtual_space_list();
-  VirtualSpaceListIterator iter(list);
-  while (iter.repeat()) {
-    VirtualSpaceNode* node = iter.get_next();
-    if (node->reserved()->contains(ptr)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 // MetaspaceGC methods
 
 // VM_CollectForMetadataAllocation is the vm operation used to GC.
@@ -1466,9 +1455,10 @@
 
   // No expansion, now see if we want to shrink
   // We would never want to shrink more than this
+  assert(capacity_until_GC >= minimum_desired_capacity,
+         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
+                 capacity_until_GC, minimum_desired_capacity));
   size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
-  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
-    max_shrink_bytes));
 
   // Should shrinking be considered?
   if (MaxMetaspaceFreeRatio < 100) {
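Moving the assert above the subtraction is more than a tidy-up: max_shrink_bytes is unsigned, so the removed check max_shrink_bytes >= 0 was vacuously true even after the subtraction had wrapped around. Validating the operands first catches the real error. A tiny demonstration of the pitfall (standalone, illustrative names):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    size_t headroom(size_t capacity, size_t minimum) {
      // Validate before the unsigned subtraction can wrap.
      assert(capacity >= minimum && "capacity below minimum");
      return capacity - minimum;
    }

    int main() {
      size_t a = 10, b = 20;
      size_t wrapped = a - b;             // wraps to a huge positive value
      printf("a - b = %zu\n", wrapped);   // not negative, so '>= 0' proves nothing
      printf("headroom(b, a) = %zu\n", headroom(b, a));   // prints 10
      // headroom(a, b) would fail the assert instead of silently wrapping.
      return 0;
    }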
@@ -2250,7 +2240,7 @@
 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
   assert_lock_strong(_lock);
   size_t raw_word_size = get_raw_word_size(word_size);
-  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
+  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
   assert(raw_word_size >= min_size,
          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
@@ -2306,7 +2296,7 @@
 void SpaceManager::retire_current_chunk() {
   if (current_chunk() != NULL) {
     size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
       inc_used_metrics(remaining_words);
     }
@@ -2392,9 +2382,24 @@
   return result;
 }
 
+// This function looks at the chunks in the metaspace without locking.
+// The chunks are added with store ordering and are not deleted except
+// at unloading time.
+bool SpaceManager::contains(const void *ptr) {
+  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+    Metachunk* curr = chunks_in_use(i);
+    while (curr != NULL) {
+      if (curr->contains(ptr)) return true;
+      curr = curr->next();
+    }
+  }
+  return false;
+}
+
 void SpaceManager::verify() {
   // If there are blocks in the dictionary, then
-  // verfication of chunks does not work since
+  // verification of chunks does not work since
   // being in the dictionary alters a chunk.
   if (block_freelists()->total_size() == 0) {
     for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
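SpaceManager::contains, added above, scans the in-use chunk lists without taking a lock. That is sound because set_chunks_in_use now publishes each chunk behind a storestore barrier and chunks are not reclaimed until unloading. A portable C++11 analogue of that publish/scan pairing, sketched with std::atomic (the reader uses acquire where HotSpot relies on dependency ordering):

    #include <atomic>

    struct Node {
      const void* lo;    // [lo, hi) covered by this node
      const void* hi;
      Node*       next;
    };

    static std::atomic<Node*> head(nullptr);

    // Publisher: fully initialize the node, then publish with release so a
    // lock-free reader that observes the pointer also observes its fields.
    void publish(Node* n) {
      n->next = head.load(std::memory_order_relaxed);
      head.store(n, std::memory_order_release);
    }

    // Reader: safe with no lock because nodes are never freed while readers
    // run (mirrors "not deleted except at unloading time").
    bool contains(const void* p) {
      for (Node* n = head.load(std::memory_order_acquire); n != nullptr; n = n->next) {
        if (n->lo <= p && p < n->hi) return true;
      }
      return false;
    }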
@@ -2863,7 +2868,7 @@
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
     if (higher_address <= (address)klass_encoding_max) {
-      lower_base = 0; // effectively lower base is zero.
+      lower_base = 0; // Effectively lower base is zero.
     }
   }
 
@@ -3274,7 +3279,7 @@
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@@ -3289,7 +3294,7 @@
   } else {
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@@ -3463,17 +3468,12 @@
   }
 }
 
-bool Metaspace::contains(const void * ptr) {
-  if (MetaspaceShared::is_in_shared_space(ptr)) {
-    return true;
+bool Metaspace::contains(const void* ptr) {
+  if (vsm()->contains(ptr)) return true;
+  if (using_class_space()) {
+    return class_vsm()->contains(ptr);
   }
-  // This is checked while unlocked.  As long as the virtualspaces are added
-  // at the end, the pointer will be in one of them.  The virtual spaces
-  // aren't deleted presently.  When they are, some sort of locking might
-  // be needed.  Note, locking this can cause inversion problems with the
-  // caller in MetaspaceObj::is_metadata() function.
-  return space_list()->contains(ptr) ||
-         (using_class_space() && class_space_list()->contains(ptr));
+  return false;
 }
 
 void Metaspace::verify() {
--- a/src/share/vm/memory/metaspace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metaspace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -120,6 +120,7 @@
   static size_t compressed_class_space_size() {
     return _compressed_class_space_size;
   }
+
   static void set_compressed_class_space_size(size_t size) {
     _compressed_class_space_size = size;
   }
@@ -225,7 +226,7 @@
   MetaWord* expand_and_allocate(size_t size,
                                 MetadataType mdtype);
 
-  static bool contains(const void *ptr);
+  bool contains(const void* ptr);
   void dump(outputStream* const out) const;
 
   // Free empty virtualspaces
--- a/src/share/vm/memory/metaspaceCounters.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metaspaceCounters.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/metaspaceCounters.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metaspaceCounters.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/metaspaceShared.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/metaspaceShared.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -487,7 +487,7 @@
   NOT_PRODUCT(SystemDictionary::verify();)
 
   // Copy the symbol table, and the system dictionary to the shared
-  // space in usable form.  Copy the hastable
+  // space in usable form.  Copy the hashtable
   // buckets first [read-write], then copy the linked lists of entries
   // [read-only].
 
@@ -953,7 +953,7 @@
 
   // The following data in the shared misc data region are the linked
   // list elements (HashtableEntry objects) for the symbol table, string
-  // table, and shared dictionary.  The heap objects refered to by the
+  // table, and shared dictionary.  The heap objects referred to by the
   // symbol table, string table, and shared dictionary are permanent and
   // unmovable.  Since new entries added to the string and symbol tables
   // are always added at the beginning of the linked lists, THESE LINKED
--- a/src/share/vm/memory/modRefBarrierSet.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/modRefBarrierSet.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -60,7 +60,7 @@
   void read_ref_field(void* field) {}
   void read_prim_field(HeapWord* field, size_t bytes) {}
 protected:
-  virtual void write_ref_field_work(void* field, oop new_val) = 0;
+  virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
 public:
   void write_prim_field(HeapWord* field, size_t bytes,
                         juint val1, juint val2) {}
@@ -72,7 +72,7 @@
   bool has_read_region_opt() { return false; }
 
 
-  // These operations should assert false unless the correponding operation
+  // These operations should assert false unless the corresponding operation
   // above returns true.
   void read_ref_array(MemRegion mr) {
     assert(false, "can't call");
--- a/src/share/vm/memory/referenceProcessor.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -45,7 +45,7 @@
 }
 
 void ReferenceProcessor::init_statics() {
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
 
@@ -62,7 +62,7 @@
   }
   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
             RefDiscoveryPolicy == ReferentBasedDiscovery,
-            "Unrecongnized RefDiscoveryPolicy");
+            "Unrecognized RefDiscoveryPolicy");
   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
@@ -95,12 +95,11 @@
                                        uint      mt_discovery_degree,
                                        bool      atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header,
-                                       bool      discovered_list_needs_barrier)  :
+                                       bool      discovered_list_needs_post_barrier)  :
   _discovering_refs(false),
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
-  _discovered_list_needs_barrier(discovered_list_needs_barrier),
-  _bs(NULL),
+  _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -126,10 +125,6 @@
     _discovered_refs[i].set_length(0);
   }
 
-  // If we do barriers, cache a copy of the barrier set.
-  if (discovered_list_needs_barrier) {
-    _bs = Universe::heap()->barrier_set();
-  }
   setup_policy(false /* default soft ref policy */);
 }
 
@@ -157,7 +152,7 @@
   // Update (advance) the soft ref master clock field. This must be done
   // after processing the soft ref list.
 
-  // We need a monotonically non-deccreasing time in ms but
+  // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
@@ -173,7 +168,7 @@
   // javaTimeNanos(), which is guaranteed to be monotonically
   // non-decreasing provided the underlying platform provides such
   // a time source (and it is bug free).
-  // In product mode, however, protect ourselves from non-monotonicty.
+  // In product mode, however, protect ourselves from non-monotonicity.
   if (now > _soft_ref_timestamp_clock) {
     _soft_ref_timestamp_clock = now;
     java_lang_ref_SoftReference::set_clock(now);
@@ -317,13 +312,9 @@
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
-  // Do the oop-check on pending_list_addr missed in
-  // enqueue_discovered_reflist. We should probably
-  // do a raw oop_check so that future such idempotent
-  // oop_stores relying on the oop-check side-effect
-  // may be elided automatically and safely without
-  // affecting correctness.
-  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
+  // Do the post-barrier on pending_list_addr missed in
+  // enqueue_discovered_reflist.
+  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
   ref->disable_discovery();
@@ -358,7 +349,7 @@
 
   oop obj = NULL;
   oop next_d = refs_list.head();
-  if (pending_list_uses_discovered_field()) { // New behaviour
+  if (pending_list_uses_discovered_field()) { // New behavior
     // Walk down the list, self-looping the next field
     // so that the References are not considered active.
     while (obj != next_d) {
@@ -372,18 +363,20 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
       // Self-loop next, so as to make Ref not active.
-      java_lang_ref_Reference::set_next(obj, obj);
+      // Post-barrier not needed when looping to self.
+      java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-        // Need oop_check on pending_list_addr above;
-        // see special oop-check code at the end of
+        // Need post-barrier on pending_list_addr above;
+        // see special post-barrier code at the end of
         // enqueue_discovered_reflists() further below.
-        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
-  } else { // Old behaviour
+  } else { // Old behavior
     // Walk down the list, copying the discovered field into
     // the next field and clearing the discovered field.
     while (obj != next_d) {
@@ -397,7 +390,7 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "The reference should not be enqueued");
       if (next_d == obj) {  // obj is last
-        // Swap refs_list into pendling_list_addr and
+        // Swap refs_list into pending_list_addr and
         // set obj's next to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
         // Need oop_check on pending_list_addr above;
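
Both branches hinge on the same publication step: atomic_exchange_oop() swaps
the collector's list head onto the shared pending list in one atomic step, and
the old pending chain is hooked behind the last element. A standalone model of
that splice (RefModel and the field names are illustrative):

    #include <atomic>

    struct RefModel {                        // stand-in for a Reference oop
      std::atomic<RefModel*> discovered{nullptr};
    };

    void splice_onto_pending(std::atomic<RefModel*>& pending_list_head,
                             RefModel* list_head, RefModel* list_tail) {
      // Atomically install our head; whatever was pending before comes back.
      RefModel* old = pending_list_head.exchange(list_head,
                                                 std::memory_order_acq_rel);
      // Chain the previous pending list behind our tail (old may be null).
      list_tail->discovered.store(old, std::memory_order_release);
    }
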
@@ -497,13 +490,13 @@
   } else {
     new_next = _next;
   }
-
-  if (UseCompressedOops) {
-    // Remove Reference object from list.
-    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
-  } else {
-    // Remove Reference object from list.
-    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
+  // Remove Reference object from discovered list. Note that G1 does not need a
+  // pre-barrier here because we know the Reference has already been found/marked,
+  // that's how it ended up in the discovered list in the first place.
+  oop_store_raw(_prev_next, new_next);
+  if (_discovered_list_needs_post_barrier && _prev_next != _refs_list.adr_head()) {
+    // Needs a post-barrier, and this is not the list head (which is not on the heap).
+    oopDesc::bs()->write_ref_field(_prev_next, new_next);
   }
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
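
The shape of that unlink, reusing RefModel and CardTableModel from the
sketches above (a model, not the HotSpot code): the raw store always happens,
and the post-barrier fires only for slots that actually live on the heap.

    struct DiscoveredListModel {
      RefModel*  head;
      RefModel** adr_head() { return &head; }   // off-heap slot, never dirtied
    };

    void unlink_current(RefModel** prev_next, RefModel* new_next,
                        DiscoveredListModel& list, bool needs_post_barrier,
                        CardTableModel& bs) {
      *prev_next = new_next;                    // oop_store_raw equivalent
      if (needs_post_barrier && prev_next != list.adr_head()) {
        bs.write_ref_field(prev_next, new_next);
      }
    }
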
@@ -516,13 +509,11 @@
   // the reference object and will fail
   // CT verification.
   if (UseG1GC) {
-    BarrierSet* bs = oopDesc::bs();
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
-
     if (UseCompressedOops) {
-      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
     } else {
-      bs->write_ref_field_pre((oop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
     }
     java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
@@ -553,7 +544,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   assert(policy != NULL, "Must have a non-NULL policy");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   // Decide which softly reachable refs should be kept alive.
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
@@ -593,7 +584,7 @@
                              BoolObjectClosure* is_alive,
                              OopClosure*        keep_alive) {
   assert(discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
@@ -630,7 +621,7 @@
                                                   OopClosure*        keep_alive,
                                                   VoidClosure*       complete_gc) {
   assert(!discovery_is_atomic(), "Error");
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
@@ -673,7 +664,7 @@
                                    OopClosure*        keep_alive,
                                    VoidClosure*       complete_gc) {
   ResourceMark rm;
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.update_discovered();
     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
@@ -790,10 +781,9 @@
 };
 
 void ReferenceProcessor::set_discovered(oop ref, oop value) {
-  if (_discovered_list_needs_barrier) {
-    java_lang_ref_Reference::set_discovered(ref, value);
-  } else {
-    java_lang_ref_Reference::set_discovered_raw(ref, value);
+  java_lang_ref_Reference::set_discovered_raw(ref, value);
+  if (_discovered_list_needs_post_barrier) {
+    oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(ref), value);
   }
 }
 
@@ -990,7 +980,7 @@
 
 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
   assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL);
+  DiscoveredListIterator iter(refs_list, NULL, NULL, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop next = java_lang_ref_Reference::next(iter.obj());
@@ -1085,8 +1075,8 @@
   // so this will expand to nothing. As a result, we have manually
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
-  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
-  assert(!_discovered_list_needs_barrier || UseG1GC,
+  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+  assert(!_discovered_list_needs_post_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
@@ -1097,8 +1087,8 @@
     // is necessary.
     refs_list.set_head(obj);
     refs_list.inc_length(1);
-    if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+    if (_discovered_list_needs_post_barrier) {
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
@@ -1250,7 +1240,7 @@
   if (_discovery_is_mt) {
     add_to_discovered_list_mt(*list, obj, discovered_addr);
   } else {
-    // If "_discovered_list_needs_barrier", we do write barriers when
+    // If "_discovered_list_needs_post_barrier", we do write barriers when
     // updating the discovered reference list.  Otherwise, we do a raw store
     // here: the field will be visited later when processing the discovered
     // references.
@@ -1260,13 +1250,13 @@
 
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
-    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
-    assert(!_discovered_list_needs_barrier || UseG1GC,
+    assert(!_discovered_list_needs_post_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
-    if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+    if (_discovered_list_needs_post_barrier) {
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
@@ -1351,7 +1341,7 @@
 // whose referents are still alive, whose referents are NULL or which
 // are not active (have a non-NULL next field). NOTE: When we are
 // thus precleaning the ref lists (which happens single-threaded today),
-// we do not disable refs discovery to honour the correct semantics of
+// we do not disable refs discovery to honor the correct semantics of
 // java.lang.Reference. As a result, we need to be careful below
 // that ref removal steps interleave safely with ref discovery steps
 // (in this thread).
@@ -1361,7 +1351,7 @@
                                                 OopClosure*        keep_alive,
                                                 VoidClosure*       complete_gc,
                                                 YieldClosure*      yield) {
-  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
+  DiscoveredListIterator iter(refs_list, keep_alive, is_alive, _discovered_list_needs_post_barrier);
   while (iter.has_next()) {
     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
     oop obj = iter.obj();
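
The MT discovery path referenced throughout uses a claiming CAS rather than a
lock: a thread owns a reference only if it installs the first non-null value
in its discovered field. A standalone model of add_to_discovered_list_mt()'s
core (names illustrative; in HotSpot each thread pushes onto its own list, so
the head store there needs no atomicity of its own):

    #include <atomic>

    struct Ref  { std::atomic<Ref*> discovered{nullptr}; };
    struct List { std::atomic<Ref*> head{nullptr}; };

    bool try_discover(List& list, Ref* obj) {
      Ref* next = list.head.load(std::memory_order_acquire);
      Ref* next_discovered = (next != nullptr) ? next : obj; // self-loop if empty
      Ref* expected = nullptr;
      // Mirrors oopDesc::atomic_compare_exchange_oop(next_discovered,
      // discovered_addr, NULL): only one thread wins the claim.
      if (!obj->discovered.compare_exchange_strong(expected, next_discovered,
                                                   std::memory_order_acq_rel)) {
        return false;    // lost the race; already on a discovered list
      }
      list.head.store(obj, std::memory_order_release);
      return true;
    }
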
--- a/src/share/vm/memory/referenceProcessor.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/referenceProcessor.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -99,6 +99,7 @@
   oop                _referent;
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;
+  bool               _discovered_list_needs_post_barrier;
 
   DEBUG_ONLY(
   oop                _first_seen; // cyclic linked list check
@@ -112,7 +113,8 @@
 public:
   inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                 OopClosure*        keep_alive,
-                                BoolObjectClosure* is_alive):
+                                BoolObjectClosure* is_alive,
+                                bool               discovered_list_needs_post_barrier = false):
     _refs_list(refs_list),
     _prev_next(refs_list.adr_head()),
     _prev(NULL),
@@ -126,7 +128,8 @@
 #endif
     _next(NULL),
     _keep_alive(keep_alive),
-    _is_alive(is_alive)
+    _is_alive(is_alive),
+    _discovered_list_needs_post_barrier(discovered_list_needs_post_barrier)
 { }
 
   // End Of List.
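
Because the new flag is defaulted, the three-argument call shape used before
this change keeps compiling. A compilable toy showing just that property
(every type here is a placeholder, not the HotSpot class):

    #include <cstdio>

    struct List {}; struct KeepAlive {}; struct IsAlive {};

    struct Iter {
      Iter(List&, KeepAlive*, IsAlive*, bool needs_post_barrier = false)
          : _needs_post_barrier(needs_post_barrier) {}
      const bool _needs_post_barrier;
    };

    int main() {
      List l; KeepAlive k; IsAlive a;
      Iter legacy(l, &k, &a);       // pre-change call shape, flag stays false
      Iter g1(l, &k, &a, true);     // barrier-aware call shape
      std::printf("%d %d\n", legacy._needs_post_barrier, g1._needs_post_barrier);
      return 0;
    }
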
@@ -228,14 +231,13 @@
   bool        _discovery_is_mt;         // true if reference discovery is MT.
 
   // If true, setting "next" field of a discovered refs list requires
-  // write barrier(s).  (Must be true if used in a collector in which
+  // a write post-barrier.  (Must be true if used in a collector in which
   // elements of a discovered list may be moved during discovery: for
   // example, a collector like Garbage-First that moves objects during a
   // long-term concurrent marking phase that does weak reference
   // discovery.)
-  bool        _discovered_list_needs_barrier;
+  bool        _discovered_list_needs_post_barrier;
 
-  BarrierSet* _bs;                      // Cached copy of BarrierSet.
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
@@ -381,8 +383,8 @@
 
  protected:
   // Set the 'discovered' field of the given reference to
-  // the given value - emitting barriers depending upon
-  // the value of _discovered_list_needs_barrier.
+  // the given value - emitting post barriers depending upon
+  // the value of _discovered_list_needs_post_barrier.
   void set_discovered(oop ref, oop value);
 
   // "Preclean" the given discovered reference list
@@ -420,32 +422,13 @@
   void update_soft_ref_master_clock();
 
  public:
-  // constructor
-  ReferenceProcessor():
-    _span((HeapWord*)NULL, (HeapWord*)NULL),
-    _discovered_refs(NULL),
-    _discoveredSoftRefs(NULL),  _discoveredWeakRefs(NULL),
-    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
-    _discovering_refs(false),
-    _discovery_is_atomic(true),
-    _enqueuing_is_done(false),
-    _discovery_is_mt(false),
-    _discovered_list_needs_barrier(false),
-    _bs(NULL),
-    _is_alive_non_header(NULL),
-    _num_q(0),
-    _max_num_q(0),
-    _processing_is_mt(false),
-    _next_id(0)
-  { }
-
   // Default parameters give you a vanilla reference processor.
   ReferenceProcessor(MemRegion span,
                      bool mt_processing = false, uint mt_processing_degree = 1,
                      bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
                      BoolObjectClosure* is_alive_non_header = NULL,
-                     bool discovered_list_needs_barrier = false);
+                     bool discovered_list_needs_post_barrier = false);
 
   // RefDiscoveryPolicy values
   enum DiscoveryPolicy {
@@ -494,7 +477,7 @@
   bool processing_is_mt() const { return _processing_is_mt; }
   void set_mt_processing(bool mt) { _processing_is_mt = mt; }
 
-  // whether all enqueuing of weak references is complete
+  // whether all enqueueing of weak references is complete
   bool enqueuing_is_done()  { return _enqueuing_is_done; }
   void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }
 
--- a/src/share/vm/memory/resourceArea.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/resourceArea.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -196,7 +196,7 @@
 // leveraging existing data structures if we simply create a way to manage this one
 // special need for a ResourceMark. If ResourceMark simply inherited from CHeapObj
 // then existing ResourceMarks would work fine since no one use new to allocate them
-// and they would be stack allocated. This leaves open the possibilty of accidental
+// and they would be stack allocated. This leaves open the possibility of accidental
 // misuse so we simply duplicate the ResourceMark functionality here.
 
 class DeoptResourceMark: public CHeapObj<mtInternal> {
--- a/src/share/vm/memory/sharedHeap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -137,10 +137,8 @@
 }
 
 void SharedHeap::process_strong_roots(bool activate_scope,
-                                      bool is_scavenging,
                                       ScanningOption so,
                                       OopClosure* roots,
-                                      CodeBlobClosure* code_roots,
                                       KlassClosure* klass_closure) {
   StrongRootsScope srs(this, activate_scope);
 
@@ -157,13 +155,17 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
 
-  // All threads execute this; the individual threads are task groups.
+  CodeBlobToOopClosure code_roots(roots, true);
+
   CLDToOopClosure roots_from_clds(roots);
-  CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
+  // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
+  // CLDs which are strongly reachable from the thread stacks.
+  CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
+  // All threads execute this; the individual threads are task groups.
   if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
+    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
   } else {
-    Threads::oops_do(roots, roots_from_clds_p, code_roots);
+    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
   }
 
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
@@ -187,9 +189,9 @@
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
     if (so & SO_AllClasses) {
-      ClassLoaderDataGraph::oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::oops_do(roots, klass_closure, /* must_claim */ false);
     } else if (so & SO_SystemClasses) {
-      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, !is_scavenging);
+      ClassLoaderDataGraph::always_strong_oops_do(roots, klass_closure, /* must_claim */ true);
     }
   }
 
@@ -204,17 +206,18 @@
   }
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
-    if (so & SO_CodeCache) {
-      assert(code_roots != NULL, "must supply closure for code cache");
+    if (so & SO_ScavengeCodeCache) {
+      // code_roots is a stack-allocated closure here, so it can never be NULL.
 
-      if (is_scavenging) {
-        // We only visit parts of the CodeCache when scavenging.
-        CodeCache::scavenge_root_nmethods_do(code_roots);
-      } else {
-        // CMSCollector uses this to do intermediate-strength collections.
-        // We scan the entire code cache, since CodeCache::do_unloading is not called.
-        CodeCache::blobs_do(code_roots);
-      }
+      // We only visit parts of the CodeCache when scavenging.
+      CodeCache::scavenge_root_nmethods_do(&code_roots);
+    }
+    if (so & SO_AllCodeCache) {
+      // Same here: the address of the local closure can never be NULL.
+
+      // CMSCollector uses this to do intermediate-strength collections.
+      // We scan the entire code cache, since CodeCache::do_unloading is not called.
+      CodeCache::blobs_do(&code_roots);
     }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
@@ -231,13 +234,9 @@
 };
 static AlwaysTrueClosure always_true;
 
-void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots) {
+void SharedHeap::process_weak_roots(OopClosure* root_closure) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
-
-  CodeCache::blobs_do(code_roots);
-  StringTable::oops_do(root_closure);
 }
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
--- a/src/share/vm/memory/sharedHeap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,7 +92,7 @@
 //  0 is a "special" value in set_n_threads() which translates to
 //  setting _n_threads to 1.
 //
-//  Some code uses _n_terminiation to decide if work should be done in
+//  Some code uses _n_termination to decide if work should be done in
 //  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
 //  is an example of such code.  Look for variable "is_par" for other
 //  examples.
@@ -221,7 +221,8 @@
     SO_AllClasses          = 0x1,
     SO_SystemClasses       = 0x2,
     SO_Strings             = 0x4,
-    SO_CodeCache           = 0x8
+    SO_AllCodeCache        = 0x8,
+    SO_ScavengeCodeCache   = 0x10
   };
 
   FlexibleWorkGang* workers() const { return _workers; }
@@ -232,19 +233,15 @@
   // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   // "SO_SystemClasses" to all the "system" classes and loaders;
   // "SO_Strings" applies the closure to all entries in StringTable;
-  // "SO_CodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_AllCodeCache" applies the closure to all elements of the CodeCache.
+  // "SO_ScavengeCodeCache" applies the closure to elements on the scavenge root list in the CodeCache.
   void process_strong_roots(bool activate_scope,
-                            bool is_scavenging,
                             ScanningOption so,
                             OopClosure* roots,
-                            CodeBlobClosure* code_roots,
                             KlassClosure* klass_closure);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table.
-  void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots);
+  // Apply "root_closure" to the JNI weak roots..
+  void process_weak_roots(OopClosure* root_closure);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
@@ -274,4 +271,8 @@
                              size_t capacity);
 };
 
+inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
+  return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
+}
+
 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
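
The inline operator| exists because OR-ing two enumerators yields a plain int,
which does not implicitly convert back to ScanningOption now that callers
combine more than one bit. A hypothetical call site it keeps well-typed:

    SharedHeap::ScanningOption so =
        SharedHeap::SO_Strings | SharedHeap::SO_ScavengeCodeCache;
    heap->process_strong_roots(true /* activate_scope */, so,
                               &roots_cl, &klass_cl);
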
--- a/src/share/vm/memory/space.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/space.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -112,7 +112,7 @@
   // cards are processed. For instance, CMS must remember mutator updates
   // (i.e. dirty cards) so as to re-scan mutated objects.
   // Such work can be piggy-backed here on dirty card scanning, so as to make
-  // it slightly more efficient than doing a complete non-detructive pre-scan
+  // it slightly more efficient than doing a complete non-destructive pre-scan
   // of the card table.
   MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
   if (pCl != NULL) {
@@ -324,8 +324,8 @@
 }
 
 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
-  // Space should not advertize an increase in size
-  // until after the underlying offest table has been enlarged.
+  // Space should not advertise an increase in size
+  // until after the underlying offset table has been enlarged.
   _offsets.resize(pointer_delta(new_end, bottom()));
   Space::set_end(new_end);
 }
@@ -729,7 +729,7 @@
   object_iterate_from(bm, blk);
 }
 
-// For a continguous space object_iterate() and safe_object_iterate()
+// For a ContiguousSpace object_iterate() and safe_object_iterate()
 // are the same.
 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
--- a/src/share/vm/memory/space.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/space.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -56,7 +59,7 @@
 
 // Here's the Space hierarchy:
 //
-// - Space               -- an asbtract base class describing a heap area
+// - Space               -- an abstract base class describing a heap area
 //   - CompactibleSpace  -- a space supporting compaction
 //     - CompactibleFreeListSpace -- (used for CMS generation)
 //     - ContiguousSpace -- a compactible space in which all free space
@@ -159,7 +162,7 @@
   // (that is, if the space is contiguous), then this region must contain only
   // such objects: the memregion will be from the bottom of the region to the
   // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
-  // the space must distiguish between objects in the region allocated before
+  // the space must distinguish between objects in the region allocated before
   // and after the call to save marks.
   virtual MemRegion used_region_at_save_marks() const {
     return MemRegion(bottom(), saved_mark_word());
@@ -190,7 +193,7 @@
 
   // Returns true iff the given the space contains the
   // given address as part of an allocated object. For
-  // ceratin kinds of spaces, this might be a potentially
+  // certain kinds of spaces, this might be a potentially
   // expensive operation. To prevent performance problems
   // on account of its inadvertent use in product jvm's,
   // we restrict its use to assertion checks only.
@@ -244,13 +247,13 @@
   // Return an address indicating the extent of the iteration in the
   // event that the iteration had to return because of finding an
   // uninitialized object in the space, or if the closure "cl"
-  // signalled early termination.
+  // signaled early termination.
   virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
   virtual HeapWord* object_iterate_careful_m(MemRegion mr,
                                              ObjectClosureCareful* cl);
 
   // Create and return a new dirty card to oop closure. Can be
-  // overriden to return the appropriate type of closure
+  // overridden to return the appropriate type of closure
   // depending on the type of space in which the closure will
   // operate. ResourceArea allocated.
   virtual DirtyCardToOopClosure* new_dcto_cl(ExtendedOopClosure* cl,
@@ -474,13 +477,13 @@
   // be one, since compaction must succeed -- we go to the first space of
   // the previous generation if necessary, updating "cp"), reset compact_top
   // and then forward.  In either case, returns the new value of "compact_top".
-  // If the forwarding crosses "cp->threshold", invokes the "cross_threhold"
+  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
   // function of the then-current compaction space, and updates "cp->threshold
   // accordingly".
   virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                     HeapWord* compact_top);
 
-  // Return a size with adjusments as required of the space.
+  // Return a size with adjustments as required of the space.
   virtual size_t adjust_object_size_v(size_t size) const { return size; }
 
 protected:
@@ -500,7 +503,7 @@
 
   // Requires "allowed_deadspace_words > 0", that "q" is the start of a
   // free block of the given "word_len", and that "q", were it an object,
-  // would not move if forwared.  If the size allows, fill the free
+  // would not move if forwarded.  If the size allows, fill the free
   // block with an object, to prevent excessive compaction.  Returns "true"
   // iff the free region was made deadspace, and modifies
   // "allowed_deadspace_words" to reflect the number of available deadspace
--- a/src/share/vm/memory/specialized_oop_closures.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/specialized_oop_closures.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/tenuredGeneration.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,7 @@
                     free());
     }
   }
-  // If we had to expand to accomodate promotions from younger generations
+  // If we had to expand to accommodate promotions from younger generations
   if (!result && _capacity_at_prologue < capacity()) {
     result = true;
     if (PrintGC && Verbose) {
--- a/src/share/vm/memory/tenuredGeneration.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/tenuredGeneration.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -34,6 +34,7 @@
 // Thread-Local Edens support
 
 // static member initialization
+size_t           ThreadLocalAllocBuffer::_max_size       = 0;
 unsigned         ThreadLocalAllocBuffer::_target_refills = 0;
 GlobalTLABStats* ThreadLocalAllocBuffer::_global_stats   = NULL;
 
@@ -45,7 +46,7 @@
 void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() {
   global_stats()->initialize();
 
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+  for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
     thread->tlab().accumulate_statistics();
     thread->tlab().initialize_statistics();
   }
@@ -60,28 +61,32 @@
 }
 
 void ThreadLocalAllocBuffer::accumulate_statistics() {
-  size_t capacity = Universe::heap()->tlab_capacity(myThread()) / HeapWordSize;
-  size_t unused   = Universe::heap()->unsafe_max_tlab_alloc(myThread()) / HeapWordSize;
-  size_t used     = capacity - unused;
-
-  // Update allocation history if a reasonable amount of eden was allocated.
-  bool update_allocation_history = used > 0.5 * capacity;
+  Thread* thread = myThread();
+  size_t capacity = Universe::heap()->tlab_capacity(thread);
+  size_t used     = Universe::heap()->tlab_used(thread);
 
   _gc_waste += (unsigned)remaining();
+  size_t total_allocated = thread->allocated_bytes();
+  size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc;
+  _allocated_before_last_gc = total_allocated;
 
   if (PrintTLAB && (_number_of_refills > 0 || Verbose)) {
     print_stats("gc");
   }
 
   if (_number_of_refills > 0) {
+    // Update allocation history if a reasonable amount of eden was allocated.
+    bool update_allocation_history = used > 0.5 * capacity;
 
     if (update_allocation_history) {
       // Average the fraction of eden allocated in a tlab by this
       // thread for use in the next resize operation.
       // _gc_waste is not subtracted because it's included in
       // "used".
-      size_t allocation = _number_of_refills * desired_size();
-      double alloc_frac = allocation / (double) used;
+      // The result can be larger than 1.0 due to direct to old allocations.
+      // These allocations should ideally not be counted but since it is not possible
+      // to filter them out here we just cap the fraction to be at most 1.0.
+      double alloc_frac = MIN2(1.0, (double) allocated_since_last_gc / used);
       _allocation_fraction.sample(alloc_frac);
     }
     global_stats()->update_allocating_threads();
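
The capped fraction is easy to see in isolation: allocations that went
directly to the old generation are counted in allocated_since_last_gc but not
in eden "used", so the raw ratio can exceed 1.0. A standalone check (function
name illustrative):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    double alloc_fraction(size_t allocated_since_last_gc, size_t used) {
      return std::min(1.0, (double)allocated_since_last_gc / (double)used);
    }

    int main() {
      std::printf("%.2f\n", alloc_fraction(48u << 20, 64u << 20)); // 0.75
      std::printf("%.2f\n", alloc_fraction(96u << 20, 64u << 20)); // capped: 1.00
      return 0;
    }
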
@@ -127,33 +132,32 @@
 }
 
 void ThreadLocalAllocBuffer::resize_all_tlabs() {
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-    thread->tlab().resize();
+  if (ResizeTLAB) {
+    for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+      thread->tlab().resize();
+    }
   }
 }
 
 void ThreadLocalAllocBuffer::resize() {
+  // Compute the next tlab size using expected allocation amount
+  assert(ResizeTLAB, "Should not call this otherwise");
+  size_t alloc = (size_t)(_allocation_fraction.average() *
+                          (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
+  size_t new_size = alloc / _target_refills;
 
-  if (ResizeTLAB) {
-    // Compute the next tlab size using expected allocation amount
-    size_t alloc = (size_t)(_allocation_fraction.average() *
-                            (Universe::heap()->tlab_capacity(myThread()) / HeapWordSize));
-    size_t new_size = alloc / _target_refills;
-
-    new_size = MIN2(MAX2(new_size, min_size()), max_size());
-
-    size_t aligned_new_size = align_object_size(new_size);
+  new_size = MIN2(MAX2(new_size, min_size()), max_size());
 
-    if (PrintTLAB && Verbose) {
-      gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
-                          " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
-                          myThread(), myThread()->osthread()->thread_id(),
-                          _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
-    }
-    set_desired_size(aligned_new_size);
+  size_t aligned_new_size = align_object_size(new_size);
 
-    set_refill_waste_limit(initial_refill_waste_limit());
+  if (PrintTLAB && Verbose) {
+    gclog_or_tty->print("TLAB new size: thread: " INTPTR_FORMAT " [id: %2d]"
+                        " refills %d  alloc: %8.6f desired_size: " SIZE_FORMAT " -> " SIZE_FORMAT "\n",
+                        myThread(), myThread()->osthread()->thread_id(),
+                        _target_refills, _allocation_fraction.average(), desired_size(), aligned_new_size);
   }
+  set_desired_size(aligned_new_size);
+  set_refill_waste_limit(initial_refill_waste_limit());
 }
 
 void ThreadLocalAllocBuffer::initialize_statistics() {
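
The restructured resize() above reduces to: spread the expected allocation for
the next epoch over the target number of refills, clamp, then align. A
standalone model (MIN2/MAX2 and align_object_size replaced by stand-ins):

    #include <algorithm>
    #include <cstddef>

    size_t next_tlab_size_words(double alloc_fraction_avg,
                                size_t tlab_capacity_words,
                                unsigned target_refills,
                                size_t min_words, size_t max_words) {
      size_t alloc    = (size_t)(alloc_fraction_avg * tlab_capacity_words);
      size_t new_size = alloc / target_refills;
      new_size = std::min(std::max(new_size, min_words), max_words);
      const size_t align = 8;                 // align_object_size stand-in
      return (new_size + align - 1) & ~(align - 1);
    }
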
@@ -249,31 +253,13 @@
   return init_sz;
 }
 
-const size_t ThreadLocalAllocBuffer::max_size() {
-
-  // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE].
-  // This restriction could be removed by enabling filling with multiple arrays.
-  // If we compute that the reasonable way as
-  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
-  // we'll overflow on the multiply, so we do the divide first.
-  // We actually lose a little by dividing first,
-  // but that just makes the TLAB  somewhat smaller than the biggest array,
-  // which is fine, since we'll be able to fill that.
-
-  size_t unaligned_max_size = typeArrayOopDesc::header_size(T_INT) +
-                              sizeof(jint) *
-                              ((juint) max_jint / (size_t) HeapWordSize);
-  return align_size_down(unaligned_max_size, MinObjAlignment);
-}
-
 void ThreadLocalAllocBuffer::print_stats(const char* tag) {
   Thread* thrd = myThread();
   size_t waste = _gc_waste + _slow_refill_waste + _fast_refill_waste;
   size_t alloc = _number_of_refills * _desired_size;
   double waste_percent = alloc == 0 ? 0.0 :
                       100.0 * waste / alloc;
-  size_t tlab_used  = Universe::heap()->tlab_capacity(thrd) -
-                      Universe::heap()->unsafe_max_tlab_alloc(thrd);
+  size_t tlab_used  = Universe::heap()->tlab_used(thrd);
   gclog_or_tty->print("TLAB: %s thread: " INTPTR_FORMAT " [id: %2d]"
                       " desired_size: " SIZE_FORMAT "KB"
                       " slow allocs: %d  refill waste: " SIZE_FORMAT "B"
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -45,7 +45,9 @@
   HeapWord* _end;                                // allocation end (excluding alignment_reserve)
   size_t    _desired_size;                       // desired size   (including alignment_reserve)
   size_t    _refill_waste_limit;                 // hold onto tlab if free() is larger than this
+  size_t    _allocated_before_last_gc;           // total bytes allocated up until the last gc
 
+  static size_t   _max_size;                     // maximum size of any TLAB
   static unsigned _target_refills;               // expected number of refills between GCs
 
   unsigned  _number_of_refills;
@@ -99,12 +101,13 @@
   static GlobalTLABStats* global_stats() { return _global_stats; }
 
 public:
-  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight) {
+  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
     // do nothing.  tlabs must be inited by initialize() calls
   }
 
   static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize); }
-  static const size_t max_size();
+  static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
+  static void set_max_size(size_t max_size)      { _max_size = max_size; }
 
   HeapWord* start() const                        { return _start; }
   HeapWord* end() const                          { return _end; }
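
The static bound deleted from threadLocalAllocBuffer.cpp now lives behind
CollectedHeap::max_tlab_size(), set during universe initialization as the
universe.cpp hunk below shows. A heap with no policy of its own could keep the
old rule, which the removed code computed as follows; header and word sizes
here are illustrative 64-bit values:

    #include <cstddef>
    #include <cstdint>

    // A TLAB must be fillable with a single int[Integer.MAX_VALUE] array.
    // Divide before multiplying so sizeof(jint) * max_jint cannot overflow.
    size_t default_max_tlab_size_words() {
      const size_t heap_word_size         = 8;
      const size_t int_array_header_words = 2;
      size_t unaligned = int_array_header_words +
                         sizeof(int32_t) * ((size_t)INT32_MAX / heap_word_size);
      return unaligned & ~(size_t)7;          // align_size_down stand-in
    }
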
--- a/src/share/vm/memory/universe.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/memory/universe.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -759,7 +759,7 @@
       // the correct no-access prefix.
       // The final value will be set in initialize_heap() below.
       Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
-#ifdef _WIN64
+#if defined(_WIN64) || defined(AIX)
       if (UseLargePages) {
         // Cannot allocate guard pages for implicit checks in indexed
         // addressing mode when large pages are specified on windows.
@@ -816,6 +816,8 @@
     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
   }
 
+  ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
+
   jint status = Universe::heap()->initialize();
   if (status != JNI_OK) {
     return status;
@@ -839,6 +841,11 @@
       // Can't reserve heap below 32Gb.
       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
+#ifdef AIX
+      // There is no protected page before the heap. Disable implicit null
+      // checks so that NULL is always preserved when decoding oops and the
+      // page below the heap is never accessed.
+      Universe::set_narrow_oop_use_implicit_null_checks(false);
+#endif
       if (verbose) {
         tty->print(", %s: "PTR_FORMAT,
             narrow_oop_mode_to_string(HeapBasedNarrowOop),
@@ -1136,7 +1143,7 @@
       SystemDictionary::ProtectionDomain_klass(), m);
   }
 
-  // The folowing is initializing converter functions for serialization in
+  // The following is initializing converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
   // a better solution for this as well.
   initialize_converter_functions();
@@ -1178,7 +1185,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   KlassDepChange changes(dependee);
@@ -1199,7 +1206,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   CallSiteDepChange changes(call_site(), method_handle());
@@ -1230,7 +1237,7 @@
   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
 
   // CodeCache can only be updated by a thread_in_VM and they will all be
-  // stopped dring the safepoint so CodeCache will be safe to update without
+  // stopped during the safepoint so CodeCache will be safe to update without
   // holding the CodeCache_lock.
 
   // Compute the dependent nmethods
--- a/src/share/vm/oops/arrayKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -214,8 +214,8 @@
 
 // Verification
 
-void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
-  Klass::verify_on(st, check_dictionary);
+void ArrayKlass::verify_on(outputStream* st) {
+  Klass::verify_on(st);
 
   if (component_mirror() != NULL) {
     guarantee(component_mirror()->klass() != NULL, "should have a class");
--- a/src/share/vm/oops/arrayKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,7 +146,7 @@
   void oop_print_on(oop obj, outputStream* st);
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/arrayOop.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/arrayOop.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/compiledICHolder.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/compiledICHolder.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/constantPool.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/constantPool.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -82,6 +82,9 @@
 void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
   MetadataFactory::free_metadata(loader_data, cache());
   set_cache(NULL);
+  MetadataFactory::free_array<u2>(loader_data, reference_map());
+  set_reference_map(NULL);
+
   MetadataFactory::free_array<jushort>(loader_data, operands());
   set_operands(NULL);
 
@@ -1874,7 +1877,6 @@
 // Printing
 
 void ConstantPool::print_on(outputStream* st) const {
-  EXCEPTION_MARK;
   assert(is_constantPool(), "must be constantPool");
   st->print_cr(internal_name());
   if (flags() != 0) {
--- a/src/share/vm/oops/cpCache.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/cpCache.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -363,7 +363,7 @@
   // Decode the action of set_method and set_interface_call
   Bytecodes::Code invoke_code = bytecode_1();
   if (invoke_code != (Bytecodes::Code)0) {
-    Metadata* f1 = (Metadata*)_f1;
+    Metadata* f1 = f1_ord();
     if (f1 != NULL) {
       switch (invoke_code) {
       case Bytecodes::_invokeinterface:
--- a/src/share/vm/oops/cpCache.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/cpCache.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -138,7 +138,7 @@
 
   void set_bytecode_1(Bytecodes::Code code);
   void set_bytecode_2(Bytecodes::Code code);
-  void set_f1(Metadata* f1)                            {
+  void set_f1(Metadata* f1) {
     Metadata* existing_f1 = (Metadata*)_f1; // read once
     assert(existing_f1 == NULL || existing_f1 == f1, "illegal field change");
     _f1 = f1;
@@ -325,14 +325,21 @@
 
   // Accessors
   int indices() const                            { return _indices; }
+  int indices_ord() const                        { return (intx)OrderAccess::load_ptr_acquire(&_indices); }
   int constant_pool_index() const                { return (indices() & cp_index_mask); }
-  Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices() >> bytecode_1_shift) & bytecode_1_mask); }
-  Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices() >> bytecode_2_shift) & bytecode_2_mask); }
-  Method* f1_as_method() const                   { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
-  Klass*    f1_as_klass() const                  { Metadata* f1 = (Metadata*)_f1; assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
-  bool      is_f1_null() const                   { Metadata* f1 = (Metadata*)_f1; return f1 == NULL; }  // classifies a CPC entry as unbound
+  Bytecodes::Code bytecode_1() const             { return Bytecodes::cast((indices_ord() >> bytecode_1_shift) & bytecode_1_mask); }
+  Bytecodes::Code bytecode_2() const             { return Bytecodes::cast((indices_ord() >> bytecode_2_shift) & bytecode_2_mask); }
+  Metadata* f1_ord() const                       { return (Metadata *)OrderAccess::load_ptr_acquire(&_f1); }
+  Method*   f1_as_method() const                 { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_method(), ""); return (Method*)f1; }
+  Klass*    f1_as_klass() const                  { Metadata* f1 = f1_ord(); assert(f1 == NULL || f1->is_klass(), ""); return (Klass*)f1; }
+  // Use the accessor f1_ord() to acquire _f1's value. This is needed for
+  // example in BytecodeInterpreter::run(), where is_f1_null() is
+  // called to check if an invokedynamic call is resolved. This load
+  // of _f1 must be ordered with the loads performed by
+  // cache->main_entry_index().
+  bool      is_f1_null() const                   { Metadata* f1 = f1_ord(); return f1 == NULL; }  // classifies a CPC entry as unbound
   int       f2_as_index() const                  { assert(!is_vfinal(), ""); return (int) _f2; }
-  Method* f2_as_vfinal_method() const            { assert(is_vfinal(), ""); return (Method*)_f2; }
+  Method*   f2_as_vfinal_method() const          { assert(is_vfinal(), ""); return (Method*)_f2; }
   int  field_index() const                       { assert(is_field_entry(),  ""); return (_flags & field_index_mask); }
   int  parameter_size() const                    { assert(is_method_entry(), ""); return (_flags & parameter_size_mask); }
   bool is_volatile() const                       { return (_flags & (1 << is_volatile_shift))       != 0; }
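
The f1_ord()/is_f1_null() change pairs an acquire load with the resolver's
release store so that a reader seeing a non-null f1 also sees every field
written before publication. A standalone model of the pattern (names
illustrative):

    #include <atomic>
    #include <cstdint>

    struct CpCacheEntryModel {
      std::atomic<void*> f1{nullptr};
      intptr_t           f2 = 0;

      void resolve(void* metadata, intptr_t extra) {
        f2 = extra;                                    // ordinary stores first
        f1.store(metadata, std::memory_order_release); // then publish f1
      }
      bool is_f1_null() const {                        // mirrors f1_ord() use
        return f1.load(std::memory_order_acquire) == nullptr;
      }
    };
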
--- a/src/share/vm/oops/fieldInfo.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/fieldInfo.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/generateOopMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/generateOopMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1863,11 +1863,8 @@
   constantTag tag = cp->tag_at(ldc.pool_index()); // idx is index in resolved_references
   BasicType       bt  = ldc.result_type();
   CellTypeState   cts;
-  if (tag.is_klass() ||
-      tag.is_unresolved_klass() ||
-      tag.is_string() ||
-      tag.is_method_handle() ||
-      tag.is_method_type()) {
+  if (tag.basic_type() == T_OBJECT) {
+    assert(!tag.is_string_index() && !tag.is_klass_index(), "Unexpected index tag");
     assert(bt == T_OBJECT, "Guard is incorrect");
     cts = CellTypeState::make_line_ref(bci);
   } else {
--- a/src/share/vm/oops/instanceClassLoaderKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceClassLoaderKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceClassLoaderKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
+#include "prims/jvmtiThreadState.hpp"
 #include "prims/methodComparator.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/handles.inline.hpp"
@@ -77,51 +78,6 @@
 
 #ifdef DTRACE_ENABLED
 
-#ifndef USDT2
-
-HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
-  char*, intptr_t, oop, intptr_t);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
-  char*, intptr_t, oop, intptr_t, int);
-
-#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
-  {                                                              \
-    char* data = NULL;                                           \
-    int len = 0;                                                 \
-    Symbol* name = (clss)->name();                               \
-    if (name != NULL) {                                          \
-      data = (char*)name->bytes();                               \
-      len = name->utf8_length();                                 \
-    }                                                            \
-    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
-      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type);           \
-  }
-
-#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
-  {                                                              \
-    char* data = NULL;                                           \
-    int len = 0;                                                 \
-    Symbol* name = (clss)->name();                               \
-    if (name != NULL) {                                          \
-      data = (char*)name->bytes();                               \
-      len = name->utf8_length();                                 \
-    }                                                            \
-    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
-      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait);     \
-  }
-#else /* USDT2 */
 
 #define HOTSPOT_CLASS_INITIALIZATION_required HOTSPOT_CLASS_INITIALIZATION_REQUIRED
 #define HOTSPOT_CLASS_INITIALIZATION_recursive HOTSPOT_CLASS_INITIALIZATION_RECURSIVE
@@ -156,7 +112,6 @@
     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
       data, len, (clss)->class_loader(), thread_type, wait);     \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
@@ -908,10 +863,16 @@
     // Step 10 and 11
     Handle e(THREAD, PENDING_EXCEPTION);
     CLEAR_PENDING_EXCEPTION;
+    // JVMTI has already reported the pending exception
+    // JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
+    JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
     {
       EXCEPTION_MARK;
       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
+      // JVMTI has already reported the pending exception
+      // JVMTI internal flag reset is needed in order to report ExceptionInInitializerError
+      JvmtiExport::clear_detected_exception((JavaThread*)THREAD);
     }
     DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_oop()), -1,wait);
     if (e->is_a(SystemDictionary::Error_klass())) {
@@ -1203,7 +1164,11 @@
     MutexLocker x(OopMapCacheAlloc_lock);
     // First time use. Allocate a cache in C heap
     if (_oop_map_cache == NULL) {
-      _oop_map_cache = new OopMapCache();
+      // Release stores from OopMapCache constructor before assignment
+      // to _oop_map_cache. C++ compilers on ppc do not emit the
+      // required memory barrier only because of the volatile
+      // qualifier of _oop_map_cache.
+      OrderAccess::release_store_ptr(&_oop_map_cache, new OopMapCache());
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
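
The OopMapCache fix above is the classic double-checked initialization hazard:
on ppc the pointer store could become visible before the constructor's stores,
and the volatile qualifier alone does not forbid that. A standalone rendering
of the corrected pattern (names illustrative):

    #include <atomic>
    #include <mutex>

    struct Cache { int table[16] = {0}; };

    std::mutex          g_lock;
    std::atomic<Cache*> g_cache{nullptr};

    Cache* get_cache() {
      Cache* c = g_cache.load(std::memory_order_acquire);
      if (c == nullptr) {
        std::lock_guard<std::mutex> guard(g_lock);
        c = g_cache.load(std::memory_order_relaxed);
        if (c == nullptr) {
          c = new Cache();                             // constructor stores...
          g_cache.store(c, std::memory_order_release); // ...ordered before publish
        }
      }
      return c;
    }
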
@@ -1498,13 +1463,18 @@
   return -1;
 }
 
-// lookup_method searches both the local methods array and all superclasses methods arrays
+// uncached_lookup_method searches both the local class methods array and all
+// superclasses methods arrays, skipping any overpass methods in superclasses.
 Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
   Klass* klass = const_cast<InstanceKlass*>(this);
+  bool dont_ignore_overpasses = true;  // For the class being searched, find its overpasses.
   while (klass != NULL) {
     Method* method = InstanceKlass::cast(klass)->find_method(name, signature);
-    if (method != NULL) return method;
+    if ((method != NULL) && (dont_ignore_overpasses || !method->is_overpass())) {
+      return method;
+    }
     klass = InstanceKlass::cast(klass)->super();
+    dont_ignore_overpasses = false;  // Ignore overpass methods in all superclasses.
   }
   return NULL;
 }
@@ -1519,7 +1489,7 @@
   }
   // Look up interfaces
   if (m == NULL) {
-    m = lookup_method_in_all_interfaces(name, signature);
+    m = lookup_method_in_all_interfaces(name, signature, false);
   }
   return m;
 }
@@ -1528,14 +1498,16 @@
 // Do NOT return private or static methods, new in JDK8 which are not externally visible
 // They should only be found in the initial InterfaceMethodRef
 Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
-                                                         Symbol* signature) const {
+                                                       Symbol* signature,
+                                                       bool skip_default_methods) const {
   Array<Klass*>* all_ifs = transitive_interfaces();
   int num_ifs = all_ifs->length();
   InstanceKlass *ik = NULL;
   for (int i = 0; i < num_ifs; i++) {
     ik = InstanceKlass::cast(all_ifs->at(i));
     Method* m = ik->lookup_method(name, signature);
-    if (m != NULL && m->is_public() && !m->is_static()) {
+    if (m != NULL && m->is_public() && !m->is_static() &&
+        (!skip_default_methods || !m->is_default_method())) {
       return m;
     }
   }
@@ -2227,15 +2199,7 @@
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != NULL) {
-      for (ProfileData* data = mdo->first_data();
-           mdo->is_valid(data);
-           data = mdo->next_data(data)) {
-        data->clean_weak_klass_links(is_alive);
-      }
-      ParametersTypeData* parameters = mdo->parameters_type_data();
-      if (parameters != NULL) {
-        parameters->clean_weak_klass_links(is_alive);
-      }
+      mdo->clean_method_data(is_alive);
     }
   }
 }
@@ -2754,7 +2718,7 @@
   Method* m = n->method();
   // Search for match
   while(cur != NULL && cur != n) {
-    if (TieredCompilation) {
+    if (TieredCompilation && m == cur->method()) {
       // Find max level before n
       max_level = MAX2(max_level, cur->comp_level());
     }
@@ -2776,7 +2740,9 @@
     cur = next;
     while (cur != NULL) {
       // Find max level after n
-      max_level = MAX2(max_level, cur->comp_level());
+      if (m == cur->method()) {
+        max_level = MAX2(max_level, cur->comp_level());
+      }
       cur = cur->osr_link();
     }
     m->set_highest_osr_comp_level(max_level);
@@ -3022,8 +2988,7 @@
         offset          <= (juint) value->length() &&
         offset + length <= (juint) value->length()) {
       st->print(BULLET"string: ");
-      Handle h_obj(obj);
-      java_lang_String::print(h_obj, st);
+      java_lang_String::print(obj, st);
       st->cr();
       if (!WizardMode)  return;  // that is enough
     }
@@ -3174,7 +3139,7 @@
   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
 
-void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
+void InstanceKlass::verify_on(outputStream* st) {
 #ifndef PRODUCT
   // Avoid redundant verifies; this really should be in product.
   if (_verify_count == Universe::verify_count()) return;
@@ -3182,14 +3147,11 @@
 #endif
 
   // Verify Klass
-  Klass::verify_on(st, check_dictionary);
-
-  // Verify that klass is present in SystemDictionary if not already
-  // verifying the SystemDictionary.
-  if (is_loaded() && !is_anonymous() && check_dictionary) {
-    Symbol* h_name = name();
-    SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
-  }
+  Klass::verify_on(st);
+
+  // Verify that klass is present in ClassLoaderData
+  guarantee(class_loader_data()->contains_klass(this),
+            "this class isn't found in class loader data");
 
   // Verify vtables
   if (is_linked()) {
--- a/src/share/vm/oops/instanceKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -306,7 +306,7 @@
   //   three cases:
   //     NULL: no implementor.
   //     A Klass* that's not itself: one implementor.
-  //     Itsef: more than one implementors.
+  //     Itself: more than one implementor.
   // embedded host klass follows here
   //   The embedded host klass only exists in an anonymous class for
   //   dynamic language support (JSR 292 enabled). The host class grants
@@ -525,7 +525,8 @@
 
   // lookup a method in all the interfaces that this class implements
   // (returns NULL if not found)
-  Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const;
+  Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature, bool skip_default_methods) const;
+
   // lookup a method in local defaults then in all interfaces
   // (returns NULL if not found)
   Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
@@ -553,6 +554,7 @@
     if (hk == NULL) {
       return NULL;
     } else {
+      assert(*hk != NULL, "host klass should always be set if the address is not null");
       return *hk;
     }
   }
@@ -1085,7 +1087,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/instanceMirrorKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceOop.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceOop.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/instanceRefKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/instanceRefKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/klass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/klass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -376,8 +376,6 @@
 }
 
 bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
-  assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
-
 #ifdef ASSERT
   // The class is alive iff the class loader is alive.
   oop loader = class_loader();
@@ -640,7 +638,7 @@
 
 // Verification
 
-void Klass::verify_on(outputStream* st, bool check_dictionary) {
+void Klass::verify_on(outputStream* st) {
 
   // This can be expensive, but it is worth checking that this klass is
   // actually in the CLD graph; the check is skipped in product builds.
@@ -694,3 +692,21 @@
 }
 
 #endif
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestKlass {
+ public:
+  static void test_oop_is_instanceClassLoader() {
+    assert(SystemDictionary::ClassLoader_klass()->oop_is_instanceClassLoader(), "assert");
+    assert(!SystemDictionary::String_klass()->oop_is_instanceClassLoader(), "assert");
+  }
+};
+
+void TestKlass_test() {
+  TestKlass::test_oop_is_instanceClassLoader();
+}
+
+#endif
--- a/src/share/vm/oops/klass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/klass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -502,6 +502,7 @@
   virtual bool oop_is_objArray_slow()       const { return false; }
   virtual bool oop_is_typeArray_slow()      const { return false; }
  public:
+  virtual bool oop_is_instanceClassLoader() const { return false; }
   virtual bool oop_is_instanceMirror()      const { return false; }
   virtual bool oop_is_instanceRef()         const { return false; }
 
@@ -699,8 +700,8 @@
   virtual const char* internal_name() const = 0;
 
   // Verification
-  virtual void verify_on(outputStream* st, bool check_dictionary);
-  void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
+  virtual void verify_on(outputStream* st);
+  void verify() { verify_on(tty); }
 
 #ifndef PRODUCT
   bool verify_vtable_index(int index);
--- a/src/share/vm/oops/klassPS.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/klassPS.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/klassVtable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/klassVtable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -622,7 +622,7 @@
   // this check for all access permissions.
   InstanceKlass *sk = InstanceKlass::cast(super);
   if (sk->has_miranda_methods()) {
-    if (sk->lookup_method_in_all_interfaces(name, signature) != NULL) {
+    if (sk->lookup_method_in_all_interfaces(name, signature, false) != NULL) {
       return false;  // found a matching miranda; we do not need a new entry
     }
   }
@@ -743,7 +743,7 @@
       if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
         InstanceKlass *sk = InstanceKlass::cast(super);
         // check if it is a duplicate of a super's miranda
-        if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
+        if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), false) == NULL) {
           new_mirandas->append(im);
         }
         if (all_mirandas != NULL) {
@@ -1085,6 +1085,8 @@
     Method* m = methods->at(i);
     methodHandle target;
     if (m->has_itable_index()) {
+      // This search must match the runtime resolution, i.e. selection search for invokeinterface
+      // to correctly enforce loader constraints for interface method inheritance
       LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
     }
     if (target == NULL || !target->is_public() || target->is_abstract()) {
--- a/src/share/vm/oops/metadata.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/metadata.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
   int identity_hash()                { return (int)(uintptr_t)this; }
 
   // Rehashing support for tables containing pointers to this
-  unsigned int new_hash(jint seed)   { ShouldNotReachHere();  return 0; }
+  unsigned int new_hash(juint seed)   { ShouldNotReachHere();  return 0; }
 
   virtual bool is_klass()              const volatile { return false; }
   virtual bool is_method()             const volatile { return false; }
--- a/src/share/vm/oops/method.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/method.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,13 +38,11 @@
 #include "utilities/accessFlags.hpp"
 #include "utilities/growableArray.hpp"
 
-// A Method* represents a Java method.
+// A Method represents a Java method.
 //
 // Memory layout (each line represents a word). Note that most applications load thousands of methods,
 // so keeping the size of this structure small has a big impact on footprint.
 //
-// We put all oops and method_size first for better gc cache locality.
-//
 // The actual bytecodes are inlined after the end of the Method struct.
 //
 // There are bits in the access_flags telling whether inlined tables are present.
@@ -64,17 +62,17 @@
 // | header                                               |
 // | klass                                                |
 // |------------------------------------------------------|
-// | ConstMethod*                   (oop)                 |
+// | ConstMethod*                   (metadata)            |
 // |------------------------------------------------------|
-// | methodData                     (oop)                 |
-// | methodCounters                                       |
+// | MethodData*                    (metadata)            |
+// | MethodCounters                                       |
 // |------------------------------------------------------|
 // | access_flags                                         |
 // | vtable_index                                         |
 // |------------------------------------------------------|
 // | result_index (C++ interpreter only)                  |
 // |------------------------------------------------------|
-// | method_size             |   intrinsic_id|   flags    |
+// | method_size             | intrinsic_id  |   flags    |
 // |------------------------------------------------------|
 // | code                           (pointer)             |
 // | i2i                            (pointer)             |
@@ -350,16 +348,21 @@
   }
 
   void set_method_data(MethodData* data)       {
-    _method_data = data;
+    // The store into method must be released. On platforms without
+    // total store order (TSO) the reference may become visible before
+    // the initialization of data otherwise.
+    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
   }
 
   MethodCounters* method_counters() const {
     return _method_counters;
   }
 
-
   void set_method_counters(MethodCounters* counters) {
-    _method_counters = counters;
+    // The store into method must be released. On platforms without
+    // total store order (TSO) the reference may become visible before
+    // the initialization of data otherwise.
+    OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
   }
 
 #ifdef TIERED
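
The two setters above are release stores for the same reason: _method_data and _method_counters are read without a lock, so on a machine without total store order the pointer must not become visible before the object it refers to is initialized. A hedged sketch of the writer/reader pairing, again using std::atomic in place of OrderAccess (MethodData and the field are simplified stand-ins):

    #include <atomic>

    struct MethodData { int counters[8] = {0}; };

    std::atomic<MethodData*> g_method_data{nullptr};

    // Writer: initialize first, then publish with release semantics,
    // the ordering OrderAccess::release_store_ptr supplies on non-TSO CPUs.
    void install_method_data() {
      MethodData* md = new MethodData();
      md->counters[0] = 1;                                 // initialize...
      g_method_data.store(md, std::memory_order_release);  // ...then publish
    }

    // Reader: the acquire load guarantees that once the pointer is seen
    // as non-null, the writer's initialization is visible as well.
    int read_first_counter() {
      MethodData* md = g_method_data.load(std::memory_order_acquire);
      return md != nullptr ? md->counters[0] : -1;
    }
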
--- a/src/share/vm/oops/methodData.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/methodData.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -80,8 +80,42 @@
   _data = NULL;
 }
 
+char* ProfileData::print_data_on_helper(const MethodData* md) const {
+  DataLayout* dp  = md->extra_data_base();
+  DataLayout* end = md->extra_data_limit();
+  stringStream ss;
+  for (;; dp = MethodData::next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag:
+      if (dp->bci() == bci()) {
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        int trap = data->trap_state();
+        char buf[100];
+        ss.print("trap/");
+        data->method()->print_short_name(&ss);
+        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
+      }
+      break;
+    case DataLayout::bit_data_tag:
+      break;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      return ss.as_string();
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+  return NULL;
+}
+
+void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
+  print_data_on(st, print_data_on_helper(md));
+}
+
 #ifndef PRODUCT
-void ProfileData::print_shared(outputStream* st, const char* name) const {
+void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
   st->print("bci: %d", bci());
   st->fill_to(tab_width_one);
   st->print("%s", name);
@@ -91,9 +125,13 @@
     char buf[100];
     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
   }
+  if (extra != NULL) {
+    st->print("%s", extra);
+  }
   int flags = data()->flags();
-  if (flags != 0)
+  if (flags != 0) {
     st->print("flags(%d) ", flags);
+  }
 }
 
 void ProfileData::tab(outputStream* st, bool first) const {
@@ -109,8 +147,8 @@
 
 
 #ifndef PRODUCT
-void BitData::print_data_on(outputStream* st) const {
-  print_shared(st, "BitData");
+void BitData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "BitData", extra);
 }
 #endif // !PRODUCT
 
@@ -120,8 +158,8 @@
 // A CounterData corresponds to a simple counter.
 
 #ifndef PRODUCT
-void CounterData::print_data_on(outputStream* st) const {
-  print_shared(st, "CounterData");
+void CounterData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "CounterData", extra);
   st->print_cr("count(%u)", count());
 }
 #endif // !PRODUCT
@@ -150,8 +188,8 @@
 }
 
 #ifndef PRODUCT
-void JumpData::print_data_on(outputStream* st) const {
-  print_shared(st, "JumpData");
+void JumpData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "JumpData", extra);
   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 }
 #endif // !PRODUCT
@@ -332,8 +370,8 @@
   st->cr();
 }
 
-void CallTypeData::print_data_on(outputStream* st) const {
-  CounterData::print_data_on(st);
+void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  CounterData::print_data_on(st, extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -346,8 +384,8 @@
   }
 }
 
-void VirtualCallTypeData::print_data_on(outputStream* st) const {
-  VirtualCallData::print_data_on(st);
+void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  VirtualCallData::print_data_on(st, extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -416,8 +454,8 @@
     }
   }
 }
-void ReceiverTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ReceiverTypeData");
+void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ReceiverTypeData", extra);
   print_receiver_data_on(st);
 }
 
@@ -446,8 +484,8 @@
 }
 #endif
 
-void VirtualCallData::print_data_on(outputStream* st) const {
-  print_shared(st, "VirtualCallData");
+void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "VirtualCallData", extra);
   print_receiver_data_on(st);
 #ifdef GRAAL
   print_method_data_on(st);
@@ -499,10 +537,15 @@
   return mdp;
 }
 
+#ifdef CC_INTERP
+DataLayout* RetData::advance(MethodData *md, int bci) {
+  return (DataLayout*) md->bci_to_dp(bci);
+}
+#endif // CC_INTERP
 
 #ifndef PRODUCT
-void RetData::print_data_on(outputStream* st) const {
-  print_shared(st, "RetData");
+void RetData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "RetData", extra);
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -536,8 +579,8 @@
 }
 
 #ifndef PRODUCT
-void BranchData::print_data_on(outputStream* st) const {
-  print_shared(st, "BranchData");
+void BranchData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "BranchData", extra);
   st->print_cr("taken(%u) displacement(%d)",
                taken(), displacement());
   tab(st);
@@ -610,8 +653,8 @@
 }
 
 #ifndef PRODUCT
-void MultiBranchData::print_data_on(outputStream* st) const {
-  print_shared(st, "MultiBranchData");
+void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "MultiBranchData", extra);
   st->print_cr("default_count(%u) displacement(%d)",
                default_count(), default_displacement());
   int cases = number_of_cases();
@@ -624,8 +667,8 @@
 #endif
 
 #ifndef PRODUCT
-void ArgInfoData::print_data_on(outputStream* st) const {
-  print_shared(st, "ArgInfoData");
+void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ArgInfoData", extra);
   int nargs = number_of_args();
   for (int i = 0; i < nargs; i++) {
     st->print("  0x%x", arg_modified(i));
@@ -656,10 +699,17 @@
 }
 
 #ifndef PRODUCT
-void ParametersTypeData::print_data_on(outputStream* st) const {
-  st->print("parameter types");
+void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
+  st->print("parameter types", extra);
   _parameters.print_data_on(st);
 }
+
+void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "SpeculativeTrapData", extra);
+  tab(st);
+  method()->print_short_name(st);
+  st->cr();
+}
 #endif
 
 // ==================================================================
@@ -785,15 +835,35 @@
   return DataLayout::compute_size_in_bytes(cell_count);
 }
 
+bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
+  // Bytecodes for which we may use speculation
+  switch (code) {
+  case Bytecodes::_checkcast:
+  case Bytecodes::_instanceof:
+  case Bytecodes::_aastore:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokeinterface:
+  case Bytecodes::_if_acmpeq:
+  case Bytecodes::_if_acmpne:
+  case Bytecodes::_invokestatic:
+#ifdef COMPILER2
+    return UseTypeSpeculation;
+#endif
+  default:
+    return false;
+  }
+  return false;
+}
+
 #ifdef GRAAL
-int MethodData::compute_extra_data_count(int data_size, int empty_bc_count) {
+int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
   if (!ProfileTraps) return 0;
 
   // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
   return MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 }
 #else
-int MethodData::compute_extra_data_count(int data_size, int empty_bc_count) {
+int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
   if (ProfileTraps) {
     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
@@ -804,7 +874,18 @@
       extra_data_count = one_percent_of_data;
     if (extra_data_count > empty_bc_count)
       extra_data_count = empty_bc_count;  // no need for more
-    return extra_data_count;
+
+    // Make sure we have a minimum number of extra data slots to
+    // allocate SpeculativeTrapData entries. We would want to have one
+    // entry per compilation that inlines this method and for which
+    // some type speculation assumption fails. So the room we need for
+    // the SpeculativeTrapData entries doesn't directly depend on the
+    // size of the method. Because it's hard to estimate, we reserve
+    // space for an arbitrary number of entries.
+    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
+      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
+
+    return MAX2(extra_data_count, spec_data_count);
   } else {
     return 0;
   }
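
To make the reservation concrete: the assertion later in this file pins a SpeculativeTrapData entry at two cells (one DataLayout header cell plus one cell for the Method*), so spec_data_count comes out to SpecTrapLimitExtraEntries * 2 header-sized DataLayout units, exactly enough for that many trap entries. Assuming a SpecTrapLimitExtraEntries of, say, 3 (the flag's default is not visible in this diff), six units are reserved, and the 3%-of-BCIs estimate only wins when it is already larger than that floor.
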
@@ -818,15 +899,17 @@
   BytecodeStream stream(method);
   Bytecodes::Code c;
   int empty_bc_count = 0;  // number of bytecodes lacking data
+  bool needs_speculative_traps = false;
   while ((c = stream.next()) >= 0) {
     int size_in_bytes = compute_data_size(&stream);
     data_size += size_in_bytes;
     if (size_in_bytes == 0 GRAAL_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
+    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
   }
   int object_size = in_bytes(data_offset()) + data_size;
 
   // Add some extra DataLayout cells (at least one) to track stray traps.
-  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
+  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
 
   // Add a cell to record information about modified arguments.
@@ -1062,24 +1145,28 @@
   _data[0] = 0;  // apparently not set below.
   BytecodeStream stream(method());
   Bytecodes::Code c;
+  bool needs_speculative_traps = false;
   while ((c = stream.next()) >= 0) {
     int size_in_bytes = initialize_data(&stream, data_size);
     data_size += size_in_bytes;
     if (size_in_bytes == 0 GRAAL_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
+    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
   }
   _data_size = data_size;
   int object_size = in_bytes(data_offset()) + data_size;
 
   // Add some extra DataLayout cells (at least one) to track stray traps.
-  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
+  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
 
-#ifdef GRAAL
-  if (for_reprofile) {
-    // Clear out extra data
-    Copy::zero_to_bytes((HeapWord*) extra_data_base(), extra_size);
-  }
-#endif
+//#ifdef GRAAL
+//  if (for_reprofile) {
+//    // Clear out extra data
+//    Copy::zero_to_bytes((HeapWord*) extra_data_base(), extra_size);
+//  }
+//#endif
+  // Let's zero the space for the extra data
+  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
 
   // Add a cell to record information about modified arguments.
   // Set up _args_modified array after traps cells so that
@@ -1092,17 +1179,17 @@
   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
   object_size += extra_size + arg_data_size;
 
-  int args_cell = ParametersTypeData::compute_cell_count(method());
+  int parms_cell = ParametersTypeData::compute_cell_count(method());
   // If we are profiling parameters, we reserve an area near the end
   // of the MDO after the slots for bytecodes (because there's no bci
   // for method entry so they don't fit with the framework for the
   // profiling of bytecodes). We store the offset within the MDO of
   // this area (or -1 if no parameter is profiled)
-  if (args_cell > 0) {
-    object_size += DataLayout::compute_size_in_bytes(args_cell);
+  if (parms_cell > 0) {
+    object_size += DataLayout::compute_size_in_bytes(parms_cell);
     _parameters_type_data_di = data_size + extra_size + arg_data_size;
     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
-    dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell);
+    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
   } else {
     _parameters_type_data_di = -1;
   }
@@ -1194,39 +1281,113 @@
       break;
     }
   }
-  return bci_to_extra_data(bci, false);
+  return bci_to_extra_data(bci, NULL, false);
 }
 
-// Translate a bci to its corresponding extra data, or NULL.
-ProfileData* MethodData::bci_to_extra_data(int bci, bool create_if_missing) {
-  DataLayout* dp    = extra_data_base();
-  DataLayout* end   = extra_data_limit();
-  DataLayout* avail = NULL;
-  for (; dp < end; dp = next_extra(dp)) {
+DataLayout* MethodData::next_extra(DataLayout* dp) {
+  int nb_cells = 0;
+  switch(dp->tag()) {
+  case DataLayout::bit_data_tag:
+  case DataLayout::no_tag:
+    nb_cells = BitData::static_cell_count();
+    break;
+  case DataLayout::speculative_trap_data_tag:
+    nb_cells = SpeculativeTrapData::static_cell_count();
+    break;
+  default:
+    fatal(err_msg("unexpected tag %d", dp->tag()));
+  }
+  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
+}
+
+ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp) {
+  DataLayout* end = extra_data_limit();
+
+  for (;; dp = next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
     // No need for "OrderAccess::load_acquire" ops,
     // since the data structure is monotonic.
-    if (dp->tag() == DataLayout::no_tag)  break;
-    if (dp->tag() == DataLayout::arg_info_data_tag) {
-      dp = end; // ArgInfoData is at the end of extra data section.
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
+      return NULL;
+    case DataLayout::arg_info_data_tag:
+      dp = end;
+      return NULL; // ArgInfoData is at the end of extra data section.
+    case DataLayout::bit_data_tag:
+      if (m == NULL && dp->bci() == bci) {
+        return new BitData(dp);
+      }
       break;
-    }
-    if (dp->bci() == bci) {
-      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-      return new BitData(dp);
+    case DataLayout::speculative_trap_data_tag:
+      if (m != NULL) {
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        // data->method() may be null in case of a concurrent
+        // allocation. Assume it's for the same method and use that
+        // entry in that case.
+        if (dp->bci() == bci) {
+          if (data->method() == NULL) {
+            return NULL;
+          } else if (data->method() == m) {
+            return data;
+          }
+        }
+      }
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
   }
-  if (create_if_missing && dp < end) {
-    // Allocate this one.  There is no mutual exclusion,
-    // so two threads could allocate different BCIs to the
-    // same data layout.  This means these extra data
-    // records, like most other MDO contents, must not be
-    // trusted too much.
-    DataLayout temp;
-    temp.initialize(DataLayout::bit_data_tag, bci, 0);
-    dp->release_set_header(temp.header());
-    assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-    //NO: assert(dp->bci() == bci, "no concurrent allocation");
-    return new BitData(dp);
+  return NULL;
+}
+
+
+// Translate a bci to its corresponding extra data, or NULL.
+ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
+  // This code assumes an entry for a SpeculativeTrapData is 2 cells
+  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
+         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
+         "code needs to be adjusted");
+
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  // Allocation in the extra data space has to be atomic because not
+  // all entries have the same size and non-atomic concurrent
+  // allocation would result in a corrupted extra data space.
+  while (true) {
+    ProfileData* result = bci_to_extra_data_helper(bci, m, dp);
+    if (result != NULL) {
+      return result;
+    }
+
+    if (create_if_missing && dp < end) {
+      assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
+      assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
+      u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
+      // SpeculativeTrapData is 2 slots. Make sure we have room.
+      if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
+        return NULL;
+      }
+      DataLayout temp;
+      temp.initialize(tag, bci, 0);
+      // May have been set concurrently
+      if (dp->header() != temp.header() && !dp->atomic_set_header(temp.header())) {
+        // Allocation failure because of concurrent allocation. Try
+        // again.
+        continue;
+      }
+      assert(dp->tag() == tag, "sane");
+      assert(dp->bci() == bci, "no concurrent allocation");
+      if (tag == DataLayout::bit_data_tag) {
+        return new BitData(dp);
+      } else {
+        // If being allocated concurrently, one trap may be lost
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        data->set_method(m);
+        return data;
+      }
+    }
+    return NULL;
   }
   return NULL;
 }
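
Because entries in the extra data space have two different sizes, the code above claims a slot by installing its header with a compare-and-swap and restarts the whole walk when the CAS loses a race. A compact sketch of the claim-by-CAS idea over a fixed-size slot array (the array, the zero-means-free encoding, and claim_slot are illustrative assumptions; the real code additionally handles variable entry sizes and tags):

    #include <atomic>
    #include <cstdint>

    // 0 means the slot is free; any other value is an installed header
    // encoding a (tag, bci) pair, as DataLayout::atomic_set_header assumes.
    std::atomic<intptr_t> g_headers[64];

    // Claim the first free slot for 'header'. On CAS failure another
    // thread won the race; if it installed an identical header we reuse
    // that slot, otherwise we keep scanning, mirroring the retry above.
    int claim_slot(intptr_t header) {
      for (int i = 0; i < 64; i++) {
        intptr_t expected = 0;
        if (g_headers[i].compare_exchange_strong(expected, header)) {
          return i;  // we installed the header atomically
        }
        if (expected == header) {
          return i;  // a concurrent thread installed the same entry
        }
      }
      return -1;  // extra data space exhausted
    }

As in bci_to_extra_data, any payload (here, the Method* of a SpeculativeTrapData) is written only after the header is claimed, which is why a concurrent reader may briefly see an entry whose method is still null and must tolerate it.
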
@@ -1271,25 +1432,35 @@
   for ( ; is_valid(data); data = next_data(data)) {
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
-    data->print_data_on(st);
+    data->print_data_on(st, this);
   }
   st->print_cr("--- Extra data:");
   DataLayout* dp    = extra_data_base();
   DataLayout* end   = extra_data_limit();
-  for (; dp < end; dp = next_extra(dp)) {
+  for (;; dp = next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
     // No need for "OrderAccess::load_acquire" ops,
     // since the data structure is monotonic.
-    if (dp->tag() == DataLayout::no_tag)  continue;
-    if (dp->tag() == DataLayout::bit_data_tag) {
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
+      continue;
+    case DataLayout::bit_data_tag:
       data = new BitData(dp);
-    } else {
-      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
+      break;
+    case DataLayout::speculative_trap_data_tag:
+      data = new SpeculativeTrapData(dp);
+      break;
+    case DataLayout::arg_info_data_tag:
       data = new ArgInfoData(dp);
       dp = end; // ArgInfoData is at the end of extra data section.
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
     data->print_data_on(st);
+    if (dp >= end) return;
   }
 }
 #endif
@@ -1412,3 +1583,110 @@
   assert(profile_parameters_jsr292_only(), "inconsistent");
   return m->is_compiled_lambda_form();
 }
+
+void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
+  if (shift == 0) {
+    return;
+  }
+  if (!reset) {
+    // Move all cells of trap entry at dp left by "shift" cells
+    intptr_t* start = (intptr_t*)dp;
+    intptr_t* end = (intptr_t*)next_extra(dp);
+    for (intptr_t* ptr = start; ptr < end; ptr++) {
+      *(ptr-shift) = *ptr;
+    }
+  } else {
+    // Reset "shift" cells stopping at dp
+    intptr_t* start = ((intptr_t*)dp) - shift;
+    intptr_t* end = (intptr_t*)dp;
+    for (intptr_t* ptr = start; ptr < end; ptr++) {
+      *ptr = 0;
+    }
+  }
+}
+
+// Remove SpeculativeTrapData entries that reference an unloaded
+// method
+void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  int shift = 0;
+  for (; dp < end; dp = next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+      Method* m = data->method();
+      assert(m != NULL, "should have a method");
+      if (!m->method_holder()->is_loader_alive(is_alive)) {
+        // "shift" accumulates the number of cells for dead
+        // SpeculativeTrapData entries that have been seen so
+        // far. Following entries must be shifted left by that many
+        // cells to remove the dead SpeculativeTrapData entries.
+        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
+      } else {
+        // Shift this entry left if it follows dead
+        // SpeculativeTrapData entries
+        clean_extra_data_helper(dp, shift);
+      }
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      // Shift this entry left if it follows dead SpeculativeTrapData
+      // entries
+      clean_extra_data_helper(dp, shift);
+      continue;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      // We are at end of the live trap entries. The previous "shift"
+      // cells contain entries that are either dead or were shifted
+      // left. They need to be reset to no_tag
+      clean_extra_data_helper(dp, shift, true);
+      return;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+}
+
+// Verify there's no unloaded method referenced by a
+// SpeculativeTrapData entry
+void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
+#ifdef ASSERT
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  for (; dp < end; dp = next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+      Method* m = data->method();
+      assert(m != NULL && m->method_holder()->is_loader_alive(is_alive), "Method should exist");
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      continue;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      return;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+#endif
+}
+
+void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
+  for (ProfileData* data = first_data();
+       is_valid(data);
+       data = next_data(data)) {
+    data->clean_weak_klass_links(is_alive);
+  }
+  ParametersTypeData* parameters = parameters_type_data();
+  if (parameters != NULL) {
+    parameters->clean_weak_klass_links(is_alive);
+  }
+
+  clean_extra_data(is_alive);
+  verify_extra_data_clean(is_alive);
+}
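
clean_extra_data() compacts the extra data section in a single left-to-right pass: shift accumulates how many cells of dead SpeculativeTrapData entries precede the current position, every surviving entry is slid left by that amount, and the freed tail is reset to no_tag at the end. A minimal sketch of the same pass over a flat cell array (the layout and the is_dead predicate are illustrative stand-ins for the tag-driven walk):

    #include <cstddef>

    void compact_cells(long* cells, size_t n, bool (*is_dead)(long)) {
      size_t shift = 0;                 // dead cells seen so far
      for (size_t i = 0; i < n; i++) {
        if (is_dead(cells[i])) {
          shift++;                      // accumulate, don't copy
        } else if (shift > 0) {
          cells[i - shift] = cells[i];  // slide the live cell left
        }
      }
      for (size_t i = n - shift; i < n; i++) {
        cells[i] = 0;                   // zero the freed tail (no_tag)
      }
    }
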
--- a/src/share/vm/oops/methodData.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/methodData.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -120,7 +120,8 @@
     arg_info_data_tag,
     call_type_data_tag,
     virtual_call_type_data_tag,
-    parameters_type_data_tag
+    parameters_type_data_tag,
+    speculative_trap_data_tag
   };
 
   enum {
@@ -189,8 +190,11 @@
   void set_header(intptr_t value) {
     _header._bits = value;
   }
-  void release_set_header(intptr_t value) {
-    OrderAccess::release_store_ptr(&_header._bits, value);
+  bool atomic_set_header(intptr_t value) {
+    if (Atomic::cmpxchg_ptr(value, (volatile intptr_t*)&_header._bits, 0) == 0) {
+      return true;
+    }
+    return false;
   }
   intptr_t header() {
     return _header._bits;
@@ -230,6 +234,11 @@
   static ByteSize cell_offset(int index) {
     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
   }
+#ifdef CC_INTERP
+  static int cell_offset_in_bytes(int index) {
+    return (int)offset_of(DataLayout, _cells[index]);
+  }
+#endif // CC_INTERP
   // Return a value which, when or-ed as a byte into _flags, sets the flag.
   static int flag_number_to_byte_constant(int flag_number) {
     assert(0 <= flag_number && flag_number < flag_limit, "oob");
@@ -266,6 +275,7 @@
 class     MultiBranchData;
 class     ArgInfoData;
 class     ParametersTypeData;
+class   SpeculativeTrapData;
 
 // ProfileData
 //
@@ -286,6 +296,8 @@
   // This is a pointer to a section of profiling data.
   DataLayout* _data;
 
+  char* print_data_on_helper(const MethodData* md) const;
+
 protected:
   DataLayout* data() { return _data; }
   const DataLayout* data() const { return _data; }
@@ -367,6 +379,41 @@
     _data = data;
   }
 
+#ifdef CC_INTERP
+  // Static low level accessors for DataLayout with ProfileData's semantics.
+
+  static int cell_offset_in_bytes(int index) {
+    return DataLayout::cell_offset_in_bytes(index);
+  }
+
+  static void increment_uint_at_no_overflow(DataLayout* layout, int index,
+                                            int inc = DataLayout::counter_increment) {
+    uint count = ((uint)layout->cell_at(index)) + inc;
+    if (count == 0) return;
+    layout->set_cell_at(index, (intptr_t) count);
+  }
+
+  static int int_at(DataLayout* layout, int index) {
+    return (int)layout->cell_at(index);
+  }
+
+  static int uint_at(DataLayout* layout, int index) {
+    return (uint)layout->cell_at(index);
+  }
+
+  static oop oop_at(DataLayout* layout, int index) {
+    return cast_to_oop(layout->cell_at(index));
+  }
+
+  static void set_intptr_at(DataLayout* layout, int index, intptr_t value) {
+    layout->set_cell_at(index, (intptr_t) value);
+  }
+
+  static void set_flag_at(DataLayout* layout, int flag_number) {
+    layout->set_flag_at(flag_number);
+  }
+#endif // CC_INTERP
+
 public:
   // Constructor for invalid ProfileData.
   ProfileData();
@@ -400,6 +447,7 @@
   virtual bool is_CallTypeData()    const { return false; }
   virtual bool is_VirtualCallTypeData()const { return false; }
   virtual bool is_ParametersTypeData() const { return false; }
+  virtual bool is_SpeculativeTrapData()const { return false; }
 
 
   BitData* as_BitData() const {
@@ -454,6 +502,10 @@
     assert(is_ParametersTypeData(), "wrong type");
     return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
   }
+  SpeculativeTrapData* as_SpeculativeTrapData() const {
+    assert(is_SpeculativeTrapData(), "wrong type");
+    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
+  }
 
 
   // Subclass specific initialization
@@ -469,12 +521,14 @@
   // translation here, and the required translators are in the ci subclasses.
   virtual void translate_from(const ProfileData* data) {}
 
-  virtual void print_data_on(outputStream* st) const {
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
     ShouldNotReachHere();
   }
 
+  void print_data_on(outputStream* st, const MethodData* md) const;
+
 #ifndef PRODUCT
-  void print_shared(outputStream* st, const char* name) const;
+  void print_shared(outputStream* st, const char* name, const char* extra) const;
   void tab(outputStream* st, bool first = false) const;
 #endif
 };
@@ -529,8 +583,22 @@
     return cell_offset(bit_cell_count);
   }
 
+#ifdef CC_INTERP
+  static int bit_data_size_in_bytes() {
+    return cell_offset_in_bytes(bit_cell_count);
+  }
+
+  static void set_null_seen(DataLayout* layout) {
+    set_flag_at(layout, null_seen_flag);
+  }
+
+  static DataLayout* advance(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)BitData::bit_data_size_in_bytes());
+  }
+#endif // CC_INTERP
+
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -573,8 +641,27 @@
     set_uint_at(count_off, count);
   }
 
+#ifdef CC_INTERP
+  static int counter_data_size_in_bytes() {
+    return cell_offset_in_bytes(counter_cell_count);
+  }
+
+  static void increment_count_no_overflow(DataLayout* layout) {
+    increment_uint_at_no_overflow(layout, count_off);
+  }
+
+  // Support decrementing the counter when a checkcast / subtype check fails.
+  static void decrement_count(DataLayout* layout) {
+    increment_uint_at_no_overflow(layout, count_off, -1);
+  }
+
+  static DataLayout* advance(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)CounterData::counter_data_size_in_bytes());
+  }
+#endif // CC_INTERP
+
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -643,11 +730,25 @@
     return cell_offset(displacement_off_set);
   }
 
+#ifdef CC_INTERP
+  static void increment_taken_count_no_overflow(DataLayout* layout) {
+    increment_uint_at_no_overflow(layout, taken_off_set);
+  }
+
+  static DataLayout* advance_taken(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)int_at(layout, displacement_off_set));
+  }
+
+  static uint taken_count(DataLayout* layout) {
+    return (uint) uint_at(layout, taken_off_set);
+  }
+#endif // CC_INTERP
+
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1058,7 +1159,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1197,9 +1298,46 @@
   // GC support
   virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
 
+#ifdef CC_INTERP
+  static int receiver_type_data_size_in_bytes() {
+    return cell_offset_in_bytes(static_cell_count());
+  }
+
+  static Klass *receiver_unchecked(DataLayout* layout, uint row) {
+    Klass* recv = (Klass*)layout->cell_at(receiver_cell_index(row));
+    return recv;
+  }
+
+  static void increment_receiver_count_no_overflow(DataLayout* layout, Klass *rcvr) {
+    const int num_rows = row_limit();
+    // Receiver already exists?
+    for (int row = 0; row < num_rows; row++) {
+      if (receiver_unchecked(layout, row) == rcvr) {
+        increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
+        return;
+      }
+    }
+    // New receiver, find a free slot.
+    for (int row = 0; row < num_rows; row++) {
+      if (receiver_unchecked(layout, row) == NULL) {
+        set_intptr_at(layout, receiver_cell_index(row), (intptr_t)rcvr);
+        increment_uint_at_no_overflow(layout, receiver_count_cell_index(row));
+        return;
+      }
+    }
+    // Receiver did not match any saved receiver and there is no empty row for it.
+    // Increment total counter to indicate polymorphic case.
+    increment_count_no_overflow(layout);
+  }
+
+  static DataLayout* advance(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)ReceiverTypeData::receiver_type_data_size_in_bytes());
+  }
+#endif // CC_INTERP
+
 #ifndef PRODUCT
   void print_receiver_data_on(outputStream* st) const;
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1231,6 +1369,16 @@
     return cell_offset(static_cell_count());
   }
 
+#ifdef CC_INTERP
+  static int virtual_call_data_size_in_bytes() {
+    return cell_offset_in_bytes(static_cell_count());
+  }
+
+  static DataLayout* advance(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)VirtualCallData::virtual_call_data_size_in_bytes());
+  }
+#endif // CC_INTERP
+
 #ifdef GRAAL
   static ByteSize method_offset(uint row) {
     return cell_offset(method_cell_index(row));
@@ -1287,7 +1435,7 @@
 #ifdef GRAAL
   void print_method_data_on(outputStream* st) const;
 #endif
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1413,7 +1561,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1508,11 +1656,15 @@
     return cell_offset(bci_displacement_cell_index(row));
   }
 
+#ifdef CC_INTERP
+  static DataLayout* advance(MethodData *md, int bci);
+#endif // CC_INTERP
+
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1572,11 +1724,25 @@
     return cell_offset(branch_cell_count);
   }
 
+#ifdef CC_INTERP
+  static int branch_data_size_in_bytes() {
+    return cell_offset_in_bytes(branch_cell_count);
+  }
+
+  static void increment_not_taken_count_no_overflow(DataLayout* layout) {
+    increment_uint_at_no_overflow(layout, not_taken_off_set);
+  }
+
+  static DataLayout* advance_not_taken(DataLayout* layout) {
+    return (DataLayout*) (((address)layout) + (ssize_t)BranchData::branch_data_size_in_bytes());
+  }
+#endif // CC_INTERP
+
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1611,6 +1777,20 @@
     set_int_at(aindex, value);
   }
 
+#ifdef CC_INTERP
+  // Static low level accessors for DataLayout with ArrayData's semantics.
+
+  static void increment_array_uint_at_no_overflow(DataLayout* layout, int index) {
+    int aindex = index + array_start_off_set;
+    increment_uint_at_no_overflow(layout, aindex);
+  }
+
+  static int array_int_at(DataLayout* layout, int index) {
+    int aindex = index + array_start_off_set;
+    return int_at(layout, aindex);
+  }
+#endif // CC_INTERP
+
   // Code generation support for subclasses.
   static ByteSize array_element_offset(int index) {
     return cell_offset(array_start_off_set + index);
@@ -1729,11 +1909,33 @@
     return in_ByteSize(relative_displacement_off_set) * cell_size;
   }
 
+#ifdef CC_INTERP
+  static void increment_count_no_overflow(DataLayout* layout, int index) {
+    if (index == -1) {
+      increment_array_uint_at_no_overflow(layout, default_count_off_set);
+    } else {
+      increment_array_uint_at_no_overflow(layout, case_array_start +
+                                                  index * per_case_cell_count +
+                                                  relative_count_off_set);
+    }
+  }
+
+  static DataLayout* advance(DataLayout* layout, int index) {
+    if (index == -1) {
+      return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, default_disaplacement_off_set));
+    } else {
+      return (DataLayout*) (((address)layout) + (ssize_t)array_int_at(layout, case_array_start +
+                                                                              index * per_case_cell_count +
+                                                                              relative_displacement_off_set));
+    }
+  }
+#endif // CC_INTERP
+
   // Specific initialization.
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1760,7 +1962,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1821,7 +2023,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 
   static ByteSize stack_slot_offset(int i) {
@@ -1833,6 +2035,54 @@
   }
 };
 
+// SpeculativeTrapData
+//
+// A SpeculativeTrapData is used to record traps due to type
+// speculation. It records the root of the compilation: the fact that
+// type speculation is wrong in the context of one compilation (for
+// method1) doesn't mean it's wrong in the context of another one (for
+// method2). Type speculation could have more or different data in the
+// context of the compilation of method2, so it's worthwhile to retry
+// an optimization that failed for the compilation of method1 in the
+// context of the compilation of method2.
+// Space for SpeculativeTrapData entries is allocated from the extra
+// data space in the MDO. If we run out of space, the trap data for
+// the ProfileData at that bci is updated.
+class SpeculativeTrapData : public ProfileData {
+protected:
+  enum {
+    method_offset,
+    speculative_trap_cell_count
+  };
+public:
+  SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
+    assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
+  }
+
+  virtual bool is_SpeculativeTrapData() const { return true; }
+
+  static int static_cell_count() {
+    return speculative_trap_cell_count;
+  }
+
+  virtual int cell_count() const {
+    return static_cell_count();
+  }
+
+  // Direct accessor
+  Method* method() const {
+    return (Method*)intptr_at(method_offset);
+  }
+
+  void set_method(Method* m) {
+    set_intptr_at(method_offset, (intptr_t)m);
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
+#endif
+};
+
 // MethodData*
 //
 // A MethodData* holds information which has been collected about
@@ -1876,8 +2126,11 @@
 // adjusted in the event of a change in control flow.
 //
 
+CC_INTERP_ONLY(class BytecodeInterpreter;)
+
 class MethodData : public Metadata {
   friend class VMStructs;
+  CC_INTERP_ONLY(friend class BytecodeInterpreter;)
 private:
   friend class ProfileData;
 
@@ -1963,6 +2216,7 @@
   // Helper for size computation
   static int compute_data_size(BytecodeStream* stream);
   static int bytecode_cell_count(Bytecodes::Code code);
+  static bool is_speculative_trap_bytecode(Bytecodes::Code code);
   enum { no_profile_data = -1, variable_cell_count = -2 };
 
   // Helper for initialization
@@ -2006,8 +2260,9 @@
   // What is the index of the first data entry?
   int first_di() const { return 0; }
 
+  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp);
   // Find or create an extra ProfileData:
-  ProfileData* bci_to_extra_data(int bci, bool create_if_missing);
+  ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
 
   // return the argument info cell
   ArgInfoData *arg_info();
@@ -2030,6 +2285,10 @@
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();
 
+  void clean_extra_data(BoolObjectClosure* is_alive);
+  void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
+  void verify_extra_data_clean(BoolObjectClosure* is_alive);
+
 public:
   static int header_size() {
     return sizeof(MethodData)/wordSize;
@@ -2038,7 +2297,7 @@
   // Compute the size of a MethodData* before it is created.
   static int compute_allocation_size_in_bytes(methodHandle method);
   static int compute_allocation_size_in_words(methodHandle method);
-  static int compute_extra_data_count(int data_size, int empty_bc_count);
+  static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
 
   // Determine if a given bytecode can have profile information.
   static bool bytecode_has_profile(Bytecodes::Code code) {
@@ -2179,9 +2438,26 @@
   ProfileData* bci_to_data(int bci);
 
   // Same, but try to create an extra_data record if one is needed:
-  ProfileData* allocate_bci_to_data(int bci) {
-    ProfileData* data = bci_to_data(bci);
-    return (data != NULL) ? data : bci_to_extra_data(bci, true);
+  ProfileData* allocate_bci_to_data(int bci, Method* m) {
+    ProfileData* data = NULL;
+    // If m is not NULL, try to allocate a SpeculativeTrapData entry
+    if (m == NULL) {
+      data = bci_to_data(bci);
+    }
+    if (data != NULL) {
+      return data;
+    }
+    data = bci_to_extra_data(bci, m, true);
+    if (data != NULL) {
+      return data;
+    }
+    // If SpeculativeTrapData allocation fails, try to allocate a
+    // regular entry
+    data = bci_to_data(bci);
+    if (data != NULL) {
+      return data;
+    }
+    return bci_to_extra_data(bci, NULL, true);
   }
 
   // Add a handful of extra data records, for trap tracking.
@@ -2189,7 +2465,7 @@
   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
   int extra_data_size() const { return (address)extra_data_limit()
                                - (address)extra_data_base(); }
-  static DataLayout* next_extra(DataLayout* dp) { return (DataLayout*)((address)dp + in_bytes(DataLayout::cell_offset(0))); }
+  static DataLayout* next_extra(DataLayout* dp);
 
   // Return (uint)-1 for overflow.
   uint trap_count(int reason) const {
@@ -2289,6 +2565,8 @@
   static bool profile_return();
   static bool profile_parameters();
   static bool profile_return_jsr292_only();
+
+  void clean_method_data(BoolObjectClosure* is_alive);
 };
 
 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
--- a/src/share/vm/oops/objArrayKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/objArrayKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -674,8 +674,8 @@
 
 // Verification
 
-void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
-  ArrayKlass::verify_on(st, check_dictionary);
+void ObjArrayKlass::verify_on(outputStream* st) {
+  ArrayKlass::verify_on(st);
   guarantee(element_klass()->is_klass(), "should be klass");
   guarantee(bottom_klass()->is_klass(), "should be klass");
   Klass* bk = bottom_klass();
--- a/src/share/vm/oops/objArrayKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/objArrayKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/objArrayKlass.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/objArrayKlass.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/oop.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/oop.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -102,7 +102,7 @@
 }
 
 // When String table needs to rehash
-unsigned int oopDesc::new_hash(jint seed) {
+unsigned int oopDesc::new_hash(juint seed) {
   EXCEPTION_MARK;
   ResourceMark rm;
   int length;
--- a/src/share/vm/oops/oop.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/oop.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,12 +109,13 @@
   int size_given_klass(Klass* klass);
 
   // type test operations (inlined in oop.inline.h)
-  bool is_instance()           const;
-  bool is_instanceMirror()     const;
-  bool is_instanceRef()        const;
-  bool is_array()              const;
-  bool is_objArray()           const;
-  bool is_typeArray()          const;
+  bool is_instance()            const;
+  bool is_instanceMirror()      const;
+  bool is_instanceClassLoader() const;
+  bool is_instanceRef()         const;
+  bool is_array()               const;
+  bool is_objArray()            const;
+  bool is_typeArray()           const;
 
  private:
   // field addresses in oop
@@ -362,7 +363,7 @@
   intptr_t slow_identity_hash();
 
   // Alternate hashing code if string table is rehashed
-  unsigned int new_hash(jint seed);
+  unsigned int new_hash(juint seed);
 
   // marks are forwarded to stack when object is locked
   bool     has_displaced_mark() const;
--- a/src/share/vm/oops/oop.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/oop.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -147,12 +147,13 @@
 
 inline bool oopDesc::is_a(Klass* k)        const { return klass()->is_subtype_of(k); }
 
-inline bool oopDesc::is_instance()           const { return klass()->oop_is_instance(); }
-inline bool oopDesc::is_instanceMirror()     const { return klass()->oop_is_instanceMirror(); }
-inline bool oopDesc::is_instanceRef()        const { return klass()->oop_is_instanceRef(); }
-inline bool oopDesc::is_array()              const { return klass()->oop_is_array(); }
-inline bool oopDesc::is_objArray()           const { return klass()->oop_is_objArray(); }
-inline bool oopDesc::is_typeArray()          const { return klass()->oop_is_typeArray(); }
+inline bool oopDesc::is_instance()            const { return klass()->oop_is_instance(); }
+inline bool oopDesc::is_instanceClassLoader() const { return klass()->oop_is_instanceClassLoader(); }
+inline bool oopDesc::is_instanceMirror()      const { return klass()->oop_is_instanceMirror(); }
+inline bool oopDesc::is_instanceRef()         const { return klass()->oop_is_instanceRef(); }
+inline bool oopDesc::is_array()               const { return klass()->oop_is_array(); }
+inline bool oopDesc::is_objArray()            const { return klass()->oop_is_objArray(); }
+inline bool oopDesc::is_typeArray()           const { return klass()->oop_is_typeArray(); }
 
 inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
 
@@ -490,9 +491,9 @@
   return size_given_klass(klass());
 }
 
-inline void update_barrier_set(void* p, oop v) {
+inline void update_barrier_set(void* p, oop v, bool release = false) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
-  oopDesc::bs()->write_ref_field(p, v);
+  oopDesc::bs()->write_ref_field(p, v, release);
 }
 
 template <class T> inline void update_barrier_set_pre(T* p, oop v) {
@@ -505,7 +506,10 @@
   } else {
     update_barrier_set_pre(p, v);
     oopDesc::encode_store_heap_oop(p, v);
-    update_barrier_set((void*)p, v);  // cast away type
+    // always_do_update_barrier == false =>
+    // Either we are at a safepoint (in GC) or CMS is not used. In both
+    // cases it's unnecessary to mark the card as dirty with release semantics.
+    update_barrier_set((void*)p, v, false /* release */);  // cast away type
   }
 }
 
@@ -513,7 +517,12 @@
   update_barrier_set_pre((T*)p, v);   // cast away volatile
   // Used by release_obj_field_put, so use release_store_ptr.
   oopDesc::release_encode_store_heap_oop(p, v);
-  update_barrier_set((void*)p, v);    // cast away type
+  // When using CMS we must mark the card corresponding to p as dirty
+  // with release semantics to prevent CMS from seeing the dirty card but
+  // not the new value v at p due to reordering of the two
+  // stores. Note that CMS has a concurrent precleaning phase, where
+  // it reads the card table while the Java threads are running.
+  update_barrier_set((void*)p, v, true /* release */);    // cast away type
 }
 
 // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
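The two comments above describe the reordering hazard that motivates the new release parameter. Below is a minimal, self-contained sketch of the same idea, modeled with std::atomic rather than HotSpot's BarrierSet/OrderAccess machinery (the names field, card, mutator_store, and preclean are stand-ins, not HotSpot identifiers): if the card-dirtying store can be reordered ahead of the reference store, a concurrent preclean may observe the dirty card yet miss the new value.

#include <atomic>

std::atomic<int>  field{0};   // stands in for the oop field at p
std::atomic<char> card{0};    // stands in for the card table entry for p

// Mutator side: store the new value, then dirty the card with release
// semantics so the two stores cannot be observed in the wrong order.
void mutator_store(int v) {
  field.store(v, std::memory_order_relaxed);
  card.store(1, std::memory_order_release);   // the 'true /* release */' case
}

// Precleaning side: once the dirty card is observed (with acquire),
// the preceding field store is guaranteed to be visible as well.
int preclean() {
  if (card.load(std::memory_order_acquire) == 1) {
    return field.load(std::memory_order_relaxed);  // guaranteed to see v
  }
  return -1;  // card not dirty yet; nothing to do
}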
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/oop.psgc.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/oop.psgc.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/symbol.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/symbol.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -207,7 +207,7 @@
 }
 
 // Alternate hashing for unbalanced symbol tables.
-unsigned int Symbol::new_hash(jint seed) {
+unsigned int Symbol::new_hash(juint seed) {
   ResourceMark rm;
   // Use alternate hashing algorithm on this symbol.
   return AltHashing::murmur3_32(seed, (const jbyte*)as_C_string(), utf8_length());
--- a/src/share/vm/oops/symbol.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/symbol.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -154,7 +154,7 @@
   int identity_hash()       { return _identity_hash; }
 
   // For symbol table alternate hashing
-  unsigned int new_hash(jint seed);
+  unsigned int new_hash(juint seed);
 
   // Reference counting.  See comments above this class for when to use.
   int refcount() const      { return _refcount; }
--- a/src/share/vm/oops/typeArrayKlass.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/typeArrayKlass.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/typeArrayKlass.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/typeArrayKlass.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/oops/typeArrayOop.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/oops/typeArrayOop.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -51,6 +51,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "orderAccess_linux_ppc.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "orderAccess_bsd_x86.inline.hpp"
 #endif
--- a/src/share/vm/opto/block.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/block.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,6 +144,10 @@
   remove_node(find_node(n));
 }
 
+bool Block::contains(const Node *n) const {
+  return _nodes.contains(n);
+}
+
 // Return empty status of a block.  Empty blocks contain only the head, other
 // ideal nodes, and an optional trailing goto.
 int Block::is_Empty() const {
@@ -526,18 +530,27 @@
 
 // Does this block end in a multiway branch that cannot have the default case
 // flipped for another case?
-static bool no_flip_branch( Block *b ) {
+static bool no_flip_branch(Block *b) {
   int branch_idx = b->number_of_nodes() - b->_num_succs-1;
-  if( branch_idx < 1 ) return false;
-  Node *bra = b->get_node(branch_idx);
-  if( bra->is_Catch() )
+  if (branch_idx < 1) {
+    return false;
+  }
+  Node *branch = b->get_node(branch_idx);
+  if (branch->is_Catch()) {
     return true;
-  if( bra->is_Mach() ) {
-    if( bra->is_MachNullCheck() )
+  }
+  if (branch->is_Mach()) {
+    if (branch->is_MachNullCheck()) {
       return true;
-    int iop = bra->as_Mach()->ideal_Opcode();
-    if( iop == Op_FastLock || iop == Op_FastUnlock )
+    }
+    int iop = branch->as_Mach()->ideal_Opcode();
+    if (iop == Op_FastLock || iop == Op_FastUnlock) {
       return true;
+    }
+    // Don't flip if branch has an implicit check.
+    if (branch->as_Mach()->is_TrapBasedCheckNode()) {
+      return true;
+    }
   }
   return false;
 }
@@ -696,10 +709,66 @@
   } // End of for all blocks
 }
 
+Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) {
+  // Trap based checks must fall through to the successor with
+  // PROB_ALWAYS.
+  // They should be an If with 2 successors.
+  assert(branch->is_MachIf(),   "must be If");
+  assert(block->_num_succs == 2, "must have 2 successors");
+
+  // Get the If node and the projection for the first successor.
+  MachIfNode *iff   = block->get_node(block->number_of_nodes()-3)->as_MachIf();
+  ProjNode   *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj();
+  ProjNode   *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj();
+  ProjNode   *projt = (proj0->Opcode() == Op_IfTrue)  ? proj0 : proj1;
+  ProjNode   *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1;
+
+  // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
+  assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
+  assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");
+
+  ProjNode *proj_always;
+  ProjNode *proj_never;
+  // We must negate the branch if the implicit check doesn't follow
+  // the branch's TRUE path. Then, the new TRUE branch target will
+  // be the old FALSE branch target.
+  if (iff->_prob <= 2*PROB_NEVER) {   // There are small rounding errors.
+    proj_never  = projt;
+    proj_always = projf;
+  } else {
+    // The trap is on the FALSE path: negate the branch so that the new
+    // TRUE branch target is the old FALSE branch target.
+    proj_never  = projf;
+    proj_always = projt;
+    iff->negate();
+  }
+  assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!");
+  // Map the successors properly
+  block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0)));   // The target of the trap.
+  block->_succs.map(1, get_block_for_node(proj_always->raw_out(0)));   // The fall through target.
+
+  if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) {
+    block->map_node(proj_never,  block->number_of_nodes() - block->_num_succs + 0);
+    block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1);
+  }
+
+  // Place the fall through block after this block.
+  Block *bs1 = block->non_connector_successor(1);
+  if (bs1 != bnext && move_to_next(bs1, block_pos)) {
+    bnext = bs1;
+  }
+  // If the fall through block still is not the next block, insert a goto.
+  if (bs1 != bnext) {
+    insert_goto_at(block_pos, 1);
+  }
+  return bnext;
+}
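fixup_trap_based_check packs several invariants into a few lines, so here is a toy model of just the normalization step, under the assumption that negate() flips the branch probability (ToyIf, TOY_PROB_NEVER, and normalize are made-up names, not C2 identifiers): the never-taken projection always ends up as the trap target in succs[0], the always-taken one as the fall through in succs[1].

// Toy model of the projection normalization above (not HotSpot code).
struct ToyIf {
  double prob;                         // probability of the TRUE path
  void negate() { prob = 1.0 - prob; }
};

const double TOY_PROB_NEVER = 1e-6;    // stand-in for PROB_NEVER

void normalize(ToyIf& iff, int proj_true, int proj_false,
               int& proj_never, int& proj_always) {
  if (iff.prob <= 2 * TOY_PROB_NEVER) {
    // TRUE path is already the trap; nothing to negate.
    proj_never  = proj_true;
    proj_always = proj_false;
  } else {
    // Trap is on the FALSE path: negate so the trap becomes the TRUE path.
    proj_never  = proj_false;
    proj_always = proj_true;
    iff.negate();
  }
  // Post-condition mirrors the assert above: the trap path is never taken.
  // assert(iff.prob <= 2 * TOY_PROB_NEVER);
}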
+
 // Fix up the final control flow for basic blocks.
 void PhaseCFG::fixup_flow() {
   // Fixup final control flow for the blocks.  Remove jump-to-next
-  // block.  If neither arm of a IF follows the conditional branch, we
+  // block. If neither arm of an IF follows the conditional branch, we
   // have to add a second jump after the conditional.  We place the
   // TRUE branch target in succs[0] for both GOTOs and IFs.
   for (uint i = 0; i < number_of_blocks(); i++) {
@@ -719,25 +788,39 @@
     // Check for multi-way branches where I cannot negate the test to
     // exchange the true and false targets.
     if (no_flip_branch(block)) {
-      // Find fall through case - if must fall into its target
+      // Find the fall through case - the If must fall into its target.
+      // Get the index of the branch's first successor.
       int branch_idx = block->number_of_nodes() - block->_num_succs;
-      for (uint j2 = 0; j2 < block->_num_succs; j2++) {
-        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
-        if (p->_con == 0) {
-          // successor j2 is fall through case
-          if (block->non_connector_successor(j2) != bnext) {
-            // but it is not the next block => insert a goto
-            insert_goto_at(i, j2);
+
+      // The branch node sits one slot before its first successor projection.
+      Node *branch = block->get_node(branch_idx-1);
+
+      // Handle no-flip branches which have implicit checks and which require
+      // special block ordering and individual semantics of the 'fall through
+      // case'.
+      if ((TrapBasedNullChecks || TrapBasedRangeChecks) &&
+          branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) {
+        bnext = fixup_trap_based_check(branch, block, i, bnext);
+      } else {
+        // Else, default handling for no-flip branches
+        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
+          const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
+          if (p->_con == 0) {
+            // successor j2 is fall through case
+            if (block->non_connector_successor(j2) != bnext) {
+              // but it is not the next block => insert a goto
+              insert_goto_at(i, j2);
+            }
+            // Put taken branch in slot 0
+            if (j2 == 0 && block->_num_succs == 2) {
+              // Flip targets in succs map
+              Block *tbs0 = block->_succs[0];
+              Block *tbs1 = block->_succs[1];
+              block->_succs.map(0, tbs1);
+              block->_succs.map(1, tbs0);
+            }
+            break;
           }
-          // Put taken branch in slot 0
-          if (j2 == 0 && block->_num_succs == 2) {
-            // Flip targets in succs map
-            Block *tbs0 = block->_succs[0];
-            Block *tbs1 = block->_succs[1];
-            block->_succs.map(0, tbs1);
-            block->_succs.map(1, tbs0);
-          }
-          break;
         }
       }
 
@@ -844,6 +927,228 @@
 }
 
 
+// postalloc_expand: Expand nodes after register allocation.
+//
+// postalloc_expand has to be called after register allocation, just
+// before output (i.e. scheduling). It only gets called if
+// Matcher::require_postalloc_expand is true.
+//
+// Background:
+//
+// Nodes that are expanded (one compound node requiring several
+// assembler instructions to be implemented split into two or more
+// non-compound nodes) after register allocation are not as nice as
+// the ones expanded before register allocation - they don't
+// participate in optimizations such as global code motion. But after
+// register allocation we can expand nodes that use registers which
+// are not spillable or registers that are not allocated, because the
+// old compound node is simply replaced (in its location in the basic
+// block) by a new subgraph which does not contain compound nodes any
+// more. The scheduler called during output can later on process these
+// non-compound nodes.
+//
+// Implementation:
+//
+// Nodes requiring postalloc expand are specified in the ad file by using
+// a postalloc_expand statement instead of ins_encode. A postalloc_expand
+// contains a single call to an encoding, as does an ins_encode
+// statement. Instead of an emit() function, a postalloc_expand() function
+// is generated that doesn't emit assembler but creates a new
+// subgraph. The code below calls this postalloc_expand function for each
+// node with the appropriate attribute. This function returns the new
+// nodes generated in an array passed in the call. The old node,
+// potential MachTemps before and potential Projs after it then get
+// disconnected and replaced by the new nodes. The instruction
+// generating the result has to be the last one in the array. In
+// general it is assumed that Projs after the node expanded are
+// kills. These kills are not required any more after expanding as
+// there are now explicitly visible def-use chains and the Projs are
+// removed. This does not hold for calls: they not only have
+// kill-Projs but also Projs defining values. Therefore Projs after
+// the expanded node are removed for all nodes except calls. If a node
+// is to be reused, it must be added to the returned nodes list; it is
+// then inserted into the block again.
+//
+// Implementing the postalloc_expand function for a node in an enc_class
+// is rather tedious. It requires knowledge about many node details, as
+// the nodes and the subgraph must be hand-crafted. To simplify this,
+// adlc generates some utility variables into the postalloc_expand function,
+// e.g., variables holding the operands as specified by the
+// postalloc_expand encoding specification:
+//  * unsigned idx_<par_name>  holding the index of the node in the ins
+//  * Node *n_<par_name>       holding the node loaded from the ins
+//  * MachOpnd *op_<par_name>  holding the corresponding operand
+//
+// The ordering of operands cannot be determined by looking at a
+// rule. Especially if a match rule matches several different trees,
+// several nodes are generated from one instruct specification with
+// different operand orderings. In this case the adlc-generated
+// variables are the only way to access the ins and operands
+// deterministically.
+//
+// If assigning a register to a node that contains an oop, don't
+// forget to call ra_->set_oop() for the node.
+void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
+  GrowableArray <Node *> new_nodes(32); // New nodes, filled in by a node's postalloc_expand function.
+  GrowableArray <Node *> remove(32);
+  GrowableArray <Node *> succs(32);
+  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
+  DEBUG_ONLY(bool foundNode = false);
+
+  // For all blocks.
+  for (uint i = 0; i < number_of_blocks(); i++) {
+    Block *b = _blocks[i];
+    // For all instructions in the current block.
+    for (uint j = 0; j < b->number_of_nodes(); j++) {
+      Node *n = b->get_node(j);
+      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
+#ifdef ASSERT
+        if (TracePostallocExpand) {
+          if (!foundNode) {
+            foundNode = true;
+            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
+                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
+          }
+          tty->print("  postalloc expanding "); n->dump();
+          if (Verbose) {
+            tty->print("    with ins:\n");
+            for (uint k = 0; k < n->len(); ++k) {
+              if (n->in(k)) { tty->print("        "); n->in(k)->dump(); }
+            }
+          }
+        }
+#endif
+        new_nodes.clear();
+        // Collect nodes that have to be removed from the block later on.
+        uint req = n->req();
+        remove.clear();
+        for (uint k = 0; k < req; ++k) {
+          if (n->in(k) && n->in(k)->is_MachTemp()) {
+            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
+            n->in(k)->del_req(0);
+            j--;
+          }
+        }
+
+        // Check whether we can allocate enough nodes. This sets a fixed
+        // upper limit on the size of a postalloc expand.
+        uint unique_limit = C->unique() + 40;
+        if (unique_limit >= _ra->node_regs_max_index()) {
+          Compile::current()->record_failure("out of nodes in postalloc expand");
+          return;
+        }
+
+        // Emit (i.e. generate new nodes).
+        n->as_Mach()->postalloc_expand(&new_nodes, _ra);
+
+        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");
+
+        // Disconnect the inputs of the old node.
+        //
+        // We reuse MachSpillCopy nodes. If we need to expand them, there
+        // are many, so reusing pays off. If reused, the node already
+        // has the new ins. n must be the last node on new_nodes list.
+        if (!n->is_MachSpillCopy()) {
+          for (int k = req - 1; k >= 0; --k) {
+            n->del_req(k);
+          }
+        }
+
+#ifdef ASSERT
+        // Check that all nodes have proper operands.
+        for (int k = 0; k < new_nodes.length(); ++k) {
+          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
+          MachNode *m = new_nodes.at(k)->as_Mach();
+          for (unsigned int l = 0; l < m->num_opnds(); ++l) {
+            if (MachOper::notAnOper(m->_opnds[l])) {
+              outputStream *os = tty;
+              os->print("Node %s ", m->Name());
+              os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
+              assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
+            }
+          }
+        }
+#endif
+
+        // Collect succs of the old node in 'remove' (for projections) and in
+        // 'succs' (for all other nodes); do _not_ collect projections in
+        // 'remove' (but in 'succs') if the node is a call. We need the
+        // projections for calls as they are associated with registers
+        // (i.e., they are defs).
+        succs.clear();
+        for (DUIterator k = n->outs(); n->has_out(k); k++) {
+          if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
+            remove.push(n->out(k));
+          } else {
+            succs.push(n->out(k));
+          }
+        }
+        // Replace old node n as input of its succs by last of the new nodes.
+        for (int k = 0; k < succs.length(); ++k) {
+          Node *succ = succs.at(k);
+          for (uint l = 0; l < succ->req(); ++l) {
+            if (succ->in(l) == n) {
+              succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
+            }
+          }
+          for (uint l = succ->req(); l < succ->len(); ++l) {
+            if (succ->in(l) == n) {
+              succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
+            }
+          }
+        }
+
+        // Index of old node in block.
+        uint index = b->find_node(n);
+        // Insert new nodes into block and map them in nodes->blocks array
+        // and remember last node in n2.
+        Node *n2 = NULL;
+        for (int k = 0; k < new_nodes.length(); ++k) {
+          n2 = new_nodes.at(k);
+          b->insert_node(n2, ++index);
+          map_node_to_block(n2, b);
+        }
+
+        // Add old node n to remove and remove them all from block.
+        remove.push(n);
+        j--;
+#ifdef ASSERT
+        if (TracePostallocExpand && Verbose) {
+          tty->print("    removing:\n");
+          for (int k = 0; k < remove.length(); ++k) {
+            tty->print("        "); remove.at(k)->dump();
+          }
+          tty->print("    inserting:\n");
+          for (int k = 0; k < new_nodes.length(); ++k) {
+            tty->print("        "); new_nodes.at(k)->dump();
+          }
+        }
+#endif
+        for (int k = 0; k < remove.length(); ++k) {
+          if (b->contains(remove.at(k))) {
+            b->find_remove(remove.at(k));
+          } else {
+            assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
+          }
+        }
+        // If anything has been inserted (n2 != NULL), continue after the last
+        // node inserted. Some postalloc expands insert no nodes at all because
+        // they only optimize (e.g., max(x,x)); in that case the earlier
+        // decrement of j keeps the scan position correct.
+        j = n2 ? b->find_node(n2) : j;
+      }
+    }
+  }
+
+#ifdef ASSERT
+  if (foundNode) {
+    tty->print("FINISHED %d %s\n", C->compile_id(),
+               C->method() ? C->method()->name()->as_utf8() : C->stub_name());
+    tty->flush();
+  }
+#endif
+}
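The block comment before postalloc_expand() fixes one contract that is easy to miss in the code: the replacement nodes come back in a list whose last element produces the result, and every user of the old node is rewired to that last element. A self-contained toy sketch of just that discipline follows (plain C++ with made-up ToyNode/expand/rewire names, not C2's Node/GrowableArray API):

#include <vector>

struct ToyNode {
  std::vector<ToyNode*> ins;    // def-use inputs, like Node::in(i)
};

// Stand-in for a MachNode::postalloc_expand(): split one compound node
// into two simple ones. The result-producing node must be pushed last.
std::vector<ToyNode*> expand(ToyNode* n) {
  ToyNode* partial = new ToyNode{{n->ins[0], n->ins[1]}};
  ToyNode* result  = new ToyNode{{partial, n->ins[2]}};
  return {partial, result};     // result last, as the contract requires
}

// Mirrors the rewiring loop above: every input edge of 'user' that
// pointed at 'old_node' now points at the last new node, exactly like
// succ->set_req(l, new_nodes.at(new_nodes.length() - 1)).
void rewire(ToyNode* user, ToyNode* old_node,
            const std::vector<ToyNode*>& new_nodes) {
  for (ToyNode*& in : user->ins) {
    if (in == old_node) in = new_nodes.back();
  }
}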
+
+
+//------------------------------dump-------------------------------------------
 #ifndef PRODUCT
 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited  ) const {
   const Node *x = end->is_block_proj();
--- a/src/share/vm/opto/block.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/block.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,9 +90,9 @@
 class CFGElement : public ResourceObj {
   friend class VMStructs;
  public:
-  float _freq; // Execution frequency (estimate)
+  double _freq; // Execution frequency (estimate)
 
-  CFGElement() : _freq(0.0f) {}
+  CFGElement() : _freq(0.0) {}
   virtual bool is_block() { return false; }
   virtual bool is_loop()  { return false; }
   Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
@@ -202,7 +202,7 @@
   // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
   // It is currently also used to scale such frequencies relative to
   // FreqCountInvocations relative to the old value of 1500.
-#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
+#define BLOCK_FREQUENCY(f) ((f * (double) 1500) / FreqCountInvocations)
 
   // Register Pressure (estimate) for Splitting heuristic
   uint _reg_pressure;
@@ -313,10 +313,12 @@
   // Add an instruction to an existing block.  It must go after the head
   // instruction and before the end instruction.
   void add_inst( Node *n ) { insert_node(n, end_idx()); }
-  // Find node in block
+  // Find node in block. Fails if node not in block.
   uint find_node( const Node *n ) const;
   // Find and remove n from block list
   void find_remove( const Node *n );
+  // Check whether the node is in the block.
+  bool contains(const Node *n) const;
 
   // Return the empty status of a block
   enum { not_empty, empty_with_goto, completely_empty };
@@ -391,7 +393,7 @@
   CFGLoop* _root_loop;
 
   // Outmost loop frequency
-  float _outer_loop_frequency;
+  double _outer_loop_frequency;
 
   // Per node latency estimation, valid only during GCM
   GrowableArray<uint>* _node_latency;
@@ -506,7 +508,7 @@
   }
 
   // Get the outer most frequency
-  float get_outer_loop_frequency() const {
+  double get_outer_loop_frequency() const {
     return _outer_loop_frequency;
   }
 
@@ -588,6 +590,7 @@
 
   // Remove empty basic blocks
   void remove_empty_blocks();
+  Block *fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext);
   void fixup_flow();
 
   // Insert a node into a block at index and map the node to the block
@@ -596,6 +599,9 @@
     map_node_to_block(n, b);
   }
 
+  // Check all nodes and postalloc_expand them if necessary.
+  void postalloc_expand(PhaseRegAlloc* _ra);
+
 #ifndef PRODUCT
   bool trace_opto_pipelining() const { return _trace_opto_pipelining; }
 
@@ -650,13 +656,13 @@
 class BlockProbPair VALUE_OBJ_CLASS_SPEC {
 protected:
   Block* _target;      // block target
-  float  _prob;        // probability of edge to block
+  double _prob;        // probability of edge to block
 public:
   BlockProbPair() : _target(NULL), _prob(0.0) {}
-  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}
+  BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}
 
   Block* get_target() const { return _target; }
-  float get_prob() const { return _prob; }
+  double get_prob() const { return _prob; }
 };
 
 //------------------------------CFGLoop-------------------------------------------
@@ -669,8 +675,8 @@
   CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
   GrowableArray<CFGElement*> _members; // list of members of loop
   GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
-  float _exit_prob;       // probability any loop exit is taken on a single loop iteration
-  void update_succ_freq(Block* b, float freq);
+  double _exit_prob;       // probability any loop exit is taken on a single loop iteration
+  void update_succ_freq(Block* b, double freq);
 
  public:
   CFGLoop(int id) :
@@ -696,9 +702,9 @@
   void compute_loop_depth(int depth);
   void compute_freq(); // compute frequency with loop assuming head freq 1.0f
   void scale_freq();   // scale frequency by loop trip count (including outer loops)
-  float outer_loop_freq() const; // frequency of outer loop
+  double outer_loop_freq() const; // frequency of outer loop
   bool in_loop_nest(Block* b);
-  float trip_count() const { return 1.0f / _exit_prob; }
+  double trip_count() const { return 1.0 / _exit_prob; }
   virtual bool is_loop()  { return true; }
   int id() { return _id; }
 
@@ -717,7 +723,7 @@
  private:
   Block * _from;        // Source basic block
   Block * _to;          // Destination basic block
-  float _freq;          // Execution frequency (estimate)
+  double _freq;          // Execution frequency (estimate)
   int   _state;
   bool  _infrequent;
   int   _from_pct;
@@ -736,13 +742,13 @@
     interior            // edge is interior to trace (could be backedge)
   };
 
-  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
+  CFGEdge(Block *from, Block *to, double freq, int from_pct, int to_pct) :
     _from(from), _to(to), _freq(freq),
     _from_pct(from_pct), _to_pct(to_pct), _state(open) {
     _infrequent = from_infrequent() || to_infrequent();
   }
 
-  float  freq() const { return _freq; }
+  double freq() const { return _freq; }
   Block* from() const { return _from; }
   Block* to  () const { return _to;   }
   int  infrequent() const { return _infrequent; }
--- a/src/share/vm/opto/buildOopMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/buildOopMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/bytecodeInfo.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,10 @@
   _subtrees(c->comp_arena(), 2, 0, NULL),
   _msg(NULL)
 {
-  NOT_PRODUCT(_count_inlines = 0;)
+#ifndef PRODUCT
+  _count_inlines = 0;
+  _forced_inline = false;
+#endif
   if (_caller_jvms != NULL) {
     // Keep a private copy of the caller_jvms:
     _caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());
@@ -60,31 +63,14 @@
   assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
   assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
   assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
-  if (UseOldInlining) {
-    // Update hierarchical counts, count_inline_bcs() and count_inlines()
-    InlineTree *caller = (InlineTree *)caller_tree;
-    for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
-      caller->_count_inline_bcs += count_inline_bcs();
-      NOT_PRODUCT(caller->_count_inlines++;)
-    }
+  // Update hierarchical counts, count_inline_bcs() and count_inlines()
+  InlineTree *caller = (InlineTree *)caller_tree;
+  for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
+    caller->_count_inline_bcs += count_inline_bcs();
+    NOT_PRODUCT(caller->_count_inlines++;)
   }
 }
 
-InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
-                       float site_invoke_ratio, int max_inline_level) :
-  C(c),
-  _caller_jvms(caller_jvms),
-  _caller_tree(NULL),
-  _method(callee_method),
-  _site_invoke_ratio(site_invoke_ratio),
-  _max_inline_level(max_inline_level),
-  _count_inline_bcs(method()->code_size()),
-  _msg(NULL)
-{
-  NOT_PRODUCT(_count_inlines = 0;)
-  assert(!UseOldInlining, "do not use for old stuff");
-}
-
 /**
  *  Return true when EA is ON and a java constructor is called or
  *  a super constructor is called from an inlined java constructor.
@@ -128,9 +114,19 @@
       tty->print_cr("Inlined method is hot: ");
     }
     set_msg("force inline by CompilerOracle");
+    _forced_inline = true;
     return true;
   }
 
+#ifndef PRODUCT
+  int inline_depth = inline_level()+1;
+  if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
+    set_msg("force inline by ciReplay");
+    _forced_inline = true;
+    return true;
+  }
+#endif
+
   int size = callee_method->code_size_for_inlining();
 
   // Check for too many throws (and not too huge)
@@ -145,11 +141,6 @@
     return true;
   }
 
-  if (!UseOldInlining) {
-    set_msg("!UseOldInlining");
-    return true;  // size and frequency are represented in a new way
-  }
-
   int default_max_inline_size = C->max_inline_size();
   int inline_small_code_size  = InlineSmallCode / 4;
   int max_inline_size         = default_max_inline_size;
@@ -213,35 +204,6 @@
     fail_msg = "don't inline by annotation";
   }
 
-  if (!UseOldInlining) {
-    if (fail_msg != NULL) {
-      *wci_result = *(WarmCallInfo::always_cold());
-      set_msg(fail_msg);
-      return true;
-    }
-
-    if (callee_method->has_unloaded_classes_in_signature()) {
-      wci_result->set_profit(wci_result->profit() * 0.1);
-    }
-
-    // don't inline exception code unless the top method belongs to an
-    // exception class
-    if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
-      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
-      if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
-        wci_result->set_profit(wci_result->profit() * 0.1);
-      }
-    }
-
-    if (callee_method->has_compiled_code() &&
-        callee_method->instructions_size() > InlineSmallCode) {
-      wci_result->set_profit(wci_result->profit() * 0.1);
-      // %%% adjust wci_result->size()?
-    }
-
-    return false;
-  }
-
   // one more inlining restriction
   if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
     fail_msg = "unloaded signature classes";
@@ -264,6 +226,18 @@
   }
 
 #ifndef PRODUCT
+  int caller_bci = jvms->bci();
+  int inline_depth = inline_level()+1;
+  if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
+    set_msg("force inline by ciReplay");
+    return false;
+  }
+
+  if (ciReplay::should_not_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
+    set_msg("disallowed by ciReplay");
+    return true;
+  }
+
   if (ciReplay::should_not_inline(callee_method)) {
     set_msg("disallowed by ciReplay");
     return true;
@@ -332,9 +306,7 @@
                                int caller_bci, JVMState* jvms, ciCallProfile& profile,
                                WarmCallInfo* wci_result, bool& should_delay) {
 
-   // Old algorithm had funny accumulating BC-size counters
-  if (UseOldInlining && ClipInlining
-      && (int)count_inline_bcs() >= DesiredMethodLimit) {
+  if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
     if (!callee_method->force_inline() || !IncrementalInline) {
       set_msg("size > DesiredMethodLimit");
       return false;
@@ -343,6 +315,7 @@
     }
   }
 
+  _forced_inline = false; // Reset
   if (!should_inline(callee_method, caller_method, caller_bci, profile,
                      wci_result)) {
     return false;
@@ -373,10 +346,10 @@
 
     if ((!UseInterpreter || CompileTheWorld) &&
         is_init_with_ea(callee_method, caller_method, C)) {
-
       // Escape Analysis stress testing when running Xcomp or CTW:
       // inline constructors even if they are not reached.
-
+    } else if (forced_inline()) {
+      // Inlining was forced by CompilerOracle or ciReplay
     } else if (profile.count() == 0) {
       // don't inline unreached call sites
        set_msg("call site not reached");
@@ -436,8 +409,7 @@
 
   int size = callee_method->code_size_for_inlining();
 
-  if (UseOldInlining && ClipInlining
-      && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
+  if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
     if (!callee_method->force_inline() || !IncrementalInline) {
       set_msg("size > DesiredMethodLimit");
       return false;
@@ -555,8 +527,7 @@
                                jvms, profile, &wci, should_delay);
 
 #ifndef PRODUCT
-  if (UseOldInlining && InlineWarmCalls
-      && (PrintOpto || C->print_inlining())) {
+  if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
     bool cold = wci.is_cold();
     bool hot  = !cold && wci.is_hot();
     bool old_cold = !success;
@@ -570,13 +541,12 @@
     }
   }
 #endif
-  if (UseOldInlining) {
-    if (success) {
-      wci = *(WarmCallInfo::always_hot());
-    } else {
-      wci = *(WarmCallInfo::always_cold());
-    }
+  if (success) {
+    wci = *(WarmCallInfo::always_hot());
+  } else {
+    wci = *(WarmCallInfo::always_cold());
   }
+
   if (!InlineWarmCalls) {
     if (!wci.is_cold() && !wci.is_hot()) {
       // Do not inline the warm calls.
@@ -590,8 +560,7 @@
       set_msg("inline (hot)");
     }
     print_inlining(callee_method, caller_bci, true /* success */);
-    if (UseOldInlining)
-      build_inline_tree_for_callee(callee_method, jvms, caller_bci);
+    build_inline_tree_for_callee(callee_method, jvms, caller_bci);
     if (InlineWarmCalls && !wci.is_hot())
       return new (C) WarmCallInfo(wci);  // copy to heap
     return WarmCallInfo::always_hot();
@@ -700,12 +669,28 @@
   return iltp;
 }
 
+// Count number of nodes in this subtree
+int InlineTree::count() const {
+  int result = 1;
+  for (int i = 0 ; i < _subtrees.length(); i++) {
+    result += _subtrees.at(i)->count();
+  }
+  return result;
+}
+
+void InlineTree::dump_replay_data(outputStream* out) {
+  out->print(" %d %d ", inline_level(), caller_bci());
+  method()->dump_name_as_ascii(out);
+  for (int i = 0 ; i < _subtrees.length(); i++) {
+    _subtrees.at(i)->dump_replay_data(out);
+  }
+}
 
 
 #ifndef PRODUCT
 void InlineTree::print_impl(outputStream* st, int indent) const {
   for (int i = 0; i < indent; i++) st->print(" ");
-  st->print(" @ %d ", caller_bci());
+  st->print(" @ %d", caller_bci());
   method()->print_short_name(st);
   st->cr();
 
--- a/src/share/vm/opto/c2_globals.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/c2_globals.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -35,6 +35,9 @@
 #ifdef TARGET_ARCH_arm
 # include "c2_globals_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_ppc
+# include "c2_globals_ppc.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "c2_globals_linux.hpp"
 #endif
@@ -44,6 +47,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "c2_globals_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "c2_globals_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "c2_globals_bsd.hpp"
 #endif
@@ -223,7 +229,8 @@
   diagnostic(bool, UnrollLimitCheck, true,                                  \
           "Additional overflow checks during loop unroll")                  \
                                                                             \
-  product(bool, OptimizeFill, true,                                         \
+  /* OptimizeFill not yet supported on PowerPC. */                          \
+  product(bool, OptimizeFill, true PPC64_ONLY(&& false),                    \
           "convert fill/copy loops into intrinsic")                         \
                                                                             \
   develop(bool, TraceOptimizeFill, false,                                   \
@@ -350,9 +357,6 @@
           "File to dump ideal graph to.  If set overrides the "             \
           "use of the network")                                             \
                                                                             \
-  product(bool, UseOldInlining, true,                                       \
-          "Enable the 1.3 inlining strategy")                               \
-                                                                            \
   product(bool, UseBimorphicInlining, true,                                 \
           "Profiling based inlining for two receivers")                     \
                                                                             \
@@ -457,6 +461,9 @@
   experimental(bool, AggressiveUnboxing, false,                             \
           "Control optimizations for aggressive boxing elimination")        \
                                                                             \
+  develop(bool, TracePostallocExpand, false, "Trace expanding nodes after"  \
+          " register allocation.")                                          \
+                                                                            \
   product(bool, DoEscapeAnalysis, true,                                     \
           "Perform escape analysis")                                        \
                                                                             \
@@ -637,14 +644,22 @@
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
           "Find best control for expensive operations")                     \
                                                                             \
-  experimental(bool, UseMathExactIntrinsics, false,                         \
+  product(bool, UseMathExactIntrinsics, true,                               \
           "Enables intrinsification of various java.lang.Math functions")   \
                                                                             \
   experimental(bool, ReplaceInParentMaps, false,                            \
           "Propagate type improvements in callers of inlinee if possible")  \
                                                                             \
   experimental(bool, UseTypeSpeculation, false,                             \
-          "Speculatively propagate types from profiles")
+          "Speculatively propagate types from profiles")                    \
+                                                                            \
+  diagnostic(bool, UseInlineDepthForSpeculativeTypes, true,                 \
+          "Carry inline depth of profile point with speculative type "      \
+          "and give priority to profiling from lower inline depth")         \
+                                                                            \
+  product_pd(bool, TrapBasedRangeChecks,                                    \
+          "Generate code for range checks that uses a cmp and trap "        \
+          "instruction raising SIGTRAP. Used on PPC64.")                    \
 
 C2_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG)
 
--- a/src/share/vm/opto/c2compiler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/c2compiler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 // register information defined by ADLC
@@ -111,7 +114,7 @@
   assert(is_initialized(), "Compiler thread must be initialized");
 
   bool subsume_loads = SubsumeLoads;
-  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
+  bool do_escape_analysis = DoEscapeAnalysis && !env->should_retain_local_variables();
   bool eliminate_boxing = EliminateAutoBox;
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
--- a/src/share/vm/opto/c2compiler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/c2compiler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,6 @@
   // Name
   const char *name() { return "C2"; }
 
-#ifdef TIERED
-  virtual bool is_c2() { return true; };
-#endif // TIERED
-
   void initialize();
 
   // Compilation entry point for methods
--- a/src/share/vm/opto/callGenerator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/callGenerator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -201,7 +201,7 @@
   // Block::implicit_null_check() only looks for loads and stores, not calls.
   ciMethod *caller = kit.method();
   ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data();
-  if (!UseInlineCaches || !ImplicitNullChecks ||
+  if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
        ((ImplicitNullCheckThreshold > 0) && caller_md &&
        (caller_md->trap_count(Deoptimization::Reason_null_check)
        >= (uint)ImplicitNullCheckThreshold))) {
@@ -722,7 +722,7 @@
     Node* m = kit.map()->in(i);
     Node* n = slow_map->in(i);
     if (m != n) {
-      const Type* t = gvn.type(m)->meet(gvn.type(n));
+      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
       Node* phi = PhiNode::make(region, m, t);
       phi->set_req(2, n);
       kit.map()->set_req(i, gvn.transform(phi));
@@ -975,7 +975,7 @@
     Node* m = kit.map()->in(i);
     Node* n = slow_map->in(i);
     if (m != n) {
-      const Type* t = gvn.type(m)->meet(gvn.type(n));
+      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
       Node* phi = PhiNode::make(region, m, t);
       phi->set_req(2, n);
       kit.map()->set_req(i, gvn.transform(phi));
--- a/src/share/vm/opto/callnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/callnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -342,7 +342,7 @@
       st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
       break;
     case Type::AnyPtr:
-      assert( t == TypePtr::NULL_PTR, "" );
+      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
       st->print(" %s%d]=#NULL",msg,i);
       break;
     case Type::AryPtr:
@@ -595,6 +595,18 @@
   }
 }
 
+// Adapt the offsets in the in-array after adding or removing an edge.
+// Prerequisite is that the JVMState is used by only one node.
+void JVMState::adapt_position(int delta) {
+  for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
+    jvms->set_locoff(jvms->locoff() + delta);
+    jvms->set_stkoff(jvms->stkoff() + delta);
+    jvms->set_monoff(jvms->monoff() + delta);
+    jvms->set_scloff(jvms->scloff() + delta);
+    jvms->set_endoff(jvms->endoff() + delta);
+  }
+}
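adapt_position() is easiest to see with concrete numbers. In a call node's input array, the JVMState sections sit at locoff/stkoff/monoff/scloff/endoff; inserting or removing an edge in front of them shifts every offset by the same delta while the relative layout stays intact. A toy illustration follows (assumed layout and made-up ToyJVMState, not real C2 offsets):

struct ToyJVMState {
  int locoff, stkoff, monoff, scloff, endoff;
  void adapt_position(int delta) {  // same shape as JVMState::adapt_position
    locoff += delta; stkoff += delta; monoff += delta;
    scloff += delta; endoff += delta;
  }
};

// E.g. locals at 5, stack at 8, monitors at 10, scalar objects at 11,
// end at 11: adding one input edge gives delta = +1, so the sections move
// to 6/9/11/12/12 and their relative layout is preserved.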
+
 //=============================================================================
 uint CallNode::cmp( const Node &n ) const
 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
@@ -887,7 +899,7 @@
   if (!(call->req() > TypeFunc::Parms &&
         call->in(TypeFunc::Parms) != NULL &&
         call->in(TypeFunc::Parms)->is_Con())) {
-    assert(_in_dump_cnt != 0, "OK if dumping");
+    assert(in_dump() != 0, "OK if dumping");
     tty->print("[bad uncommon trap]");
     return 0;
   }
--- a/src/share/vm/opto/callnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/callnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -299,6 +299,7 @@
   JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
   JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
   void      set_map_deep(SafePointNode *map);// reset map for all callers
+  void      adapt_position(int delta);       // Adapt offsets in the in-array after adding or removing an edge.
 
 #ifndef PRODUCT
   void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
@@ -559,9 +560,15 @@
   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
   // for some macro nodes whose expansion does not have a safepoint on the fast path.
   virtual bool        guaranteed_safepoint()  { return true; }
-  // For macro nodes, the JVMState gets modified during expansion, so when cloning
-  // the node the JVMState must be cloned.
-  virtual void        clone_jvms(Compile* C) { }   // default is not to clone
+  // For macro nodes, the JVMState gets modified during expansion. If calls
+  // use MachConstantBase, it gets modified during matching. So when cloning
+  // the node the JVMState must be cloned. Default is not to clone.
+  virtual void clone_jvms(Compile* C) {
+    if (C->needs_clone_jvms() && jvms() != NULL) {
+      set_jvms(jvms()->clone_deep(C));
+      jvms()->set_map_deep(this);
+    }
+  }
 
   // Returns true if the call may modify n
   virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
--- a/src/share/vm/opto/cfgnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/cfgnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -951,7 +951,7 @@
         if (is_intf != ti_is_intf)
           { t = _type; break; }
       }
-      t = t->meet(ti);
+      t = t->meet_speculative(ti);
     }
   }
 
@@ -968,11 +968,11 @@
   //
   // It is not possible to see Type::BOTTOM values as phi inputs,
   // because the ciTypeFlow pre-pass produces verifier-quality types.
-  const Type* ft = t->filter(_type);  // Worst case type
+  const Type* ft = t->filter_speculative(_type);  // Worst case type
 
 #ifdef ASSERT
   // The following logic has been moved into TypeOopPtr::filter.
-  const Type* jt = t->join(_type);
+  const Type* jt = t->join_speculative(_type);
   if( jt->empty() ) {           // Emptied out???
 
     // Check for evil case of 't' being a class and '_type' expecting an
@@ -1018,7 +1018,7 @@
           !jtkp->klass_is_exact() && // Keep exact interface klass (6894807)
           ttkp->is_loaded() && !ttkp->klass()->is_interface() ) {
         assert(ft == ttkp->cast_to_ptr_type(jtkp->ptr()) ||
-               ft->isa_narrowoop() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
+               ft->isa_narrowklass() && ft->make_ptr() == ttkp->cast_to_ptr_type(jtkp->ptr()), "");
         jt = ft;
       }
     }
@@ -1757,7 +1757,7 @@
           break;
         }
         // Accumulate type for resulting Phi
-        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+        type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
       }
       Node* base = NULL;
       if (doit) {
--- a/src/share/vm/opto/chaitin.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/chaitin.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -210,7 +210,7 @@
 {
   NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
 
-  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
+  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
 
   // Build a list of basic blocks, sorted by frequency
   _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
@@ -761,7 +761,7 @@
         // processes as vector in RA.
         if (RegMask::is_vector(ireg))
           lrg._is_vector = 1;
-        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
+        assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD || ireg == Op_RegL,
                "vector must be in vector registers");
 
         // Check for bound register masks
@@ -961,7 +961,7 @@
         int kreg = n->in(k)->ideal_reg();
         bool is_vect = RegMask::is_vector(kreg);
         assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
-               is_vect || kreg == Op_RegD,
+               is_vect || kreg == Op_RegD || kreg == Op_RegL,
                "vector must be in vector registers");
         if (lrgmask.is_bound(kreg))
           lrg._is_bound = 1;
@@ -1682,9 +1682,21 @@
       // (where top() node is placed).
       base->init_req(0, _cfg.get_root_node());
       Block *startb = _cfg.get_block_for_node(C->top());
-      startb->insert_node(base, startb->find_node(C->top()));
+      uint node_pos = startb->find_node(C->top());
+      startb->insert_node(base, node_pos);
       _cfg.map_node_to_block(base, startb);
       assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
+
+      // The loadConP0 might have projection nodes depending on architecture
+      // Add the projection nodes to the CFG
+      for (DUIterator_Fast imax, i = base->fast_outs(imax); i < imax; i++) {
+        Node* use = base->fast_out(i);
+        if (use->is_MachProj()) {
+          startb->insert_node(use, ++node_pos);
+          _cfg.map_node_to_block(use, startb);
+          new_lrg(use, maxlrg++);
+        }
+      }
     }
     if (_lrg_map.live_range_id(base) == 0) {
       new_lrg(base, maxlrg++);
@@ -1787,7 +1799,7 @@
           Block *phi_block = _cfg.get_block_for_node(phi);
           if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
-            Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
+            Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
             insert_proj( phi_block, 1, spill, maxlrg++ );
             n->set_req(1,spill);
             must_recompute_live = true;
--- a/src/share/vm/opto/chaitin.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/chaitin.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,10 +34,9 @@
 #include "opto/phase.hpp"
 #include "opto/regalloc.hpp"
 #include "opto/regmask.hpp"
+#include "opto/machnode.hpp"
 
 class LoopTree;
-class MachCallNode;
-class MachSafePointNode;
 class Matcher;
 class PhaseCFG;
 class PhaseLive;
@@ -98,6 +97,12 @@
   }
   // Compute the degree between 2 live ranges
   int compute_degree( LRG &l ) const;
+  bool mask_is_nonempty_and_up() const {
+    return mask().is_UP() && mask_size();
+  }
+  bool is_float_or_vector() const {
+    return _is_float || _is_vector;
+  }
 
 private:
   RegMask _mask;                // Allowed registers for this LRG
@@ -129,6 +134,7 @@
   void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); debug_only(_msize_valid=0;)}
   void Clear()   { _mask.Clear()  ; debug_only(_msize_valid=1); _mask_size = 0; }
   void Set_All() { _mask.Set_All(); debug_only(_msize_valid=1); _mask_size = RegMask::CHUNK_SIZE; }
+
   void Insert( OptoReg::Name reg ) { _mask.Insert(reg);  debug_only(_msize_valid=0;) }
   void Remove( OptoReg::Name reg ) { _mask.Remove(reg);  debug_only(_msize_valid=0;) }
   void clear_to_pairs() { _mask.clear_to_pairs(); debug_only(_msize_valid=0;) }
@@ -417,8 +423,8 @@
   uint _simplified;             // Linked list head of simplified LRGs
 
   // Helper functions for Split()
-  uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
-  uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
+  uint split_DEF(Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
+  uint split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
 
   //------------------------------clone_projs------------------------------------
   // After cloning some rematerialized instruction, clone any MachProj's that
@@ -440,7 +446,7 @@
                             int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
   // True if lidx is used before any real register is def'd in the block
   bool prompt_use( Block *b, uint lidx );
-  Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
+  Node *get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx );
   // Insert the spill at chosen location.  Skip over any intervening Proj's or
   // Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
   // instead.  Update high-pressure indices.  Create a new live range.
@@ -483,15 +489,113 @@
   // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask
   // information to trim the set of interferences.  Return the
   // count of edges added.
-  void interfere_with_live( uint reg, IndexSet *live );
+  void interfere_with_live(uint lid, IndexSet* liveout);
+#ifdef ASSERT
   // Count register pressure for asserts
-  uint count_int_pressure( IndexSet *liveout );
-  uint count_float_pressure( IndexSet *liveout );
+  uint count_int_pressure(IndexSet* liveout);
+  uint count_float_pressure(IndexSet* liveout);
+#endif
 
   // Build the interference graph using virtual registers only.
   // Used for aggressive coalescing.
   void build_ifg_virtual( );
 
+  // used when computing the register pressure for each block in the CFG. This
+  // is done during IFG creation.
+  class Pressure {
+      // keeps track of the register pressure at the current
+      // instruction (used when stepping backwards in the block)
+      uint _current_pressure;
+
+      // keeps track of the instruction index of the first low to high register pressure
+      // transition (starting from the top) in the block
+      // if high_pressure_index == 0 then the whole block is high pressure
+      // if high_pressure_index == b.end_idx() + 1 then the whole block is low pressure
+      uint _high_pressure_index;
+
+      // stores the highest pressure we find
+      uint _final_pressure;
+
+      // number of live ranges that constitute high register pressure
+      const uint _high_pressure_limit;
+    public:
+
+      // lower the register pressure and look for a low to high pressure
+      // transition
+      void lower(LRG& lrg, uint& location) {
+        _current_pressure -= lrg.reg_pressure();
+        if (_current_pressure == _high_pressure_limit) {
+          _high_pressure_index = location;
+        }
+      }
+
+      // raise the pressure and store the pressure if it's the biggest
+      // pressure so far
+      void raise(LRG &lrg) {
+        _current_pressure += lrg.reg_pressure();
+        if (_current_pressure > _final_pressure) {
+          _final_pressure = _current_pressure;
+        }
+      }
+
+      uint high_pressure_index() const {
+        return _high_pressure_index;
+      }
+
+      uint final_pressure() const {
+        return _final_pressure;
+      }
+
+      uint current_pressure() const {
+        return _current_pressure;
+      }
+
+      uint high_pressure_limit() const {
+        return _high_pressure_limit;
+      }
+
+      void lower_high_pressure_index() {
+        _high_pressure_index--;
+      }
+
+      void set_high_pressure_index_to_block_start() {
+        _high_pressure_index = 0;
+      }
+
+      void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
+        // this pressure is only valid at this instruction, i.e. we don't need to lower
+        // the register pressure since the fat proj was never live before (going backwards)
+        uint new_pressure = current_pressure() + fatproj_mask.Size();
+        if (new_pressure > final_pressure()) {
+          _final_pressure = new_pressure;
+        }
+
+        // if we were at low pressure and the fat proj pushes us over the high pressure
+        // limit, record the fat proj location as a low-to-high (and back to low) transition
+        if (current_pressure() <= high_pressure_limit() && new_pressure > high_pressure_limit()) {
+          _high_pressure_index = fatproj_location;
+        }
+      }
+
+      Pressure(uint high_pressure_index, uint high_pressure_limit)
+      : _current_pressure(0)
+      , _high_pressure_index(high_pressure_index)
+      , _final_pressure(0)
+      , _high_pressure_limit(high_pressure_limit) {}
+  };
+
+  void lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure);
+  void raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure);
+  void check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype);
+  void add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure);
+  void compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost);
+  bool remove_node_if_not_used(Block* b, uint location, Node* n, uint lid, IndexSet* liveout);
+  void assign_high_score_to_immediate_copies(Block* b, Node* n, LRG& lrg, uint next_inst, uint last_inst);
+  void remove_interference_from_copy(Block* b, uint location, uint lid_copy, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure);
+  void remove_bound_register_from_interfering_live_ranges(LRG& lrg, IndexSet* liveout, uint& must_spill);
+  void check_for_high_pressure_block(Pressure& pressure);
+  void adjust_high_pressure_index(Block* b, uint& hrp_index, Pressure& pressure);
+
   // Build the interference graph using physical registers when available.
   // That is, if 2 live ranges are simultaneously alive but in their
   // acceptable register sets do not overlap, then they do not interfere.
@@ -554,7 +658,7 @@
   // Replace the old node with the current live version of that value
   // and yank the old value if it's dead.
   int replace_and_yank_if_dead( Node *old, OptoReg::Name nreg,
-                                Block *current_block, Node_List& value, Node_List& regnd ) {
+      Block *current_block, Node_List& value, Node_List& regnd ) {
     Node* v = regnd[nreg];
     assert(v->outcnt() != 0, "no dead values");
     old->replace_by(v);
@@ -565,7 +669,7 @@
     return yank_if_dead_recurse(old, old, current_block, value, regnd);
   }
   int yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block,
-                           Node_List *value, Node_List *regnd);
+      Node_List *value, Node_List *regnd);
   int yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
   int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
   int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
@@ -573,8 +677,8 @@
 
   // If nreg already contains the same constant as val then eliminate it
   bool eliminate_copy_of_constant(Node* val, Node* n,
-                                  Block *current_block, Node_List& value, Node_List &regnd,
-                                  OptoReg::Name nreg, OptoReg::Name nreg2);
+      Block *current_block, Node_List& value, Node_List &regnd,
+      OptoReg::Name nreg, OptoReg::Name nreg2);
   // Extend the node to LRG mapping
   void add_reference( const Node *node, const Node *old_node);
 
--- a/src/share/vm/opto/classes.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/classes.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/classes.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/classes.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,6 @@
 macro(AbsF)
 macro(AbsI)
 macro(AddD)
-macro(AddExactI)
-macro(AddExactL)
 macro(AddF)
 macro(AddI)
 macro(AddL)
@@ -135,7 +133,6 @@
 macro(ExpD)
 macro(FastLock)
 macro(FastUnlock)
-macro(FlagsProj)
 macro(Goto)
 macro(Halt)
 macro(If)
@@ -170,14 +167,13 @@
 macro(LoopLimit)
 macro(Mach)
 macro(MachProj)
-macro(MathExact)
-macro(MathExactI)
-macro(MathExactL)
 macro(MaxI)
 macro(MemBarAcquire)
+macro(LoadFence)
 macro(MemBarAcquireLock)
 macro(MemBarCPUOrder)
 macro(MemBarRelease)
+macro(StoreFence)
 macro(MemBarReleaseLock)
 macro(MemBarVolatile)
 macro(MemBarStoreStore)
@@ -192,22 +188,24 @@
 macro(MoveL2D)
 macro(MoveD2L)
 macro(MulD)
-macro(MulExactI)
-macro(MulExactL)
 macro(MulF)
 macro(MulHiL)
 macro(MulI)
 macro(MulL)
 macro(Multi)
 macro(NegD)
-macro(NegExactI)
-macro(NegExactL)
 macro(NegF)
 macro(NeverBranch)
 macro(Opaque1)
 macro(Opaque2)
 macro(OrI)
 macro(OrL)
+macro(OverflowAddI)
+macro(OverflowSubI)
+macro(OverflowMulI)
+macro(OverflowAddL)
+macro(OverflowSubL)
+macro(OverflowMulL)
 macro(PCTable)
 macro(Parm)
 macro(PartialSubtypeCheck)
@@ -251,8 +249,6 @@
 macro(StrEquals)
 macro(StrIndexOf)
 macro(SubD)
-macro(SubExactI)
-macro(SubExactL)
 macro(SubF)
 macro(SubI)
 macro(SubL)
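
The removed MathExact* machinery is replaced by dedicated Overflow{Add,Sub,Mul}{I,L} nodes, which separate the arithmetic result from the overflow condition so a branch can consume the latter directly (the old design needed FlagsProj and special control-edge fixups, deleted in compile.cpp below). The check these nodes model is roughly the following sketch; it is illustrative, not the node implementation:

#include <climits>
#include <cstdio>

// Portable signed-add overflow test; GCC and Clang also provide
// __builtin_add_overflow for the same job.
static bool overflow_add_i(int a, int b, int* out) {
  if ((b > 0 && a > INT_MAX - b) || (b < 0 && a < INT_MIN - b)) {
    return true;  // overflow: take the slow path / deoptimize
  }
  *out = a + b;
  return false;
}

int main() {
  int r = 0;
  std::printf("overflowed: %s\n",
              overflow_add_i(INT_MAX, 1, &r) ? "yes" : "no");
  return 0;
}
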
--- a/src/share/vm/opto/coalesce.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/coalesce.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -291,7 +291,7 @@
               _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
-              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
+              copy = new (C) MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm);
               // Find a good place to insert.  Kinda tricky, use a subroutine
               insert_copy_with_overlap(pred,copy,phi_name,src_name);
             }
@@ -325,7 +325,7 @@
               l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
-              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
+              copy = new (C) MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm);
               // Insert the copy in the basic block, just before us
               b->insert_node(copy, l++);
             }
@@ -372,7 +372,7 @@
                 continue;     // Live out; do not pre-split
               // Split the lrg at this use
               const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
-              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
+              Node* copy = new (C) MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm);
               // Insert the copy in the use-def chain
               n->set_req(inpidx, copy );
               // Insert the copy in the basic block, just before us
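
Every MachSpillCopyNode constructed in this file now carries a SpillType tag (PhiInput, TwoAddress, DebugUse, plus LoopPhiInput over in chaitin.cpp) recording why the copy was inserted. A toy model of the tagged constructor; the enumerator names come from the diff, the class body is a stand-in:

#include <cstdio>

struct SpillCopy {
  enum SpillType { PhiInput, DebugUse, LoopPhiInput, TwoAddress };
  SpillType type;  // why this copy exists, for diagnostics

  explicit SpillCopy(SpillType t) : type(t) {}

  const char* why() const {
    switch (type) {
      case PhiInput:     return "phi input";
      case DebugUse:     return "debug use";
      case LoopPhiInput: return "loop phi input";
      case TwoAddress:   return "two-address fixup";
    }
    return "?";
  }
};

int main() {
  SpillCopy copy(SpillCopy::TwoAddress);
  std::printf("spill copy reason: %s\n", copy.why());
  return 0;
}
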
--- a/src/share/vm/opto/coalesce.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/coalesce.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/compile.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/compile.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "ci/ciReplay.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/exceptionHandlerTable.hpp"
 #include "code/nmethod.hpp"
@@ -81,8 +82,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 
@@ -644,9 +648,11 @@
                   _dead_node_count(0),
 #ifndef PRODUCT
                   _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
+                  _in_dump_cnt(0),
                   _printer(IdealGraphPrinter::printer()),
 #endif
                   _congraph(NULL),
+                  _replay_inline_data(NULL),
                   _late_inlines(comp_arena(), 2, 0, NULL),
                   _string_late_inlines(comp_arena(), 2, 0, NULL),
                   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
@@ -680,6 +686,10 @@
   }
   set_print_assembly(print_opto_assembly);
   set_parsed_irreducible_loop(false);
+
+  if (method()->has_option("ReplayInline")) {
+    _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level());
+  }
 #endif
   set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
   set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
@@ -695,10 +705,7 @@
 
   print_compile_messages();
 
-  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
-    _ilt = InlineTree::build_inline_tree_root();
-  else
-    _ilt = NULL;
+  _ilt = InlineTree::build_inline_tree_root();
 
   // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
   assert(num_alias_types() >= AliasIdxRaw, "");
@@ -849,6 +856,15 @@
 #endif
 
   NOT_PRODUCT( verify_barriers(); )
+
+  // Dump compilation data to replay it.
+  if (method()->has_option("DumpReplay")) {
+    env()->dump_replay_data(_compile_id);
+  }
+  if (method()->has_option("DumpInline") && (ilt() != NULL)) {
+    env()->dump_inline_data(_compile_id);
+  }
+
   // Now that we know the size of all the monitors we can add a fixed slot
   // for the original deopt pc.
 
@@ -856,6 +872,10 @@
   int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size);
   set_fixed_slots(next_slot);
 
+  // Compute when to use implicit null checks. Used by matching trap-based
+  // nodes and NullCheck optimization.
+  set_allowed_deopt_reasons();
+
   // Now generate code
   Code_Gen();
   if (failing())  return;
@@ -933,17 +953,20 @@
     _inner_loops(0),
 #ifndef PRODUCT
     _trace_opto_output(TraceOptoOutput),
+    _in_dump_cnt(0),
     _printer(NULL),
 #endif
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
     _congraph(NULL),
+    _replay_inline_data(NULL),
     _number_of_mh_late_inlines(0),
     _inlining_progress(false),
     _inlining_incrementally(false),
     _print_inlining_list(NULL),
     _print_inlining_idx(0),
-    _preserve_jvm_state(0) {
+    _preserve_jvm_state(0),
+    _allowed_reasons(0) {
   C = this;
 
 #ifndef PRODUCT
@@ -2248,6 +2271,12 @@
     peep.do_transform();
   }
 
+  // Do late expansion if the CPU requires it.
+  if (Matcher::require_postalloc_expand) {
+    NOT_PRODUCT(TracePhase t2c("postalloc_expand", &_t_postalloc_expand, true));
+    cfg.postalloc_expand(_regalloc);
+  }
+
   // Convert Nodes to instruction bits in a buffer
   {
     // %%%% workspace merge brought two timers together for one job
@@ -2999,42 +3028,6 @@
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
-    // Must set a control edge on all nodes that produce a FlagsProj
-    // so they can't escape the block that consumes the flags.
-    // Must also set the non throwing branch as the control
-    // for all nodes that depends on the result. Unless the node
-    // already have a control that isn't the control of the
-    // flag producer
-  case Op_FlagsProj:
-    {
-      MathExactNode* math = (MathExactNode*)  n->in(0);
-      Node* ctrl = math->control_node();
-      Node* non_throwing = math->non_throwing_branch();
-      math->set_req(0, ctrl);
-
-      Node* result = math->result_node();
-      if (result != NULL) {
-        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
-          Node* out = result->fast_out(j);
-          // Phi nodes shouldn't be moved. They would only match below if they
-          // had the same control as the MathExactNode. The only time that
-          // would happen is if the Phi is also an input to the MathExact
-          //
-          // Cmp nodes shouldn't have control set at all.
-          if (out->is_Phi() ||
-              out->is_Cmp()) {
-            continue;
-          }
-
-          if (out->in(0) == NULL) {
-            out->set_req(0, non_throwing);
-          } else if (out->in(0) == ctrl) {
-            out->set_req(0, non_throwing);
-          }
-        }
-      }
-    }
-    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -3256,7 +3249,8 @@
     // because of a transient condition during start-up in the interpreter.
     return false;
   }
-  if (md->has_trap_at(bci, reason) != 0) {
+  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
+  if (md->has_trap_at(bci, m, reason) != 0) {
     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
     // Also, if there are multiple reasons, or if there is no per-BCI record,
     // assume the worst.
@@ -3274,7 +3268,7 @@
 // Less-accurate variant which does not require a method and bci.
 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
                              ciMethodData* logmd) {
- if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
+  if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
     // Too many traps globally.
     // Note that we use cumulative trap_count, not just md->trap_count.
     if (log()) {
@@ -3309,10 +3303,11 @@
   uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
   Deoptimization::DeoptReason per_bc_reason
     = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
+  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
   if ((per_bc_reason == Deoptimization::Reason_none
-       || md->has_trap_at(bci, reason) != 0)
+       || md->has_trap_at(bci, m, reason) != 0)
       // The trap frequency measure we care about is the recompile count:
-      && md->trap_recompiled_at(bci)
+      && md->trap_recompiled_at(bci, m)
       && md->overflow_recompile_count() >= bc_cutoff) {
     // Do not emit a trap here if it has already caused recompilations.
     // Also, if there are multiple reasons, or if there is no per-BCI record,
@@ -3339,6 +3334,19 @@
   }
 }
 
+// Compute when not to trap. Used by matching trap-based nodes and
+// NullCheck optimization.
+void Compile::set_allowed_deopt_reasons() {
+  _allowed_reasons = 0;
+  if (is_method_compilation()) {
+    for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) {
+      assert(rs < BitsPerInt, "recode bit map");
+      if (!too_many_traps((Deoptimization::DeoptReason) rs)) {
+        _allowed_reasons |= nth_bit(rs);
+      }
+    }
+  }
+}
 
 #ifndef PRODUCT
 //------------------------------verify_graph_edges---------------------------
@@ -3757,6 +3765,16 @@
   }
 }
 
+// Dump inlining replay data to the stream.
+// Does not change thread state or acquire any locks.
+void Compile::dump_inline_data(outputStream* out) {
+  InlineTree* inl_tree = ilt();
+  if (inl_tree != NULL) {
+    out->print(" inline %d", inl_tree->count());
+    inl_tree->dump_replay_data(out);
+  }
+}
+
 int Compile::cmp_expensive_nodes(Node* n1, Node* n2) {
   if (n1->Opcode() < n2->Opcode())      return -1;
   else if (n1->Opcode() > n2->Opcode()) return 1;
@@ -3893,16 +3911,18 @@
     // which may optimize it out.
     for (uint next = 0; next < worklist.size(); ++next) {
       Node *n  = worklist.at(next);
-      if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
-          n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
+      if (n->is_Type()) {
         TypeNode* tn = n->as_Type();
-        const TypeOopPtr* t = tn->type()->is_oopptr();
-        bool in_hash = igvn.hash_delete(n);
-        assert(in_hash, "node should be in igvn hash table");
-        tn->set_type(t->remove_speculative());
-        igvn.hash_insert(n);
-        igvn._worklist.push(n); // give it a chance to go away
-        modified++;
+        const Type* t = tn->type();
+        const Type* t_no_spec = t->remove_speculative();
+        if (t_no_spec != t) {
+          bool in_hash = igvn.hash_delete(n);
+          assert(in_hash, "node should be in igvn hash table");
+          tn->set_type(t_no_spec);
+          igvn.hash_insert(n);
+          igvn._worklist.push(n); // give it a chance to go away
+          modified++;
+        }
       }
       uint max = n->len();
       for( uint i = 0; i < max; ++i ) {
@@ -3916,6 +3936,27 @@
     if (modified > 0) {
       igvn.optimize();
     }
+#ifdef ASSERT
+    // Verify that after the IGVN is over no speculative type has resurfaced
+    worklist.clear();
+    worklist.push(root());
+    for (uint next = 0; next < worklist.size(); ++next) {
+      Node *n  = worklist.at(next);
+      const Type* t = igvn.type(n);
+      assert(t == t->remove_speculative(), "no more speculative types");
+      if (n->is_Type()) {
+        t = n->as_Type()->type();
+        assert(t == t->remove_speculative(), "no more speculative types");
+      }
+      uint max = n->len();
+      for( uint i = 0; i < max; ++i ) {
+        Node *m = n->in(i);
+        if (not_a_node(m))  continue;
+        worklist.push(m);
+      }
+    }
+    igvn.check_no_speculative_types();
+#endif
   }
 }
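
set_allowed_deopt_reasons() precomputes one bit per DeoptReason, set when that reason has not yet trapped too often; the gcm.cpp hunk below drops its local recomputation and reads C->allowed_deopt_reasons() instead. A standalone sketch of the bitset construction, with too_many_traps() stubbed out (in HotSpot it consults profiling data):

#include <cstdio>

enum DeoptReason {
  Reason_none,
  Reason_null_check,
  Reason_class_check,
  Reason_count
};

// Stub: pretend only class checks have already trapped too often.
static bool too_many_traps(DeoptReason r) {
  return r == Reason_class_check;
}

static int allowed_deopt_reasons() {
  int allowed = 0;
  for (int rs = Reason_none + 1; rs < Reason_count; rs++) {
    if (!too_many_traps((DeoptReason) rs)) {
      allowed |= 1 << rs;  // nth_bit(rs) in HotSpot terms
    }
  }
  return allowed;
}

int main() {
  std::printf("allowed deopt mask: 0x%x\n", allowed_deopt_reasons());
  return 0;
}
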
 
--- a/src/share/vm/opto/compile.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/compile.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -431,6 +431,8 @@
   // Are we within a PreserveJVMState block?
   int _preserve_jvm_state;
 
+  void* _replay_inline_data; // Pointer to data loaded from file
+
  public:
 
   outputStream* print_inlining_stream() const {
@@ -465,6 +467,11 @@
     print_inlining_stream()->print(ss.as_string());
   }
 
+  void* replay_inline_data() const { return _replay_inline_data; }
+
+  // Dump inlining replay data to the stream.
+  void dump_inline_data(outputStream* out);
+
  private:
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
@@ -592,6 +599,7 @@
   bool          trace_opto_output() const       { return _trace_opto_output; }
   bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
   void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
+  int _in_dump_cnt;  // Required for dumping ir nodes.
 #endif
 
   // JSR 292
@@ -757,6 +765,8 @@
 
   MachConstantBaseNode*     mach_constant_base_node();
   bool                  has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
+  // Generated by adlc, true if CallNode requires MachConstantBase.
+  bool                      needs_clone_jvms();
 
   // Handy undefined Node
   Node*             top() const                 { return _top; }
@@ -853,6 +863,11 @@
                       ciMethodData* logmd = NULL);
   // Report if there were too many recompiles at a method and bci.
   bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
+  // Return a bitset with the reasons where deoptimization is allowed,
+  // i.e., where there were not too many uncommon traps.
+  int _allowed_reasons;
+  int      allowed_deopt_reasons() { return _allowed_reasons; }
+  void set_allowed_deopt_reasons();
 
   // Parsing, optimization
   PhaseGVN*         initial_gvn()               { return _initial_gvn; }
--- a/src/share/vm/opto/connode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/connode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -188,7 +188,7 @@
 const Type *CMoveNode::Value( PhaseTransform *phase ) const {
   if( phase->type(in(Condition)) == Type::TOP )
     return Type::TOP;
-  return phase->type(in(IfFalse))->meet(phase->type(in(IfTrue)));
+  return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
 }
 
 //------------------------------make-------------------------------------------
@@ -392,14 +392,14 @@
 //=============================================================================
 // If input is already higher or equal to cast type, then this is an identity.
 Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
-  return phase->type(in(1))->higher_equal(_type) ? in(1) : this;
+  return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
 }
 
 //------------------------------Value------------------------------------------
 // Take 'join' of input and cast-up type
 const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
   if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
-  const Type* ft = phase->type(in(1))->filter(_type);
+  const Type* ft = phase->type(in(1))->filter_speculative(_type);
 
 #ifdef ASSERT
   // Previous versions of this function had some special case logic,
@@ -409,7 +409,7 @@
     {
       const Type* t1 = phase->type(in(1));
       if( t1 == Type::TOP )  assert(ft == Type::TOP, "special case #1");
-      const Type* rt = t1->join(_type);
+      const Type* rt = t1->join_speculative(_type);
       if (rt->empty())       assert(ft == Type::TOP, "special case #2");
       break;
     }
--- a/src/share/vm/opto/connode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/connode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,7 +36,7 @@
 // Simple constants
 class ConNode : public TypeNode {
 public:
-  ConNode( const Type *t ) : TypeNode(t,1) {
+  ConNode( const Type *t ) : TypeNode(t->remove_speculative(),1) {
     init_req(0, (Node*)Compile::current()->root());
     init_flags(Flag_is_Con);
   }
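
ConNode now strips the speculative part of its type up front: a constant gains nothing from speculation, and keeping it would let two constants that differ only in speculative info hash to different nodes. A toy illustration with a much-simplified Type:

#include <cstdio>
#include <string>

struct Type {
  std::string base;         // e.g. "java/lang/String:NotNull"
  std::string speculative;  // profile-derived guess; may be empty

  Type remove_speculative() const { return Type{base, ""}; }

  bool operator==(const Type& o) const {
    return base == o.base && speculative == o.speculative;
  }
};

int main() {
  Type a{"java/lang/String:NotNull", "exact String"};
  Type b{"java/lang/String:NotNull", ""};
  std::printf("equal before strip: %d\n", a == b);                       // 0
  std::printf("equal after strip:  %d\n", a.remove_speculative() == b);  // 1
  return 0;
}
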
--- a/src/share/vm/opto/doCall.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/doCall.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -161,19 +161,8 @@
 
     // Try inlining a bytecoded method:
     if (!call_does_dispatch) {
-      InlineTree* ilt;
-      if (UseOldInlining) {
-        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
-      } else {
-        // Make a disembodied, stateless ILT.
-        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
-        float site_invoke_ratio = prof_factor;
-        // Note:  ilt is for the root of this parse, not the present call site.
-        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
-      }
+      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
       WarmCallInfo scratch_ci;
-      if (!UseOldInlining)
-        scratch_ci.init(jvms, callee, profile, prof_factor);
       bool should_delay = false;
       WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
       assert(ci != &scratch_ci, "do not let this pointer escape");
@@ -261,7 +250,7 @@
           CallGenerator* miss_cg;
           Deoptimization::DeoptReason reason = morphism == 2 ?
                                     Deoptimization::Reason_bimorphic :
-                                    Deoptimization::Reason_class_check;
+                                    (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
           if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
               !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
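
When the monomorphic guard comes from a speculative receiver type, the call site above now deoptimizes with Reason_speculate_class_check instead of Reason_class_check, so failed speculation is accounted separately (and, per the compile.cpp changes, per method). The reason selection, isolated into a sketch with stand-in enumerators:

#include <cstdio>

enum DeoptReason {
  Reason_class_check,
  Reason_speculate_class_check,
  Reason_bimorphic
};

static DeoptReason pick_reason(int morphism, bool speculative_receiver) {
  if (morphism == 2) return Reason_bimorphic;
  return speculative_receiver ? Reason_speculate_class_check
                              : Reason_class_check;
}

int main() {
  std::printf("reason: %d\n", pick_reason(1, true));  // speculate_class_check
  return 0;
}
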
--- a/src/share/vm/opto/domgraph.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/domgraph.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/escape.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/escape.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1579,9 +1579,20 @@
         jobj->set_scalar_replaceable(false);
         return;
       }
+      // 2. An object is not scalar replaceable if the field into which it is
+      // stored has multiple bases, one of which is null.
+      if (field->base_count() > 1) {
+        for (BaseIterator i(field); i.has_next(); i.next()) {
+          PointsToNode* base = i.get();
+          if (base == null_obj) {
+            jobj->set_scalar_replaceable(false);
+            return;
+          }
+        }
+      }
     }
     assert(use->is_Field() || use->is_LocalVar(), "sanity");
-    // 2. An object is not scalar replaceable if it is merged with other objects.
+    // 3. An object is not scalar replaceable if it is merged with other objects.
     for (EdgeIterator j(use); j.has_next(); j.next()) {
       PointsToNode* ptn = j.get();
       if (ptn->is_JavaObject() && ptn != jobj) {
@@ -1600,13 +1611,13 @@
     FieldNode* field = j.get()->as_Field();
     int offset = field->as_Field()->offset();
 
-    // 3. An object is not scalar replaceable if it has a field with unknown
+    // 4. An object is not scalar replaceable if it has a field with unknown
     // offset (array's element is accessed in loop).
     if (offset == Type::OffsetBot) {
       jobj->set_scalar_replaceable(false);
       return;
     }
-    // 4. Currently an object is not scalar replaceable if a LoadStore node
+    // 5. Currently an object is not scalar replaceable if a LoadStore node
     // access its field since the field value is unknown after it.
     //
     Node* n = field->ideal_node();
@@ -1617,7 +1628,7 @@
       }
     }
 
-    // 5. Or the address may point to more then one object. This may produce
+    // 6. Or the address may point to more than one object. This may produce
     // the false positive result (set not scalar replaceable)
     // since the flow-insensitive escape analysis can't separate
     // the case when stores overwrite the field's value from the case
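
The new rule 2 above: an object stored into a field whose set of possible bases contains the null object is not scalar replaceable, since the store's target is ambiguous. Restated as a small standalone check with stand-in node types:

#include <cstdio>
#include <vector>

struct PointsToNode {
  bool is_null_obj = false;
};

// True when the field has multiple possible bases and one is null,
// i.e. when the stored object must stay materialized.
static bool stored_into_field_with_null_base(
    const std::vector<PointsToNode*>& bases) {
  if (bases.size() > 1) {
    for (const PointsToNode* base : bases) {
      if (base->is_null_obj) return true;
    }
  }
  return false;
}

int main() {
  PointsToNode obj, null_obj;
  null_obj.is_null_obj = true;
  std::vector<PointsToNode*> bases = { &obj, &null_obj };
  std::printf("not scalar replaceable: %d\n",
              stored_into_field_with_null_base(bases));
  return 0;
}
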
--- a/src/share/vm/opto/gcm.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/gcm.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,9 +50,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
+#endif
+
 
 // Portions of code courtesy of Clifford Click
 
@@ -1326,15 +1330,6 @@
   // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   // I can generate a memory op if there is not one nearby.
   if (C->is_method_compilation()) {
-    // Don't do it for natives, adapters, or runtime stubs
-    int allowed_reasons = 0;
-    // ...and don't do it when there have been too many traps, globally.
-    for (int reason = (int)Deoptimization::Reason_none+1;
-         reason < Compile::trapHistLength; reason++) {
-      assert(reason < BitsPerInt, "recode bit map");
-      if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
-        allowed_reasons |= nth_bit(reason);
-    }
     // By reversing the loop direction we get a very minor gain on mpegaudio.
     // Feel free to revert to a forward loop for clarity.
     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
@@ -1342,7 +1337,7 @@
       Node* proj = _matcher._null_check_tests[i];
       Node* val  = _matcher._null_check_tests[i + 1];
       Block* block = get_block_for_node(proj);
-      implicit_null_check(block, proj, val, allowed_reasons);
+      implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
       // The implicit_null_check will only perform the transformation
       // if the null branch is truly uncommon, *and* it leads to an
       // uncommon trap.  Combined with the too_many_traps guards
@@ -1666,10 +1661,10 @@
   }
   assert (_members.length() > 0, "no empty loops");
   Block* hd = head();
-  hd->_freq = 1.0f;
+  hd->_freq = 1.0;
   for (int i = 0; i < _members.length(); i++) {
     CFGElement* s = _members.at(i);
-    float freq = s->_freq;
+    double freq = s->_freq;
     if (s->is_block()) {
       Block* b = s->as_Block();
       for (uint j = 0; j < b->_num_succs; j++) {
@@ -1681,7 +1676,7 @@
       assert(lp->_parent == this, "immediate child");
       for (int k = 0; k < lp->_exits.length(); k++) {
         Block* eb = lp->_exits.at(k).get_target();
-        float prob = lp->_exits.at(k).get_prob();
+        double prob = lp->_exits.at(k).get_prob();
         update_succ_freq(eb, freq * prob);
       }
     }
@@ -1693,7 +1688,7 @@
   // inner blocks do not get erroneously scaled.
   if (_depth != 0) {
     // Total the exit probabilities for this loop.
-    float exits_sum = 0.0f;
+    double exits_sum = 0.0;
     for (int i = 0; i < _exits.length(); i++) {
       exits_sum += _exits.at(i).get_prob();
     }
@@ -1940,7 +1935,7 @@
 //------------------------------update_succ_freq-------------------------------
 // Update the appropriate frequency associated with block 'b', a successor of
 // a block in this loop.
-void CFGLoop::update_succ_freq(Block* b, float freq) {
+void CFGLoop::update_succ_freq(Block* b, double freq) {
   if (b->_loop == this) {
     if (b == head()) {
       // back branch within the loop
@@ -1981,11 +1976,11 @@
 // Scale frequency of loops and blocks by trip counts from outer loops
 // Do a top down traversal of loop tree (visit outer loops first.)
 void CFGLoop::scale_freq() {
-  float loop_freq = _freq * trip_count();
+  double loop_freq = _freq * trip_count();
   _freq = loop_freq;
   for (int i = 0; i < _members.length(); i++) {
     CFGElement* s = _members.at(i);
-    float block_freq = s->_freq * loop_freq;
+    double block_freq = s->_freq * loop_freq;
     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
       block_freq = MIN_BLOCK_FREQUENCY;
     s->_freq = block_freq;
@@ -1998,7 +1993,7 @@
 }
 
 // Frequency of outer loop
-float CFGLoop::outer_loop_freq() const {
+double CFGLoop::outer_loop_freq() const {
   if (_child != NULL) {
     return _child->_freq;
   }
@@ -2047,7 +2042,7 @@
       k = 0;
     }
     Block *blk = _exits.at(i).get_target();
-    float prob = _exits.at(i).get_prob();
+    double prob = _exits.at(i).get_prob();
     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
   }
   tty->print("\n");
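
This file switches block and loop frequencies from float to double (as does chaitin.cpp above). Frequencies are products of per-loop trip counts, so a deeply nested loop can push a float past its range and precision long before a double's; a quick demonstration:

#include <cstdio>

int main() {
  float  f = 1.0f;
  double d = 1.0;
  for (int depth = 0; depth < 12; depth++) {
    f *= 10000.0f;  // per-level trip-count estimate
    d *= 10000.0;
  }
  std::printf("float:  %g\n", f);  // inf: overflowed at this depth
  std::printf("double: %g\n", d);  // 1e+48
  return 0;
}
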
--- a/src/share/vm/opto/generateOptoStub.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/generateOptoStub.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,13 +104,12 @@
   //
   Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
   Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
-  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);
+  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
 
   // Set _thread_in_native
   // The order of stores into TLS is critical!  Setting _thread_in_native MUST
   // be last, because a GC is allowed at any time after setting it and the GC
   // will require last_Java_pc and last_Java_sp.
-  Node* adr_state = basic_plus_adr(top(), thread, in_bytes(JavaThread::thread_state_offset()));
 
   //-----------------------------
   // Compute signature for C call.  Varies from the Java signature!
@@ -118,8 +117,16 @@
   uint cnt = TypeFunc::Parms;
   // The C routines gets the base of thread-local storage passed in as an
   // extra argument.  Not all calls need it, but its cheap to add here.
-  for( ; cnt<parm_cnt; cnt++ )
-    fields[cnt] = jdomain->field_at(cnt);
+  for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
+    // Convert ints to longs if required.
+    if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(pcnt)->isa_int()) {
+      fields[cnt++] = TypeLong::LONG;
+      fields[cnt]   = Type::HALF; // must add an additional half for a long
+    } else {
+      fields[cnt] = jdomain->field_at(pcnt);
+    }
+  }
+
   fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
   // Also pass in the caller's PC, if asked for.
   if( return_pc )
@@ -170,12 +177,20 @@
 
   // Set fixed predefined input arguments
   cnt = 0;
-  for( i=0; i<TypeFunc::Parms; i++ )
-    call->init_req( cnt++, map()->in(i) );
+  for (i = 0; i < TypeFunc::Parms; i++)
+    call->init_req(cnt++, map()->in(i));
   // A little too aggressive on the parm copy; return address is not an input
   call->set_req(TypeFunc::ReturnAdr, top());
-  for( ; i<parm_cnt; i++ )    // Regular input arguments
-    call->init_req( cnt++, map()->in(i) );
+  for (; i < parm_cnt; i++) { // Regular input arguments
+    // Convert ints to longs if required.
+    if (CCallingConventionRequiresIntsAsLongs && jdomain->field_at(i)->isa_int()) {
+      Node* int_as_long = _gvn.transform(new (C) ConvI2LNode(map()->in(i)));
+      call->init_req(cnt++, int_as_long); // long
+      call->init_req(cnt++, top());       // half
+    } else {
+      call->init_req(cnt++, map()->in(i));
+    }
+  }
 
   call->init_req( cnt++, thread );
   if( return_pc )             // Return PC, if asked for
@@ -209,16 +224,15 @@
   //-----------------------------
 
   // Clear last_Java_sp
-  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
+  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
   // Clear last_Java_pc and (optionally)_flags
-  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
+  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias, MemNode::unordered);
 #if defined(SPARC)
-  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
+  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias, MemNode::unordered);
 #endif /* defined(SPARC) */
-#ifdef IA64
+#if (defined(IA64) && !defined(AIX))
   Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
-  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
-  store_to_memory(NULL, adr_last_Java_fp,    null(),    T_ADDRESS, NoAlias);
+  store_to_memory(NULL, adr_last_Java_fp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
 #endif
 
   // For is-fancy-jump, the C-return value is also the branch target
@@ -226,16 +240,16 @@
   // Runtime call returning oop in TLS?  Fetch it out
   if( pass_tls ) {
     Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
-    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
+    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
     map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
     // clear thread-local-storage(tls)
-    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias);
+    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
   }
 
   //-----------------------------
   // check exception
   Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
-  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
+  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
 
   Node* exit_memory = reset_memory();
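
Where CCallingConventionRequiresIntsAsLongs is set (introduced alongside the ppc_64 ad files referenced above), each Java int argument to a runtime stub is widened to a long plus a HALF filler slot in the C signature, with a matching ConvI2L at the call site. A toy signature builder showing the slot layout; the type tags are stand-ins:

#include <cstdio>
#include <vector>

enum TypeTag { T_INT, T_LONG, T_HALF, T_PTR };

static std::vector<TypeTag> c_signature(const std::vector<TypeTag>& java,
                                        bool ints_as_longs) {
  std::vector<TypeTag> out;
  for (TypeTag t : java) {
    if (ints_as_longs && t == T_INT) {
      out.push_back(T_LONG);
      out.push_back(T_HALF);  // second slot of the widened value
    } else {
      out.push_back(t);
    }
  }
  out.push_back(T_PTR);  // thread-local storage, always appended
  return out;
}

int main() {
  std::vector<TypeTag> java = { T_INT, T_PTR, T_INT };
  std::printf("C argument slots: %zu\n",
              c_signature(java, true).size());  // 6
  return 0;
}
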
 
--- a/src/share/vm/opto/graphKit.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -420,7 +420,7 @@
       }
       const Type* srctype = _gvn.type(src);
       if (phi->type() != srctype) {
-        const Type* dsttype = phi->type()->meet(srctype);
+        const Type* dsttype = phi->type()->meet_speculative(srctype);
         if (phi->type() != dsttype) {
           phi->set_type(dsttype);
           _gvn.set_type(phi, dsttype);
@@ -494,7 +494,7 @@
     // first must access the should_post_on_exceptions_flag in this thread's JavaThread
     Node* jthread = _gvn.transform(new (C) ThreadLocalNode());
     Node* adr = basic_plus_adr(top(), jthread, in_bytes(JavaThread::should_post_on_exceptions_flag_offset()));
-    Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, false);
+    Node* should_post_flag = make_load(control(), adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw, MemNode::unordered);
 
     // Test the should_post_on_exceptions_flag vs. 0
     Node* chk = _gvn.transform( new (C) CmpINode(should_post_flag, intcon(0)) );
@@ -596,7 +596,8 @@
 
       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
       const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
-      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);
+      // Conservatively release stores of object references.
+      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT, MemNode::release);
 
       add_exception_state(make_exception_state(ex_node));
       return;
@@ -611,9 +612,10 @@
   // Usual case:  Bail to interpreter.
   // Reserve the right to recompile if we haven't seen anything yet.
 
+  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   if (treat_throw_as_hot
-      && (method()->method_data()->trap_recompiled_at(bci())
+      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
           || C->too_many_traps(reason))) {
     // We cannot afford to take more traps here.  Suffer in the interpreter.
     if (C->log() != NULL)
@@ -863,7 +865,7 @@
     }
   }
 
-  if (env()->jvmti_can_access_local_variables()) {
+  if (env()->should_retain_local_variables()) {
     // At any safepoint, this method can get breakpointed, which would
     // then require an immediate deoptimization.
     can_prune_locals = false;  // do not prune locals
@@ -1223,7 +1225,7 @@
         // See if mixing in the NULL pointer changes type.
         // If so, then the NULL pointer was not allowed in the original
         // type.  In other words, "value" was not-null.
-        if (t->meet(TypePtr::NULL_PTR) != t) {
+        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
           // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
           explicit_null_checks_elided++;
           return value;           // Elided null check quickly!
@@ -1356,7 +1358,7 @@
 // Cast obj to not-null on this path
 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
   const Type *t = _gvn.type(obj);
-  const Type *t_not_null = t->join(TypePtr::NOTNULL);
+  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
   // Object is already not-null?
   if( t == t_not_null ) return obj;
 
@@ -1483,16 +1485,16 @@
 // factory methods in "int adr_idx"
 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                           int adr_idx,
-                          bool require_atomic_access) {
+                          MemNode::MemOrd mo, bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
+    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, mo);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo);
   }
   ld = _gvn.transform(ld);
   if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
@@ -1504,6 +1506,7 @@
 
 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
                                 int adr_idx,
+                                MemNode::MemOrd mo,
                                 bool require_atomic_access) {
   assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   const TypePtr* adr_type = NULL;
@@ -1511,9 +1514,9 @@
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
+    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
   } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
   st = _gvn.transform(st);
   set_memory(st, adr_idx);
@@ -1613,7 +1616,8 @@
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt,
-                          bool use_precise) {
+                          bool use_precise,
+                          MemNode::MemOrd mo) {
   // Transformation of a value which could be NULL pointer (CastPP #NULL)
   // could be delayed during Parse (for example, in adjust_map_after_if()).
   // Execute transformation here to avoid barrier generation in such case.
@@ -1633,7 +1637,7 @@
               NULL /* pre_val */,
               bt);
 
-  Node* store = store_to_memory(control(), adr, val, bt, adr_idx);
+  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
   post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   return store;
 }
@@ -1644,7 +1648,8 @@
                              Node* adr,  // actual adress to store val at
                              const TypePtr* adr_type,
                              Node* val,
-                             BasicType bt) {
+                             BasicType bt,
+                             MemNode::MemOrd mo) {
   Compile::AliasType* at = C->alias_type(adr_type);
   const TypeOopPtr* val_type = NULL;
   if (adr_type->isa_instptr()) {
@@ -1663,7 +1668,7 @@
   if (val_type == NULL) {
     val_type = TypeInstPtr::BOTTOM;
   }
-  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
+  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
 }
 
 
@@ -1707,7 +1712,7 @@
   const Type* elemtype = arytype->elem();
   BasicType elembt = elemtype->array_element_basic_type();
   Node* adr = array_element_address(ary, idx, elembt, arytype->size());
-  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype);
+  Node* ld = make_load(ctl, adr, elemtype, elembt, arytype, MemNode::unordered);
   return ld;
 }
 
@@ -1942,9 +1947,9 @@
 void GraphKit::increment_counter(Node* counter_addr) {
   int adr_type = Compile::AliasIdxRaw;
   Node* ctrl = control();
-  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type);
+  Node* cnt  = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type );
+  store_to_memory(ctrl, counter_addr, incr, T_INT, adr_type, MemNode::unordered);
 }
 
 
@@ -2108,30 +2113,33 @@
  * @return           node with improved type
  */
 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls) {
-  const TypeOopPtr* current_type = _gvn.type(n)->isa_oopptr();
+  const Type* current_type = _gvn.type(n);
   assert(UseTypeSpeculation, "type speculation must be on");
-  if (exact_kls != NULL &&
-      // nothing to improve if type is already exact
-      (current_type == NULL ||
-       (!current_type->klass_is_exact() &&
-        (current_type->speculative() == NULL ||
-         !current_type->speculative()->klass_is_exact())))) {
+
+  const TypeOopPtr* speculative = current_type->speculative();
+
+  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
     const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls);
     const TypeOopPtr* xtype = tklass->as_instance_type();
     assert(xtype->klass_is_exact(), "Should be exact");
-
+    // record the new speculative type's depth
+    speculative = xtype->with_inline_depth(jvms()->depth());
+  }
+
+  if (speculative != current_type->speculative()) {
     // Build a type with a speculative type (what we think we know
     // about the type but will need a guard when we use it)
-    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, xtype);
-    // We're changing the type, we need a new cast node to carry the
-    // new type. The new type depends on the control: what profiling
-    // tells us is only valid from here as far as we can tell.
-    Node* cast = new(C) CastPPNode(n, spec_type);
-    cast->init_req(0, control());
+    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
+    // We're changing the type, so we need a new CheckCast node to carry
+    // the new type. The new type depends on the control: what
+    // profiling tells us is only valid from here as far as we can
+    // tell.
+    Node* cast = new(C) CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
     cast = _gvn.transform(cast);
     replace_in_map(n, cast);
     n = cast;
   }
+
   return n;
 }
 
@@ -2141,7 +2149,7 @@
  *
  * @param n  receiver node
  *
- * @return           node with improved type
+ * @return   node with improved type
  */
 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
   if (!UseTypeSpeculation) {
@@ -2525,7 +2533,8 @@
 
   // First load the super-klass's check-offset
   Node *p1 = basic_plus_adr( superklass, superklass, in_bytes(Klass::super_check_offset_offset()) );
-  Node *chk_off = _gvn.transform( new (C) LoadINode( NULL, memory(p1), p1, _gvn.type(p1)->is_ptr() ) );
+  Node *chk_off = _gvn.transform(new (C) LoadINode(NULL, memory(p1), p1, _gvn.type(p1)->is_ptr(),
+                                                   TypeInt::INT, MemNode::unordered));
   int cacheoff_con = in_bytes(Klass::secondary_super_cache_offset());
   bool might_be_cache = (find_int_con(chk_off, cacheoff_con) == cacheoff_con);
 
@@ -2734,12 +2743,14 @@
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                              ciKlass* require_klass,
-                                            ciKlass* spec_klass,
+                                             ciKlass* spec_klass,
                                              bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
 
+  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
+
   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(Deoptimization::Reason_class_check))
+  if (too_many_traps(reason))
     return NULL;
 
   // (No, this isn't a call, but it's enough like a virtual call
@@ -2761,7 +2772,7 @@
                                             &exact_obj);
       { PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(Deoptimization::Reason_class_check,
+        uncommon_trap(reason,
                       Deoptimization::Action_maybe_recompile);
       }
       if (safe_for_replace) {
@@ -2788,8 +2799,10 @@
                                         bool not_null) {
   // type == NULL if profiling tells us this object is always null
   if (type != NULL) {
-    if (!too_many_traps(Deoptimization::Reason_null_check) &&
-        !too_many_traps(Deoptimization::Reason_class_check)) {
+    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
+    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
+    if (!too_many_traps(null_reason) &&
+        !too_many_traps(class_reason)) {
       Node* not_null_obj = NULL;
       // not_null is true if we know the object is not null and
       // there's no need for a null check
@@ -2808,7 +2821,7 @@
       {
         PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(Deoptimization::Reason_class_check,
+        uncommon_trap(class_reason,
                       Deoptimization::Action_maybe_recompile);
       }
       replace_in_map(not_null_obj, exact_obj);
@@ -2877,7 +2890,7 @@
   }
 
   if (known_statically && UseTypeSpeculation) {
-    // If we know the type check always succeed then we don't use the
+    // If we know the type check always succeeds then we don't use the
     // profiling data at this bytecode. Don't lose it, feed it to the
     // type system as a speculative type.
     not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
@@ -3009,7 +3022,7 @@
       if (failure_control != NULL) // failure is now impossible
         (*failure_control) = top();
       // adjust the type of the phi to the exact klass:
-      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
+      phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
     }
   }
 
@@ -3238,7 +3251,7 @@
   }
   constant_value = Klass::_lh_neutral_value;  // put in a known value
   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
-  return make_load(NULL, lhp, TypeInt::INT, T_INT);
+  return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
 }
 
 // We just put in an allocate/initialize with a big raw-memory effect.
@@ -3773,7 +3786,7 @@
 
   // Smash zero into card
   if( !UseConcMarkSweepGC ) {
-    __ store(__ ctrl(), card_adr, zero, bt, adr_type);
+    __ store(__ ctrl(), card_adr, zero, bt, adr_type, MemNode::release);
   } else {
     // Specialized path for CM store barrier
     __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
@@ -3870,9 +3883,9 @@
 
         // Now get the buffer location we will log the previous value into and store it
         Node *log_addr = __ AddP(no_base, buffer, next_index);
-        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
+        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
         // update the index
-        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
+        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);
 
       } __ else_(); {
 
@@ -3912,8 +3925,9 @@
     Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
     Node* log_addr = __ AddP(no_base, buffer, next_index);
 
-    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
+    // Order, see storeCM.
+    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
 
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
@@ -4043,7 +4057,7 @@
     int offset_field_idx = C->get_alias_index(offset_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, offset_offset),
-                     TypeInt::INT, T_INT, offset_field_idx);
+                     TypeInt::INT, T_INT, offset_field_idx, MemNode::unordered);
   } else {
     return intcon(0);
   }
@@ -4058,7 +4072,7 @@
     int count_field_idx = C->get_alias_index(count_field_type);
     return make_load(ctrl,
                      basic_plus_adr(str, str, count_offset),
-                     TypeInt::INT, T_INT, count_field_idx);
+                     TypeInt::INT, T_INT, count_field_idx, MemNode::unordered);
   } else {
     return load_array_length(load_String_value(ctrl, str));
   }
@@ -4074,7 +4088,7 @@
                                                    ciTypeArrayKlass::make(T_CHAR), true, 0);
   int value_field_idx = C->get_alias_index(value_field_type);
   Node* load = make_load(ctrl, basic_plus_adr(str, str, value_offset),
-                         value_type, T_OBJECT, value_field_idx);
+                         value_type, T_OBJECT, value_field_idx, MemNode::unordered);
   // String.value field is known to be @Stable.
   if (UseImplicitStableValues) {
     load = cast_array_to_stable(load, value_type);
@@ -4089,7 +4103,7 @@
   const TypePtr* offset_field_type = string_type->add_offset(offset_offset);
   int offset_field_idx = C->get_alias_index(offset_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, offset_offset),
-                  value, T_INT, offset_field_idx);
+                  value, T_INT, offset_field_idx, MemNode::unordered);
 }
 
 void GraphKit::store_String_value(Node* ctrl, Node* str, Node* value) {
@@ -4099,7 +4113,7 @@
   const TypePtr* value_field_type = string_type->add_offset(value_offset);
 
   store_oop_to_object(ctrl, str,  basic_plus_adr(str, value_offset), value_field_type,
-      value, TypeAryPtr::CHARS, T_OBJECT);
+      value, TypeAryPtr::CHARS, T_OBJECT, MemNode::unordered);
 }
 
 void GraphKit::store_String_length(Node* ctrl, Node* str, Node* value) {
@@ -4109,7 +4123,7 @@
   const TypePtr* count_field_type = string_type->add_offset(count_offset);
   int count_field_idx = C->get_alias_index(count_field_type);
   store_to_memory(ctrl, basic_plus_adr(str, count_offset),
-                  value, T_INT, count_field_idx);
+                  value, T_INT, count_field_idx, MemNode::unordered);
 }
 
 Node* GraphKit::cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type) {
--- a/src/share/vm/opto/graphKit.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/graphKit.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -406,7 +406,7 @@
   // Use the type profile to narrow an object type.
   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                      ciKlass* require_klass,
-                                    ciKlass* spec,
+                                     ciKlass* spec,
                                      bool safe_for_replace);
 
   // Cast obj to type and emit guard unless we had too many traps here already
@@ -510,36 +510,50 @@
 
   // Create a LoadNode, reading from the parser's memory state.
   // (Note:  require_atomic_access is useful only with T_LONG.)
+  //
+  // We choose the unordered semantics by default because we have
+  // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
+  // of volatile fields.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
-                  bool require_atomic_access = false) {
+                  MemNode::MemOrd mo, bool require_atomic_access = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
-                     require_atomic_access);
+                     mo, require_atomic_access);
   }
-  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
+  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
+                  MemNode::MemOrd mo, bool require_atomic_access = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
-                     require_atomic_access);
+                     mo, require_atomic_access);
   }
   // This is the base version which is given an alias index.
-  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);
+  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
+                  MemNode::MemOrd mo, bool require_atomic_access = false);
 
   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.
+  //
+  // We must ensure that stores of object references will be visible
+  // only after the object's initialization. So the clients of this
+  // procedure must indicate that the store requires `release'
+  // semantics, if the stored value is an object reference that might
+  // point to a new object and may become externally visible.
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         const TypePtr* adr_type,
+                        MemNode::MemOrd mo,
                         bool require_atomic_access = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other store_to_memory factory");
     return store_to_memory(ctl, adr, val, bt,
                            C->get_alias_index(adr_type),
-                           require_atomic_access);
+                           mo, require_atomic_access);
   }
   // This is the base version which is given alias index
   // Return the new StoreXNode
   Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                         int adr_idx,
+                        MemNode::MemOrd,
                         bool require_atomic_access = false);
 
 
@@ -557,40 +571,44 @@
 
   Node* store_oop(Node* ctl,
                   Node* obj,   // containing obj
-                  Node* adr,  // actual adress to store val at
+                  Node* adr,   // actual address to store val at
                   const TypePtr* adr_type,
                   Node* val,
                   const TypeOopPtr* val_type,
                   BasicType bt,
-                  bool use_precise);
+                  bool use_precise,
+                  MemNode::MemOrd mo);
 
   Node* store_oop_to_object(Node* ctl,
                             Node* obj,   // containing obj
-                            Node* adr,  // actual adress to store val at
+                            Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             const TypeOopPtr* val_type,
-                            BasicType bt) {
-    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
+                            BasicType bt,
+                            MemNode::MemOrd mo) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false, mo);
   }
 
   Node* store_oop_to_array(Node* ctl,
                            Node* obj,   // containing obj
-                           Node* adr,  // actual adress to store val at
+                           Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
-                           BasicType bt) {
-    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
+                           BasicType bt,
+                           MemNode::MemOrd mo) {
+    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
   }
 
   // Could be an array or object we don't know at compile time (unsafe ref.)
   Node* store_oop_to_unknown(Node* ctl,
                              Node* obj,   // containing obj
-                             Node* adr,  // actual adress to store val at
+                             Node* adr,   // actual address to store val at
                              const TypePtr* adr_type,
                              Node* val,
-                             BasicType bt);
+                             BasicType bt,
+                             MemNode::MemOrd mo);
 
   // For the few case where the barriers need special help
   void pre_barrier(bool do_load, Node* ctl,
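
A minimal sketch of the new calling convention, mirroring the graphKit.cpp call sites earlier in this changeset (the operands are placeholders; the point is the explicit MemNode::MemOrd argument):

    // A plain field load carries no ordering constraint:
    Node* ld = make_load(ctl, adr, TypeInt::INT, T_INT, MemNode::unordered);
    // A store that publishes an initialized value must ask for the
    // ordering explicitly:
    Node* st = store_to_memory(ctl, adr, val, T_INT, adr_type, MemNode::release);
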
--- a/src/share/vm/opto/idealGraphPrinter.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/idealGraphPrinter.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -404,7 +404,7 @@
 
     Node *node = n;
 #ifndef PRODUCT
-    node->_in_dump_cnt++;
+    Compile::current()->_in_dump_cnt++;
     print_prop(NODE_NAME_PROPERTY, (const char *)node->Name());
     const Type *t = node->bottom_type();
     print_prop("type", t->msg());
@@ -623,7 +623,7 @@
       print_prop("lrg", lrg_id);
     }
 
-    node->_in_dump_cnt--;
+    Compile::current()->_in_dump_cnt--;
 #endif
 
     tail(PROPERTIES_ELEMENT);
--- a/src/share/vm/opto/idealKit.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/idealKit.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -359,25 +359,25 @@
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t);
+    ld = LoadLNode::make_atomic(C, ctl, mem, adr, adr_type, t, MemNode::unordered);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, MemNode::unordered);
   }
   return transform(ld);
 }
 
 Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
-                                int adr_idx,
-                                bool require_atomic_access) {
-  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
+                      int adr_idx,
+                      MemNode::MemOrd mo, bool require_atomic_access) {
+  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
   const TypePtr* adr_type = NULL;
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node *mem = memory(adr_idx);
   Node* st;
   if (require_atomic_access && bt == T_LONG) {
-    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val);
+    st = StoreLNode::make_atomic(C, ctl, mem, adr, adr_type, val, mo);
   } else {
-    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt);
+    st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   }
   st = transform(st);
   set_memory(st, adr_idx);
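
For reference, the G1 pre-barrier hunk in graphKit.cpp above already shows the resulting call shape at a raw-memory call site, where no publication ordering is required:

    __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
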
--- a/src/share/vm/opto/idealKit.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/idealKit.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,6 +226,7 @@
               Node* val,
               BasicType bt,
               int adr_idx,
+              MemNode::MemOrd mo,
               bool require_atomic_access = false);
 
   // Store a card mark ordered after store_oop
--- a/src/share/vm/opto/ifg.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/ifg.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -281,20 +281,23 @@
 }
 #endif
 
-// Interfere this register with everything currently live.  Use the RegMasks
-// to trim the set of possible interferences. Return a count of register-only
-// interferences as an estimate of register pressure.
-void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) {
-  uint retval = 0;
-  // Interfere with everything live.
-  const RegMask &rm = lrgs(r).mask();
-  // Check for interference by checking overlap of regmasks.
-  // Only interfere if acceptable register masks overlap.
+/*
+ * Interfere this register with everything currently live.
+ * Check for interference by checking overlap of regmasks.
+ * Only interfere if acceptable register masks overlap.
+ */
+void PhaseChaitin::interfere_with_live(uint lid, IndexSet* liveout) {
+  LRG& lrg = lrgs(lid);
+  const RegMask& rm = lrg.mask();
   IndexSetIterator elements(liveout);
-  uint l;
-  while( (l = elements.next()) != 0 )
-    if( rm.overlap( lrgs(l).mask() ) )
-      _ifg->add_edge( r, l );
+  uint interfering_lid = elements.next();
+  while (interfering_lid != 0) {
+    LRG& interfering_lrg = lrgs(interfering_lid);
+    if (rm.overlap(interfering_lrg.mask())) {
+      _ifg->add_edge(lid, interfering_lid);
+    }
+    interfering_lid = elements.next();
+  }
 }
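
An illustration of the overlap test (the register classes are hypothetical): two simultaneously live ranges interfere only if their acceptable register sets could collide.

    // lrg A mask: {RAX, RBX, ...}   (integer class)
    // lrg B mask: {XMM0..XMM15}     (float class)
    // rm.overlap(...) is false, so no IFG edge is added: A and B can
    // never be assigned the same register and hence never conflict.
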
 
 // Actually build the interference graph.  Uses virtual registers only, no
@@ -333,7 +336,7 @@
         // Copies do not define a new value and so do not interfere.
         // Remove the copies source from the liveout set before interfering.
         uint idx = n->is_Copy();
-        if (idx) {
+        if (idx != 0) {
           liveout->remove(_lrg_map.live_range_id(n->in(idx)));
         }
 
@@ -389,418 +392,459 @@
   } // End of forall blocks
 }
 
-uint PhaseChaitin::count_int_pressure( IndexSet *liveout ) {
+#ifdef ASSERT
+uint PhaseChaitin::count_int_pressure(IndexSet* liveout) {
   IndexSetIterator elements(liveout);
-  uint lidx;
+  uint lidx = elements.next();
   uint cnt = 0;
-  while ((lidx = elements.next()) != 0) {
-    if( lrgs(lidx).mask().is_UP() &&
-        lrgs(lidx).mask_size() &&
-        !lrgs(lidx)._is_float &&
-        !lrgs(lidx)._is_vector &&
-        lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) )
-      cnt += lrgs(lidx).reg_pressure();
+  while (lidx != 0) {
+    LRG& lrg = lrgs(lidx);
+    if (lrg.mask_is_nonempty_and_up() &&
+        !lrg.is_float_or_vector() &&
+        lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
+      cnt += lrg.reg_pressure();
+    }
+    lidx = elements.next();
   }
   return cnt;
 }
 
-uint PhaseChaitin::count_float_pressure( IndexSet *liveout ) {
+uint PhaseChaitin::count_float_pressure(IndexSet* liveout) {
   IndexSetIterator elements(liveout);
-  uint lidx;
+  uint lidx = elements.next();
   uint cnt = 0;
-  while ((lidx = elements.next()) != 0) {
-    if( lrgs(lidx).mask().is_UP() &&
-        lrgs(lidx).mask_size() &&
-        (lrgs(lidx)._is_float || lrgs(lidx)._is_vector))
-      cnt += lrgs(lidx).reg_pressure();
+  while (lidx != 0) {
+    LRG& lrg = lrgs(lidx);
+    if (lrg.mask_is_nonempty_and_up() && lrg.is_float_or_vector()) {
+      cnt += lrg.reg_pressure();
+    }
+    lidx = elements.next();
   }
   return cnt;
 }
+#endif
 
-// Adjust register pressure down by 1.  Capture last hi-to-low transition,
-static void lower_pressure( LRG *lrg, uint where, Block *b, uint *pressure, uint *hrp_index ) {
-  if (lrg->mask().is_UP() && lrg->mask_size()) {
-    if (lrg->_is_float || lrg->_is_vector) {
-      pressure[1] -= lrg->reg_pressure();
-      if( pressure[1] == (uint)FLOATPRESSURE ) {
-        hrp_index[1] = where;
-        if( pressure[1] > b->_freg_pressure )
-          b->_freg_pressure = pressure[1]+1;
+/*
+ * Adjust register pressure down by 1.  Capture last hi-to-low transition.
+ */
+void PhaseChaitin::lower_pressure(Block* b, uint location, LRG& lrg, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure) {
+  if (lrg.mask_is_nonempty_and_up()) {
+    if (lrg.is_float_or_vector()) {
+      float_pressure.lower(lrg, location);
+    } else {
+      // Do not count the SP and flag registers
+      const RegMask& r = lrg.mask();
+      if (r.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
+        int_pressure.lower(lrg, location);
       }
-    } else if( lrg->mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
-      pressure[0] -= lrg->reg_pressure();
-      if( pressure[0] == (uint)INTPRESSURE   ) {
-        hrp_index[0] = where;
-        if( pressure[0] > b->_reg_pressure )
-          b->_reg_pressure = pressure[0]+1;
+    }
+  }
+  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
+}
+
+/* Return the index of the first non-phi node in a block */
+static uint first_nonphi_index(Block* b) {
+  uint i;
+  uint end_idx = b->end_idx();
+  for (i = 1; i < end_idx; i++) {
+    Node* n = b->get_node(i);
+    if (!n->is_Phi()) {
+      break;
+    }
+  }
+  return i;
+}
+
+/*
+ * Spills could be inserted before a CreateEx node, which should be the first
+ * instruction in a block after the Phi nodes. If so, move the CreateEx node up.
+ */
+static void move_exception_node_up(Block* b, uint first_inst, uint last_inst) {
+  for (uint i = first_inst; i < last_inst; i++) {
+    Node* ex = b->get_node(i);
+    if (ex->is_SpillCopy()) {
+      continue;
+    }
+
+    if (i > first_inst &&
+        ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
+      b->remove_node(i);
+      b->insert_node(ex, first_inst);
+    }
+    // Stop once a CreateEx or any other node is found
+    break;
+  }
+}
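
Schematically, for a block where spill copies were inserted in front of the exception object definition (node names illustrative):

    // before:  Phi, Phi, SpillCopy, SpillCopy, CreateEx, ...
    // after:   Phi, Phi, CreateEx, SpillCopy, SpillCopy, ...
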
+
+/*
+ * When new live ranges become live, we raise the register pressure.
+ */
+void PhaseChaitin::raise_pressure(Block* b, LRG& lrg, Pressure& int_pressure, Pressure& float_pressure) {
+  if (lrg.mask_is_nonempty_and_up()) {
+    if (lrg.is_float_or_vector()) {
+      float_pressure.raise(lrg);
+    } else {
+      // Do not count the SP and flag registers
+      const RegMask& rm = lrg.mask();
+      if (rm.overlap(*Matcher::idealreg2regmask[Op_RegI])) {
+        int_pressure.raise(lrg);
       }
     }
   }
 }
 
-// Build the interference graph using physical registers when available.
-// That is, if 2 live ranges are simultaneously alive but in their acceptable
-// register sets do not overlap, then they do not interfere.
-uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
-  NOT_PRODUCT( Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler); )
+
+/*
+ * Computes the initial register pressure of a block, looking at all live
+ * ranges in the liveout. The register pressure is computed for both float
+ * and int/pointer registers.
+ * Live ranges in the liveout are presumed live for the whole block.
+ * We add the cost for the whole block to the area of the live ranges initially.
+ * If a live range gets killed in the block, we'll subtract the unused part of
+ * the block from the area.
+ */
+void PhaseChaitin::compute_initial_block_pressure(Block* b, IndexSet* liveout, Pressure& int_pressure, Pressure& float_pressure, double cost) {
+  IndexSetIterator elements(liveout);
+  uint lid = elements.next();
+  while (lid != 0) {
+    LRG& lrg = lrgs(lid);
+    lrg._area += cost;
+    raise_pressure(b, lrg, int_pressure, float_pressure);
+    lid = elements.next();
+  }
+  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
+}
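
A worked example under assumed numbers: with block->_freq == 2.0 and inst_count == 10, every liveout range initially gains area += 20.0. If one of them is defined at the fourth instruction from the block bottom, cost has dropped by the time the definition is reached, and the subtraction at the def leaves exactly the part of the block the range actually spans:

    // cost at block entry      : 2.0 * 10 == 20.0  (added to each liveout lrg)
    // cost when def is reached : 2.0 *  7 == 14.0  (def is 4th from the bottom)
    // remaining area           : 20.0 - 14.0 == 6.0 == freq * 3 instructions
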
 
-  uint must_spill = 0;
+/*
+ * Remove a dead node if it is not used.
+ * We only remove projection nodes if the node "defining" the projection
+ * is dead as well; for example on x86, if we have a dead Add node we
+ * remove its RFLAGS projection too.
+ */
+bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uint lid, IndexSet* liveout) {
+  Node* def = n->in(0);
+  if (!n->is_Proj() ||
+      (_lrg_map.live_range_id(def) && !liveout->member(_lrg_map.live_range_id(def)))) {
+    b->remove_node(location);
+    LRG& lrg = lrgs(lid);
+    if (lrg._def == n) {
+      lrg._def = 0;
+    }
+    n->disconnect_inputs(NULL, C);
+    _cfg.unmap_node_from_block(n);
+    n->replace_by(C->top());
+    return true;
+  }
+  return false;
+}
+
+/*
+ * When encountering a fat projection and walking backwards through the
+ * block, the pressure might go from low to high and back to low, since the
+ * fat proj lives only at this instruction. If we find such a low-to-high
+ * transition, we record it.
+ */
+void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
+  RegMask mask_tmp = lrg.mask();
+  mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
+  pressure.check_pressure_at_fatproj(location, mask_tmp);
+}
+
+/*
+ * Insure high score for immediate-use spill copies so they get a color.
+ * All single-use MachSpillCopy(s) that immediately precede their
+ * use must color early.  If a longer live range steals their
+ * color, the spill copy will split and may push another spill copy
+ * further away resulting in an infinite spill-split-retry cycle.
+ * Assigning a zero area results in a high score() and a good
+ * location in the simplify list.
+ */
+void PhaseChaitin::assign_high_score_to_immediate_copies(Block* b, Node* n, LRG& lrg, uint next_inst, uint last_inst) {
+  if (n->is_SpillCopy() &&
+      lrg.is_singledef() && // A multi defined live range can still split
+      n->outcnt() == 1 &&   // and use must be in this block
+      _cfg.get_block_for_node(n->unique_out()) == b) {
 
-  // For all blocks (in any order) do...
-  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
-    Block* block = _cfg.get_block(i);
-    // Clone (rather than smash in place) the liveout info, so it is alive
-    // for the "collect_gc_info" phase later.
-    IndexSet liveout(_live->live(block));
-    uint last_inst = block->end_idx();
-    // Compute first nonphi node index
-    uint first_inst;
-    for (first_inst = 1; first_inst < last_inst; first_inst++) {
-      if (!block->get_node(first_inst)->is_Phi()) {
-        break;
+    Node* single_use = n->unique_out();
+    assert(b->find_node(single_use) >= next_inst, "Use must be later in block");
+    // Use can be earlier in block if it is a Phi, but then I should be a MultiDef
+
+    // Find the first non-SpillCopy 'm' that follows the current instruction;
+    // (next_inst - 1) is the index of the current instruction 'n'
+    Node* m = n;
+    for (uint i = next_inst; i <= last_inst && m->is_SpillCopy(); ++i) {
+      m = b->get_node(i);
+    }
+    if (m == single_use) {
+      lrg._area = 0.0;
+    }
+  }
+}
+
+/*
+ * Copies do not define a new value and so do not interfere.
+ * Remove the copies source from the liveout set before interfering.
+ */
+void PhaseChaitin::remove_interference_from_copy(Block* b, uint location, uint lid_copy, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
+  if (liveout->remove(lid_copy)) {
+    LRG& lrg_copy = lrgs(lid_copy);
+    lrg_copy._area -= cost;
+
+    // Lower register pressure since copy and definition can share the same register
+    lower_pressure(b, location, lrg_copy, liveout, int_pressure, float_pressure);
+  }
+}
+
+/*
+ * The defined value must go in a particular register. Remove that register from
+ * all conflicting parties and avoid the interference.
+ */
+void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg, IndexSet* liveout, uint& must_spill) {
+  // Check for common case
+  const RegMask& rm = lrg.mask();
+  int r_size = lrg.num_regs();
+  // Smear odd bits
+  IndexSetIterator elements(liveout);
+  uint l = elements.next();
+  while (l != 0) {
+    LRG& interfering_lrg = lrgs(l);
+    // If 'l' must spill already, do not further hack its bits.
+    // It will get some interferences and be forced to spill later.
+    if (interfering_lrg._must_spill) {
+      l = elements.next();
+      continue;
+    }
+
+    // Remove bound register(s) from 'l's choices
+    RegMask old = interfering_lrg.mask();
+    uint old_size = interfering_lrg.mask_size();
+
+    // Remove the bits in mask 'rm' from LRG 'l' so 'l' no
+    // longer interferes with the bound register(s).  If 'l' requires
+    // aligned adjacent pairs, subtract out bit pairs.
+    assert(!interfering_lrg._is_vector || !interfering_lrg._fat_proj, "sanity");
+
+    if (interfering_lrg.num_regs() > 1 && !interfering_lrg._fat_proj) {
+      RegMask r2mask = rm;
+      // Leave only aligned set of bits.
+      r2mask.smear_to_sets(interfering_lrg.num_regs());
+      // It includes vector case.
+      interfering_lrg.SUBTRACT(r2mask);
+      interfering_lrg.compute_set_mask_size();
+    } else if (r_size != 1) {
+      // fat proj
+      interfering_lrg.SUBTRACT(rm);
+      interfering_lrg.compute_set_mask_size();
+    } else {
+      // Common case: size 1 bound removal
+      OptoReg::Name r_reg = rm.find_first_elem();
+      if (interfering_lrg.mask().Member(r_reg)) {
+        interfering_lrg.Remove(r_reg);
+        interfering_lrg.set_mask_size(interfering_lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
       }
     }
 
-    // Spills could be inserted before CreateEx node which should be
-    // first instruction in block after Phis. Move CreateEx up.
-    for (uint insidx = first_inst; insidx < last_inst; insidx++) {
-      Node *ex = block->get_node(insidx);
-      if (ex->is_SpillCopy()) {
-        continue;
-      }
-      if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
-        // If the CreateEx isn't above all the MachSpillCopies
-        // then move it to the top.
-        block->remove_node(insidx);
-        block->insert_node(ex, first_inst);
-      }
-      // Stop once a CreateEx or any other node is found
-      break;
+    // If 'l' goes completely dry, it must spill.
+    if (interfering_lrg.not_free()) {
+      // Give 'l' some kind of reasonable mask, so it picks up
+      // interferences (and will spill later).
+      interfering_lrg.set_mask(old);
+      interfering_lrg.set_mask_size(old_size);
+      must_spill++;
+      interfering_lrg._must_spill = 1;
+      interfering_lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
+    }
+    l = elements.next();
+  }
+}
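
A hedged illustration (register names hypothetical): suppose the defined value is bound to RAX.

    // interfering mask before : {RAX, RBX, RCX}
    // after the removal       : {RBX, RCX}  -> still colorable
    // interfering mask before : {RAX}
    // after the removal       : {}          -> not_free(): restore the old mask,
    //                                          flag _must_spill, assign SPILL_REG
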
+
+/*
+ * Start loop at 1 (skip control edge) for most Nodes. SCMemProjs might be the
+ * sole use of a StoreLConditional. While StoreLConditionals set memory (the
+ * SCMemProj use) they also def flags; if that flag def is unused the allocator
+ * sees a flag-setting instruction with no use of the flags and assumes it's
+ * dead.  This keeps the (useless) flag-setting behavior alive while also
+ * keeping the (useful) memory update effect.
+ */
+void PhaseChaitin::add_input_to_liveout(Block* b, Node* n, IndexSet* liveout, double cost, Pressure& int_pressure, Pressure& float_pressure) {
+  JVMState* jvms = n->jvms();
+  uint debug_start = jvms ? jvms->debug_start() : 999999;
+
+  for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
+    Node* def = n->in(k);
+    uint lid = _lrg_map.live_range_id(def);
+    if (!lid) {
+      continue;
+    }
+    LRG& lrg = lrgs(lid);
+
+    // No use-side cost for spilling debug info
+    if (k < debug_start) {
+      // A USE costs twice block frequency (once for the Load, once
+      // for a Load-delay).  Rematerialized uses only cost once.
+      lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq * 2));
     }
 
-    // Reset block's register pressure values for each ifg construction
-    uint pressure[2], hrp_index[2];
-    pressure[0] = pressure[1] = 0;
-    hrp_index[0] = hrp_index[1] = last_inst+1;
-    block->_reg_pressure = block->_freg_pressure = 0;
-    // Liveout things are presumed live for the whole block.  We accumulate
-    // 'area' accordingly.  If they get killed in the block, we'll subtract
-    // the unused part of the block from the area.
+    if (liveout->insert(lid)) {
+      // Newly live things assumed live from here to top of block
+      lrg._area += cost;
+      raise_pressure(b, lrg, int_pressure, float_pressure);
+      assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+      assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
+    }
+    assert(lrg._area >= 0.0, "negative spill area" );
+  }
+}
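
The loop bound above encodes the special case from the comment: for ordinary nodes in(0) is the control edge, so the input walk starts at 1; for an SCMemProj, in(0) is the StoreLConditional it projects from, which must stay in the liveout, hence:

    // uint first_input = (n->Opcode() == Op_SCMemProj) ? 0 : 1;
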
+
+/*
+ * If we run off the top of the block with high pressure, just record that
+ * the whole block is high pressure (even though there might be a transition
+ * further down in the block).
+ */
+void PhaseChaitin::check_for_high_pressure_block(Pressure& pressure) {
+  // current pressure now means the pressure before the first instruction in the block
+  // (since we have stepped through all instructions backwards)
+  if (pressure.current_pressure() > pressure.high_pressure_limit()) {
+    pressure.set_high_pressure_index_to_block_start();
+  }
+}
+
+/*
+ * Compute the high pressure index; avoid landing in the middle of projnodes
+ * and set the high pressure index for the block
+ */
+void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, Pressure& pressure) {
+  uint i = pressure.high_pressure_index();
+  if (i < b->number_of_nodes() && i < b->end_idx() + 1) {
+    Node* cur = b->get_node(i);
+    while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
+      cur = b->get_node(--i);
+    }
+  }
+  block_hrp_index = i;
+}
+
+/* Build an interference graph:
+ *   That is, if 2 live ranges are simultaneously alive but in their acceptable
+ *   register sets do not overlap, then they do not interfere. The IFG is built
+ *   by a single reverse pass over each basic block. Starting with the known
+ *   live-out set, we remove things that get defined and add things that become
+ *   live (essentially executing one pass of a standard LIVE analysis). Just
+ *   before a Node defines a value (and removes it from the live-ness set) that
+ *   value is certainly live. The defined value interferes with everything
+ *   currently live. The value is then removed from the live-ness set and its
+ *   inputs are added to the live-ness set.
+ * Compute register pressure for each block:
+ *   We store the biggest register pressure for each block and also the first
+ *   low to high register pressure transition within the block (if any).
+ */
+uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
+  NOT_PRODUCT(Compile::TracePhase t3("buildIFG", &_t_buildIFGphysical, TimeCompiler);)
+
+  uint must_spill = 0;
+  for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
+    Block* block = _cfg.get_block(i);
+
+    // Clone (rather than smash in place) the liveout info, so it is alive
+    // for the "collect_gc_info" phase later.
+    IndexSet liveout(_live->live(block));
+
+    uint first_inst = first_nonphi_index(block);
+    uint last_inst = block->end_idx();
+
+    move_exception_node_up(block, first_inst, last_inst);
+
+    Pressure int_pressure(last_inst + 1, INTPRESSURE);
+    Pressure float_pressure(last_inst + 1, FLOATPRESSURE);
+    block->_reg_pressure = 0;
+    block->_freg_pressure = 0;
+
     int inst_count = last_inst - first_inst;
     double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
-    assert(!(cost < 0.0), "negative spill cost" );
-    IndexSetIterator elements(&liveout);
-    uint lidx;
-    while ((lidx = elements.next()) != 0) {
-      LRG &lrg = lrgs(lidx);
-      lrg._area += cost;
-      // Compute initial register pressure
-      if (lrg.mask().is_UP() && lrg.mask_size()) {
-        if (lrg._is_float || lrg._is_vector) {   // Count float pressure
-          pressure[1] += lrg.reg_pressure();
-          if (pressure[1] > block->_freg_pressure) {
-            block->_freg_pressure = pressure[1];
-          }
-          // Count int pressure, but do not count the SP, flags
-        } else if(lrgs(lidx).mask().overlap(*Matcher::idealreg2regmask[Op_RegI])) {
-          pressure[0] += lrg.reg_pressure();
-          if (pressure[0] > block->_reg_pressure) {
-            block->_reg_pressure = pressure[0];
-          }
-        }
-      }
-    }
-    assert( pressure[0] == count_int_pressure  (&liveout), "" );
-    assert( pressure[1] == count_float_pressure(&liveout), "" );
+    assert(cost >= 0.0, "negative spill cost" );
+
+    compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);
 
-    // The IFG is built by a single reverse pass over each basic block.
-    // Starting with the known live-out set, we remove things that get
-    // defined and add things that become live (essentially executing one
-    // pass of a standard LIVE analysis).  Just before a Node defines a value
-    // (and removes it from the live-ness set) that value is certainly live.
-    // The defined value interferes with everything currently live.  The
-    // value is then removed from the live-ness set and it's inputs are added
-    // to the live-ness set.
-    uint j;
-    for (j = last_inst + 1; j > 1; j--) {
-      Node* n = block->get_node(j - 1);
+    for (uint location = last_inst; location > 0; location--) {
+      Node* n = block->get_node(location);
+      uint lid = _lrg_map.live_range_id(n);
 
-      // Get value being defined
-      uint r = _lrg_map.live_range_id(n);
+      if (lid) {
+        LRG& lrg = lrgs(lid);
 
-      // Some special values do not allocate
-      if(r) {
         // A DEF normally costs block frequency; rematerialized values are
         // removed from the DEF sight, so LOWER costs here.
-        lrgs(r)._cost += n->rematerialize() ? 0 : block->_freq;
+        lrg._cost += n->rematerialize() ? 0 : block->_freq;
 
-        // If it is not live, then this instruction is dead.  Probably caused
-        // by spilling and rematerialization.  Who cares why, yank this baby.
-        if( !liveout.member(r) && n->Opcode() != Op_SafePoint ) {
-          Node *def = n->in(0);
-          if( !n->is_Proj() ||
-              // Could also be a flags-projection of a dead ADD or such.
-              (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
-            block->remove_node(j - 1);
-            if (lrgs(r)._def == n) {
-              lrgs(r)._def = 0;
-            }
-            n->disconnect_inputs(NULL, C);
-            _cfg.unmap_node_from_block(n);
-            n->replace_by(C->top());
-            // Since yanking a Node from block, high pressure moves up one
-            hrp_index[0]--;
-            hrp_index[1]--;
+        if (!liveout.member(lid) && n->Opcode() != Op_SafePoint) {
+          if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
+            float_pressure.lower_high_pressure_index();
+            int_pressure.lower_high_pressure_index();
             continue;
           }
-
-          // Fat-projections kill many registers which cannot be used to
-          // hold live ranges.
-          if (lrgs(r)._fat_proj) {
-            // Count the int-only registers
-            RegMask itmp = lrgs(r).mask();
-            itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
-            int iregs = itmp.Size();
-            if (pressure[0]+iregs > block->_reg_pressure) {
-              block->_reg_pressure = pressure[0] + iregs;
-            }
-            if (pressure[0] <= (uint)INTPRESSURE && pressure[0] + iregs > (uint)INTPRESSURE) {
-              hrp_index[0] = j - 1;
-            }
-            // Count the float-only registers
-            RegMask ftmp = lrgs(r).mask();
-            ftmp.AND(*Matcher::idealreg2regmask[Op_RegD]);
-            int fregs = ftmp.Size();
-            if (pressure[1] + fregs > block->_freg_pressure) {
-              block->_freg_pressure = pressure[1] + fregs;
-            }
-            if(pressure[1] <= (uint)FLOATPRESSURE && pressure[1]+fregs > (uint)FLOATPRESSURE) {
-              hrp_index[1] = j - 1;
-            }
+          if (lrg._fat_proj) {
+            check_for_high_pressure_transition_at_fatproj(block->_reg_pressure, location, lrg, int_pressure, Op_RegI);
+            check_for_high_pressure_transition_at_fatproj(block->_freg_pressure, location, lrg, float_pressure, Op_RegD);
           }
-
-        } else {                // Else it is live
-          // A DEF also ends 'area' partway through the block.
-          lrgs(r)._area -= cost;
-          assert(!(lrgs(r)._area < 0.0), "negative spill area" );
+        } else {
+          // A live range ends at its definition; remove the remaining area.
+          // If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
+          // and +Inf - +Inf = NaN. So let's not do that subtraction.
+          if (g_isfinite(cost)) {
+            lrg._area -= cost;
+          }
+          assert(lrg._area >= 0.0, "negative spill area" );
 
-          // Insure high score for immediate-use spill copies so they get a color
-          if( n->is_SpillCopy()
-              && lrgs(r).is_singledef()        // MultiDef live range can still split
-              && n->outcnt() == 1              // and use must be in this block
-              && _cfg.get_block_for_node(n->unique_out()) == block) {
-            // All single-use MachSpillCopy(s) that immediately precede their
-            // use must color early.  If a longer live range steals their
-            // color, the spill copy will split and may push another spill copy
-            // further away resulting in an infinite spill-split-retry cycle.
-            // Assigning a zero area results in a high score() and a good
-            // location in the simplify list.
-            //
-
-            Node *single_use = n->unique_out();
-            assert(block->find_node(single_use) >= j, "Use must be later in block");
-            // Use can be earlier in block if it is a Phi, but then I should be a MultiDef
-
-            // Find first non SpillCopy 'm' that follows the current instruction
-            // (j - 1) is index for current instruction 'n'
-            Node *m = n;
-            for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
-              m = block->get_node(i);
-            }
-            if (m == single_use) {
-              lrgs(r)._area = 0.0;
-            }
-          }
-
-          // Remove from live-out set
-          if( liveout.remove(r) ) {
-            // Adjust register pressure.
-            // Capture last hi-to-lo pressure transition
-            lower_pressure(&lrgs(r), j - 1, block, pressure, hrp_index);
-            assert( pressure[0] == count_int_pressure  (&liveout), "" );
-            assert( pressure[1] == count_float_pressure(&liveout), "" );
-          }
+          assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);
 
-          // Copies do not define a new value and so do not interfere.
-          // Remove the copies source from the liveout set before interfering.
-          uint idx = n->is_Copy();
-          if (idx) {
-            uint x = _lrg_map.live_range_id(n->in(idx));
-            if (liveout.remove(x)) {
-              lrgs(x)._area -= cost;
-              // Adjust register pressure.
-              lower_pressure(&lrgs(x), j - 1, block, pressure, hrp_index);
-              assert( pressure[0] == count_int_pressure  (&liveout), "" );
-              assert( pressure[1] == count_float_pressure(&liveout), "" );
-            }
+          if (liveout.remove(lid)) {
+            lower_pressure(block, location, lrg, &liveout, int_pressure, float_pressure);
           }
-        } // End of if live or not
-
-        // Interfere with everything live.  If the defined value must
-        // go in a particular register, just remove that register from
-        // all conflicting parties and avoid the interference.
+          uint copy_idx = n->is_Copy();
+          if (copy_idx) {
+            uint lid_copy = _lrg_map.live_range_id(n->in(copy_idx));
+            remove_interference_from_copy(block, location, lid_copy, &liveout, cost, int_pressure, float_pressure);
+          }
+        }
 
-        // Make exclusions for rematerializable defs.  Since rematerializable
-        // DEFs are not bound but the live range is, some uses must be bound.
-        // If we spill live range 'r', it can rematerialize at each use site
-        // according to its bindings.
-        const RegMask &rmask = lrgs(r).mask();
-        if( lrgs(r).is_bound() && !(n->rematerialize()) && rmask.is_NotEmpty() ) {
-          // Check for common case
-          int r_size = lrgs(r).num_regs();
-          OptoReg::Name r_reg = (r_size == 1) ? rmask.find_first_elem() : OptoReg::Physical;
-          // Smear odd bits
-          IndexSetIterator elements(&liveout);
-          uint l;
-          while ((l = elements.next()) != 0) {
-            LRG &lrg = lrgs(l);
-            // If 'l' must spill already, do not further hack his bits.
-            // He'll get some interferences and be forced to spill later.
-            if( lrg._must_spill ) continue;
-            // Remove bound register(s) from 'l's choices
-            RegMask old = lrg.mask();
-            uint old_size = lrg.mask_size();
-            // Remove the bits from LRG 'r' from LRG 'l' so 'l' no
-            // longer interferes with 'r'.  If 'l' requires aligned
-            // adjacent pairs, subtract out bit pairs.
-            assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
-            if (lrg.num_regs() > 1 && !lrg._fat_proj) {
-              RegMask r2mask = rmask;
-              // Leave only aligned set of bits.
-              r2mask.smear_to_sets(lrg.num_regs());
-              // It includes vector case.
-              lrg.SUBTRACT( r2mask );
-              lrg.compute_set_mask_size();
-            } else if( r_size != 1 ) { // fat proj
-              lrg.SUBTRACT( rmask );
-              lrg.compute_set_mask_size();
-            } else {            // Common case: size 1 bound removal
-              if( lrg.mask().Member(r_reg) ) {
-                lrg.Remove(r_reg);
-                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
-              }
-            }
-            // If 'l' goes completely dry, it must spill.
-            if( lrg.not_free() ) {
-              // Give 'l' some kind of reasonable mask, so he picks up
-              // interferences (and will spill later).
-              lrg.set_mask( old );
-              lrg.set_mask_size(old_size);
-              must_spill++;
-              lrg._must_spill = 1;
-              lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
-            }
-          }
-        } // End of if bound
-
-        // Now interference with everything that is live and has
-        // compatible register sets.
-        interfere_with_live(r,&liveout);
-
-      } // End of if normal register-allocated value
+        // Since rematerializable DEFs are not bound but the live range is,
+        // some uses must be bound. If we spill this live range, it can
+        // rematerialize at each use site according to its bindings.
+        if (lrg.is_bound() && !n->rematerialize() && lrg.mask().is_NotEmpty()) {
+          remove_bound_register_from_interfering_live_ranges(lrg, &liveout, must_spill);
+        }
+        interfere_with_live(lid, &liveout);
+      }
 
       // Area remaining in the block
       inst_count--;
       cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
 
-      // Make all inputs live
-      if( !n->is_Phi() ) {      // Phi function uses come from prior block
-        JVMState* jvms = n->jvms();
-        uint debug_start = jvms ? jvms->debug_start() : 999999;
-        // Start loop at 1 (skip control edge) for most Nodes.
-        // SCMemProj's might be the sole use of a StoreLConditional.
-        // While StoreLConditionals set memory (the SCMemProj use)
-        // they also def flags; if that flag def is unused the
-        // allocator sees a flag-setting instruction with no use of
-        // the flags and assumes it's dead.  This keeps the (useless)
-        // flag-setting behavior alive while also keeping the (useful)
-        // memory update effect.
-        for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
-          Node *def = n->in(k);
-          uint x = _lrg_map.live_range_id(def);
-          if (!x) {
-            continue;
-          }
-          LRG &lrg = lrgs(x);
-          // No use-side cost for spilling debug info
-          if (k < debug_start) {
-            // A USE costs twice block frequency (once for the Load, once
-            // for a Load-delay).  Rematerialized uses only cost once.
-            lrg._cost += (def->rematerialize() ? block->_freq : (block->_freq + block->_freq));
-          }
-          // It is live now
-          if (liveout.insert(x)) {
-            // Newly live things assumed live from here to top of block
-            lrg._area += cost;
-            // Adjust register pressure
-            if (lrg.mask().is_UP() && lrg.mask_size()) {
-              if (lrg._is_float || lrg._is_vector) {
-                pressure[1] += lrg.reg_pressure();
-                if (pressure[1] > block->_freg_pressure)  {
-                  block->_freg_pressure = pressure[1];
-                }
-              } else if( lrg.mask().overlap(*Matcher::idealreg2regmask[Op_RegI]) ) {
-                pressure[0] += lrg.reg_pressure();
-                if (pressure[0] > block->_reg_pressure) {
-                  block->_reg_pressure = pressure[0];
-                }
-              }
-            }
-            assert( pressure[0] == count_int_pressure  (&liveout), "" );
-            assert( pressure[1] == count_float_pressure(&liveout), "" );
-          }
-          assert(!(lrg._area < 0.0), "negative spill area" );
-        }
-      }
-    } // End of reverse pass over all instructions in block
-
-    // If we run off the top of the block with high pressure and
-    // never see a hi-to-low pressure transition, just record that
-    // the whole block is high pressure.
-    if (pressure[0] > (uint)INTPRESSURE) {
-      hrp_index[0] = 0;
-      if (pressure[0] > block->_reg_pressure) {
-        block->_reg_pressure = pressure[0];
-      }
-    }
-    if (pressure[1] > (uint)FLOATPRESSURE) {
-      hrp_index[1] = 0;
-      if (pressure[1] > block->_freg_pressure) {
-        block->_freg_pressure = pressure[1];
+      if (!n->is_Phi()) {
+        add_input_to_liveout(block, n, &liveout, cost, int_pressure, float_pressure);
       }
     }
 
-    // Compute high pressure indice; avoid landing in the middle of projnodes
-    j = hrp_index[0];
-    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
-      Node* cur = block->get_node(j);
-      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
-        j--;
-        cur = block->get_node(j);
-      }
-    }
-    block->_ihrp_index = j;
-    j = hrp_index[1];
-    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
-      Node* cur = block->get_node(j);
-      while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
-        j--;
-        cur = block->get_node(j);
-      }
-    }
-    block->_fhrp_index = j;
+    check_for_high_pressure_block(int_pressure);
+    check_for_high_pressure_block(float_pressure);
+    adjust_high_pressure_index(block, block->_ihrp_index, int_pressure);
+    adjust_high_pressure_index(block, block->_fhrp_index, float_pressure);
+    // Record the final pressure as the register pressure for the block.
+    block->_reg_pressure = int_pressure.final_pressure();
+    block->_freg_pressure = float_pressure.final_pressure();
 
 #ifndef PRODUCT
     // Gather Register Pressure Statistics
-    if( PrintOptoStatistics ) {
-      if (block->_reg_pressure > (uint)INTPRESSURE || block->_freg_pressure > (uint)FLOATPRESSURE) {
+    if (PrintOptoStatistics) {
+      if (block->_reg_pressure > int_pressure.high_pressure_limit() || block->_freg_pressure > float_pressure.high_pressure_limit()) {
         _high_pressure++;
       } else {
         _low_pressure++;
       }
     }
 #endif
-  } // End of for all blocks
+  }
 
   return must_spill;
 }
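
A compact restatement of the reverse pass documented above build_ifg_physical (a pseudocode sketch, not the HotSpot sources):

    // liveout = live(block)                  // known live-out set
    // for n from the block bottom to the top:
    //   if n defines live range v:
    //     add an IFG edge (v, x) for each x in liveout whose mask overlaps v's
    //     liveout.remove(v)                  // v is dead above its definition
    //   for each input u of n:
    //     liveout.insert(live_range_id(u))   // u becomes live here
    // record the maximum pressure and the last low-to-high transition per block
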
--- a/src/share/vm/opto/ifnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/ifnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,6 @@
   if( !i1->is_Bool() ) return NULL;
   BoolNode *b = i1->as_Bool();
   Node *cmp = b->in(1);
-  if( cmp->is_FlagsProj() ) return NULL;
   if( !cmp->is_Cmp() ) return NULL;
   i1 = cmp->in(1);
   if( i1 == NULL || !i1->is_Phi() ) return NULL;
--- a/src/share/vm/opto/lcm.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/lcm.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,12 +45,52 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 // Optimization - Graph Style
 
+// Check whether val is a not-null-decoded compressed oop,
+// i.e. one whose access will hit the base of the heap if it represents NULL.
+static bool accesses_heap_base_zone(Node *val) {
+  if (Universe::narrow_oop_base() > 0) { // Implies UseCompressedOops.
+    if (val && val->is_Mach()) {
+      if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
+        // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
+        // decode NULL to point to the heap base (Decode_NN).
+        if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
+          return true;
+        }
+      }
+      // We must also recognize load operations with a Decode matched in the
+      // memory operand. We should not reach here except for PPC/AIX, as
+      // os::zero_page_read_protected() returns true everywhere else. On PPC,
+      // no such memory operands exist, therefore we have not yet implemented
+      // a check for such operands.
+      NOT_AIX(Unimplemented());
+    }
+  }
+  return false;
+}
+
+static bool needs_explicit_null_check_for_read(Node *val) {
+  // On some OSes (AIX) the page at address 0 is only write protected.
+  // If so, only Store operations will trap.
+  if (os::zero_page_read_protected()) {
+    return false;  // Implicit null check will work.
+  }
+  // Also a read accessing the base of a heap-based compressed heap will trap.
+  if (accesses_heap_base_zone(val) &&                    // Hits the base zone page.
+      Universe::narrow_oop_use_implicit_null_checks()) { // Base zone page is protected.
+    return false;
+  }
+
+  return true;
+}
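
Putting the two predicates together, the decision for a read that is a candidate for an implicit null check can be sketched as follows (cases taken from the comments above):

    // zero page read protected                  -> implicit check works for reads
    // not-null-decoded narrow oop and the heap
    //   base page protected                     -> implicit check works
    // any other read on such an OS              -> keep the explicit null check
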
+
 //------------------------------implicit_null_check----------------------------
 // Detect implicit-null-check opportunities.  Basically, find NULL checks
 // with suitable memory ops nearby.  Use the memory op to do the NULL check.
@@ -206,6 +246,14 @@
       }
       break;
     }
+
+    // On some OSes (AIX) the page at address 0 is only write protected.
+    // If so, only Store operations will trap.
+    // But a read accessing the base of a heap-based compressed heap will trap.
+    if (!was_store && needs_explicit_null_check_for_read(val)) {
+      continue;
+    }
+
     // check if the offset is not too high for implicit exception
     {
       intptr_t offset = 0;
@@ -472,13 +520,6 @@
           break;
         }
 
-        // For nodes that produce a FlagsProj, make the node adjacent to the
-        // use of the FlagsProj
-        if (use->is_FlagsProj() && get_block_for_node(use) == block) {
-          found_machif = true;
-          break;
-        }
-
         // More than this instruction pending for successor to be ready,
         // don't choose this if other opportunities are ready
         if (ready_cnt.at(use->_idx) > 1)
--- a/src/share/vm/opto/library_call.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/library_call.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -203,7 +203,9 @@
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
   bool inline_math(vmIntrinsics::ID id);
-  void inline_math_mathExact(Node* math);
+  template <typename OverflowOp>
+  bool inline_math_overflow(Node* arg1, Node* arg2);
+  void inline_math_mathExact(Node* math, Node* test);
   bool inline_math_addExactI(bool is_increment);
   bool inline_math_addExactL(bool is_increment);
   bool inline_math_multiplyExactI();
@@ -304,6 +306,7 @@
   bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
   Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
   Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
+  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
   bool inline_encodeISOArray();
   bool inline_updateCRC32();
   bool inline_updateBytesCRC32();
@@ -516,31 +519,31 @@
 
   case vmIntrinsics::_incrementExactI:
   case vmIntrinsics::_addExactI:
-    if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_incrementExactL:
   case vmIntrinsics::_addExactL:
-    if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactI:
   case vmIntrinsics::_subtractExactI:
-    if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactL:
   case vmIntrinsics::_subtractExactL:
-    if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactI:
-    if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactL:
-    if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactI:
-    if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactL:
-    if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
     break;
 
  default:
@@ -1057,7 +1060,7 @@
   const Type* thread_type  = TypeOopPtr::make_from_klass(thread_klass)->cast_to_ptr_type(TypePtr::NotNull);
   Node* thread = _gvn.transform(new (C) ThreadLocalNode());
   Node* p = basic_plus_adr(top()/*!oop*/, thread, in_bytes(JavaThread::threadObj_offset()));
-  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT);
+  Node* threadObj = make_load(NULL, p, thread_type, T_OBJECT, MemNode::unordered);
   tls_output = thread;
   return threadObj;
 }
@@ -1936,7 +1939,7 @@
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
 
     // These intrinsics are supported on all hardware
-  case vmIntrinsics::_dsqrt:  return Matcher::has_match_rule(Op_SqrtD)  ? inline_math(id) : false;
+  case vmIntrinsics::_dsqrt:  return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
   case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;
 
   case vmIntrinsics::_dexp:   return Matcher::has_match_rule(Op_ExpD)   ? inline_exp()    :
@@ -1969,18 +1972,8 @@
   return true;
 }
 
-void LibraryCallKit::inline_math_mathExact(Node* math) {
-  // If we didn't get the expected opcode it means we have optimized
-  // the node to something else and don't need the exception edge.
-  if (!math->is_MathExact()) {
-    set_result(math);
-    return;
-  }
-
-  Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
-  Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
-
-  Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
+void LibraryCallKit::inline_math_mathExact(Node* math, Node* test) {
+  Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
   IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
   Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
   Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
@@ -1998,108 +1991,50 @@
   }
 
   set_control(fast_path);
-  set_result(result);
+  set_result(math);
+}
+
+template <typename OverflowOp>
+bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
+  typedef typename OverflowOp::MathOp MathOp;
+
+  MathOp* mathOp = new(C) MathOp(arg1, arg2);
+  Node* operation = _gvn.transform( mathOp );
+  Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
+  inline_math_mathExact(operation, ofcheck);
+  return true;
 }
 
 bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
-  Node* arg1 = argument(0);
-  Node* arg2 = NULL;
-
-  if (is_increment) {
-    arg2 = intcon(1);
-  } else {
-    arg2 = argument(1);
-  }
-
-  Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
-  inline_math_mathExact(add);
-  return true;
+  return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
 }
 
 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
-  Node* arg1 = argument(0); // type long
-  // argument(1) == TOP
-  Node* arg2 = NULL;
-
-  if (is_increment) {
-    arg2 = longcon(1);
-  } else {
-    arg2 = argument(2); // type long
-    // argument(3) == TOP
-  }
-
-  Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(add);
-  return true;
+  return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
 }
 
 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
-  Node* arg1 = argument(0);
-  Node* arg2 = NULL;
-
-  if (is_decrement) {
-    arg2 = intcon(1);
-  } else {
-    arg2 = argument(1);
-  }
-
-  Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
-  inline_math_mathExact(sub);
-  return true;
+  return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
 }
 
 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
-  Node* arg1 = argument(0); // type long
-  // argument(1) == TOP
-  Node* arg2 = NULL;
-
-  if (is_decrement) {
-    arg2 = longcon(1);
-  } else {
-    arg2 = argument(2); // type long
-    // argument(3) == TOP
-  }
-
-  Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(sub);
-  return true;
+  return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
 }
 
 bool LibraryCallKit::inline_math_negateExactI() {
-  Node* arg1 = argument(0);
-
-  Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
-  inline_math_mathExact(neg);
-  return true;
+  return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
 }
 
 bool LibraryCallKit::inline_math_negateExactL() {
-  Node* arg1 = argument(0);
-  // argument(1) == TOP
-
-  Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
-  inline_math_mathExact(neg);
-  return true;
+  return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
 }
 
 bool LibraryCallKit::inline_math_multiplyExactI() {
-  Node* arg1 = argument(0);
-  Node* arg2 = argument(1);
-
-  Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
-  inline_math_mathExact(mul);
-  return true;
+  return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
 }
 
 bool LibraryCallKit::inline_math_multiplyExactL() {
-  Node* arg1 = argument(0);
-  // argument(1) == TOP
-  Node* arg2 = argument(2);
-  // argument(3) == TOP
-
-  Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(mul);
-  return true;
+  return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
 }
 
 Node*
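All of the rewritten intrinsics above funnel into inline_math_overflow, which emits a plain MathOp node for the result and a separate OverflowOp node for the test, replacing the old multi-projection MathExact nodes and their FlagsProj outputs. A minimal standalone sketch of the signed-add predicate an OverflowAddI node evaluates (see AddHelper::will_overflow in mathexactnode.cpp further down; the wrap-around add goes through unsigned types so the C++ stays well defined):

    #include <cstdint>

    // Minimal sketch, not VM code: signed 32-bit add overflow detection.
    static bool add_overflows(int32_t a, int32_t b) {
      int32_t r = (int32_t)((uint32_t)a + (uint32_t)b);   // two's-complement wrap
      return ((a ^ r) & (b ^ r)) < 0;  // overflow iff both operands differ in sign from the result
    }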
@@ -2627,8 +2562,13 @@
     // rough approximation of type.
     need_mem_bar = true;
     // For Stores, place a memory ordering barrier now.
-    if (is_store)
+    if (is_store) {
       insert_mem_bar(Op_MemBarRelease);
+    } else {
+      if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        insert_mem_bar(Op_MemBarVolatile);
+      }
+    }
   }
 
   // Memory barrier to prevent normal and 'unsafe' accesses from
@@ -2640,7 +2580,7 @@
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
 
   if (!is_store) {
-    Node* p = make_load(control(), adr, value_type, type, adr_type, is_volatile);
+    Node* p = make_load(control(), adr, value_type, type, adr_type, MemNode::unordered, is_volatile);
     // load value
     switch (type) {
     case T_BOOLEAN:
@@ -2684,13 +2624,14 @@
       break;
     }
 
+    MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
     if (type != T_OBJECT ) {
-      (void) store_to_memory(control(), adr, val, type, adr_type, is_volatile);
+      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
     } else {
       // Possibly an oop being stored to Java heap or native memory
       if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
         // oop to Java heap.
-        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
       } else {
         // We can't tell at compile time if we are storing in the Java heap or outside
         // of it. So we need to emit code to conditionally do the proper type of
@@ -2702,11 +2643,11 @@
         __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
           // Sync IdealKit and graphKit.
           sync_kit(ideal);
-          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type);
+          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
           // Update IdealKit memory.
           __ sync_kit(this);
         } __ else_(); {
-          __ store(__ ctrl(), adr, val, type, alias_type->index(), is_volatile);
+          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
         } __ end_if();
         // Final sync IdealKit and GraphKit.
         final_sync(ideal);
@@ -2716,10 +2657,13 @@
   }
 
   if (is_volatile) {
-    if (!is_store)
+    if (!is_store) {
       insert_mem_bar(Op_MemBarAcquire);
-    else
-      insert_mem_bar(Op_MemBarVolatile);
+    } else {
+      if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+        insert_mem_bar(Op_MemBarVolatile);
+      }
+    }
   }
 
   if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
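The two hunks above rebalance the barriers around volatile Unsafe accesses according to support_IRIW_for_not_multiple_copy_atomic_cpu (set on multi-copy-non-atomic CPUs such as PPC64 in this port): the full fence moves from after every volatile store to before every volatile load. Schematically:

    // volatile load,  IRIW support off:   load;  MemBarAcquire
    // volatile load,  IRIW support on:    MemBarVolatile;  load;  MemBarAcquire
    // volatile store, IRIW support off:   MemBarRelease;  store;  MemBarVolatile
    // volatile store, IRIW support on:    MemBarRelease;  store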
@@ -2979,12 +2923,12 @@
       Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
       if (kind == LS_xchg) {
         load_store = _gvn.transform(new (C) GetAndSetNNode(control(), mem, adr,
-                                                              newval_enc, adr_type, value_type->make_narrowoop()));
+                                                           newval_enc, adr_type, value_type->make_narrowoop()));
       } else {
         assert(kind == LS_cmpxchg, "wrong LoadStore operation");
         Node *oldval_enc = _gvn.transform(new (C) EncodePNode(oldval, oldval->bottom_type()->make_narrowoop()));
         load_store = _gvn.transform(new (C) CompareAndSwapNNode(control(), mem, adr,
-                                                                   newval_enc, oldval_enc));
+                                                                newval_enc, oldval_enc));
       }
     } else
 #endif
@@ -3090,9 +3034,9 @@
   const bool require_atomic_access = true;
   Node* store;
   if (type == T_OBJECT) // reference stores need a store barrier.
-    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type);
+    store = store_oop_to_unknown(control(), base, adr, adr_type, val, type, MemNode::release);
   else {
-    store = store_to_memory(control(), adr, val, type, adr_type, require_atomic_access);
+    store = store_to_memory(control(), adr, val, type, adr_type, MemNode::release, require_atomic_access);
   }
   insert_mem_bar(Op_MemBarCPUOrder);
   return true;
@@ -3104,10 +3048,10 @@
   insert_mem_bar(Op_MemBarCPUOrder);
   switch(id) {
     case vmIntrinsics::_loadFence:
-      insert_mem_bar(Op_MemBarAcquire);
+      insert_mem_bar(Op_LoadFence);
       return true;
     case vmIntrinsics::_storeFence:
-      insert_mem_bar(Op_MemBarRelease);
+      insert_mem_bar(Op_StoreFence);
       return true;
     case vmIntrinsics::_fullFence:
       insert_mem_bar(Op_MemBarVolatile);
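The fence intrinsics now lower to dedicated ideal opcodes instead of reusing the barriers that are attached to volatile accesses, so platform matchers can tell a standalone fence apart from an acquire/release paired with a memory operation:

    // Unsafe.loadFence()   -> Op_LoadFence        (was Op_MemBarAcquire)
    // Unsafe.storeFence()  -> Op_StoreFence       (was Op_MemBarRelease)
    // Unsafe.fullFence()   -> Op_MemBarVolatile   (unchanged)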
@@ -3152,7 +3096,7 @@
     Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
     // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
     // can generate code to load it as unsigned byte.
-    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN);
+    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
     Node* bits = intcon(InstanceKlass::fully_initialized);
     test = _gvn.transform(new (C) SubINode(inst, bits));
     // The 'test' is non-zero if we need to take a slow path.
@@ -3176,14 +3120,14 @@
   kls = null_check(kls, T_OBJECT);
   ByteSize offset = TRACE_ID_OFFSET;
   Node* insp = basic_plus_adr(kls, in_bytes(offset));
-  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG);
+  Node* tvalue = make_load(NULL, insp, TypeLong::LONG, T_LONG, MemNode::unordered);
   Node* bits = longcon(~0x03l); // ignore bit 0 & 1
   Node* andl = _gvn.transform(new (C) AndLNode(tvalue, bits));
   Node* clsused = longcon(0x01l); // set the class bit
   Node* orl = _gvn.transform(new (C) OrLNode(tvalue, clsused));
 
   const TypePtr *adr_type = _gvn.type(insp)->isa_ptr();
-  store_to_memory(control(), insp, orl, T_LONG, adr_type);
+  store_to_memory(control(), insp, orl, T_LONG, adr_type, MemNode::unordered);
   set_result(andl);
   return true;
 }
@@ -3192,15 +3136,15 @@
   Node* tls_ptr = NULL;
   Node* cur_thr = generate_current_thread(tls_ptr);
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::thread_id_offset()));
 
   Node* threadid = NULL;
   size_t thread_id_size = OSThread::thread_id_size();
   if (thread_id_size == (size_t) BytesPerLong) {
-    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG));
+    threadid = ConvL2I(make_load(control(), p, TypeLong::LONG, T_LONG, MemNode::unordered));
   } else if (thread_id_size == (size_t) BytesPerInt) {
-    threadid = make_load(control(), p, TypeInt::INT, T_INT);
+    threadid = make_load(control(), p, TypeInt::INT, T_INT, MemNode::unordered);
   } else {
     ShouldNotReachHere();
   }
@@ -3275,11 +3219,11 @@
 
   // (b) Interrupt bit on TLS must be false.
   Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
-  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
+  Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
   p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
 
   // Set the control input on the field _interrupted read to prevent it floating up.
-  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
+  Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT, MemNode::unordered);
   Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0)));
   Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne));
 
@@ -3347,7 +3291,7 @@
 // Given a klass oop, load its java mirror (a java.lang.Class oop).
 Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
   Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
-  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT);
+  return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
 }
 
 //-----------------------load_klass_from_mirror_common-------------------------
@@ -3384,7 +3328,7 @@
   // Branch around if the given klass has the given modifier bit set.
   // Like generate_guard, adds a new path onto the region.
   Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT);
+  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
   Node* mask = intcon(modifier_mask);
   Node* bits = intcon(modifier_bits);
   Node* mbit = _gvn.transform(new (C) AndINode(mods, mask));
@@ -3501,7 +3445,7 @@
 
   case vmIntrinsics::_getModifiers:
     p = basic_plus_adr(kls, in_bytes(Klass::modifier_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
     break;
 
   case vmIntrinsics::_isInterface:
@@ -3559,7 +3503,7 @@
       // Be sure to pin the oop load to the guard edge just created:
       Node* is_array_ctrl = region->in(region->req()-1);
       Node* cma = basic_plus_adr(kls, in_bytes(ArrayKlass::component_mirror_offset()));
-      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT);
+      Node* cmo = make_load(is_array_ctrl, cma, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
       phi->add_req(cmo);
     }
     query_value = null();  // non-array case is null
@@ -3567,7 +3511,7 @@
 
   case vmIntrinsics::_getClassAccessFlags:
     p = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
-    query_value = make_load(NULL, p, TypeInt::INT, T_INT);
+    query_value = make_load(NULL, p, TypeInt::INT, T_INT, MemNode::unordered);
     break;
 
   default:
@@ -3933,7 +3877,7 @@
                      vtable_index*vtableEntry::size()) * wordSize +
                      vtableEntry::method_offset_in_bytes();
   Node* entry_addr  = basic_plus_adr(obj_klass, entry_offset);
-  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS);
+  Node* target_call = make_load(NULL, entry_addr, TypePtr::NOTNULL, T_ADDRESS, MemNode::unordered);
 
   // Compare the target method with the expected method (e.g., Object.hashCode).
   const TypePtr* native_call_addr = TypeMetadataPtr::make(method);
@@ -4059,7 +4003,7 @@
 
   // Get the header out of the object, use LoadMarkNode when available
   Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
-  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type());
+  Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
 
   // Test the header to see if it is unlocked.
   Node *lock_mask      = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place);
@@ -5480,7 +5424,7 @@
         // Store a zero to the immediately preceding jint:
         Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit)));
         Node* p1 = basic_plus_adr(dest, x1);
-        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT);
+        mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT, MemNode::unordered);
         mem = _gvn.transform(mem);
       }
     }
@@ -5530,8 +5474,8 @@
         ((src_off ^ dest_off) & (BytesPerLong-1)) == 0) {
       Node* sptr = basic_plus_adr(src,  src_off);
       Node* dptr = basic_plus_adr(dest, dest_off);
-      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type);
-      store_to_memory(control(), dptr, sval, T_INT, adr_type);
+      Node* sval = make_load(control(), sptr, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
+      store_to_memory(control(), dptr, sval, T_INT, adr_type, MemNode::unordered);
       src_off += BytesPerInt;
       dest_off += BytesPerInt;
     } else {
@@ -5596,7 +5540,7 @@
   // super_check_offset, for the desired klass.
   int sco_offset = in_bytes(Klass::super_check_offset_offset());
   Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset);
-  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr());
+  Node* n3 = new(C) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr(), TypeInt::INT, MemNode::unordered);
   Node* check_offset = ConvI2X(_gvn.transform(n3));
   Node* check_value  = dest_elem_klass;
 
@@ -5737,7 +5681,7 @@
   Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr()));
   Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2)));
   Node* adr = basic_plus_adr(top(), base, ConvI2X(offset));
-  result = make_load(control(), adr, TypeInt::INT, T_INT);
+  result = make_load(control(), adr, TypeInt::INT, T_INT, MemNode::unordered);
 
   crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8)));
   result = _gvn.transform(new (C) XorINode(crc, result));
@@ -5838,7 +5782,7 @@
   const TypeOopPtr* object_type = TypeOopPtr::make_from_klass(klass);
 
   Node* no_ctrl = NULL;
-  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT);
+  Node* result = make_load(no_ctrl, adr, object_type, T_OBJECT, MemNode::unordered);
 
   // Use the pre-barrier to record the value in the referent field
   pre_barrier(false /* do_load */,
@@ -5885,7 +5829,7 @@
   const Type *type = TypeOopPtr::make_from_klass(field_klass->as_klass());
 
   // Build the load.
-  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, is_vol);
+  Node* loadedField = make_load(NULL, adr, type, bt, adr_type, MemNode::unordered, is_vol);
   return loadedField;
 }
 
@@ -5936,10 +5880,22 @@
   Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
   if (k_start == NULL) return false;
 
-  // Call the stub.
-  make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
-                    stubAddr, stubName, TypePtr::BOTTOM,
-                    src_start, dest_start, k_start);
+  if (Matcher::pass_original_key_for_aes()) {
+    // On SPARC we need to pass the original key, since key expansion must happen in the intrinsic
+    // due to compatibility issues between the Java key expansion and the SPARC crypto instructions.
+    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
+    if (original_k_start == NULL) return false;
+
+    // Call the stub.
+    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
+                      stubAddr, stubName, TypePtr::BOTTOM,
+                      src_start, dest_start, k_start, original_k_start);
+  } else {
+    // Call the stub.
+    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
+                      stubAddr, stubName, TypePtr::BOTTOM,
+                      src_start, dest_start, k_start);
+  }
 
   return true;
 }
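Where Matcher::pass_original_key_for_aes() is true (the SPARC case described in the comment), the stub call simply grows by one parameter and is otherwise identical:

    // aescrypt_block stub parameters (sketch):
    //   default:                    (src_start, dest_start, k_start)
    //   pass_original_key_for_aes:  (src_start, dest_start, k_start, original_k_start)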
@@ -6017,14 +5973,29 @@
   if (objRvec == NULL) return false;
   Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
 
-  // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
-  make_runtime_call(RC_LEAF|RC_NO_FP,
-                    OptoRuntime::cipherBlockChaining_aescrypt_Type(),
-                    stubAddr, stubName, TypePtr::BOTTOM,
-                    src_start, dest_start, k_start, r_start, len);
-
-  // return is void so no result needs to be pushed
-
+  Node* cbcCrypt;
+  if (Matcher::pass_original_key_for_aes()) {
+    // On SPARC we need to pass the original key, since key expansion must happen in the intrinsic
+    // due to compatibility issues between the Java key expansion and the SPARC crypto instructions.
+    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
+    if (original_k_start == NULL) return false;
+
+    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
+    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 src_start, dest_start, k_start, r_start, len, original_k_start);
+  } else {
+    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
+    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
+                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
+                                 stubAddr, stubName, TypePtr::BOTTOM,
+                                 src_start, dest_start, k_start, r_start, len);
+  }
+
+  // return cipher length (int)
+  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
+  set_result(retvalue);
   return true;
 }
 
@@ -6039,6 +6010,17 @@
   return k_start;
 }
 
+//------------------------------get_original_key_start_from_aescrypt_object-----------------------
+Node* LibraryCallKit::get_original_key_start_from_aescrypt_object(Node* aescrypt_object) {
+  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
+  assert(objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
+  if (objAESCryptKey == NULL) return (Node *) NULL;
+
+  // Now that we have the array, get the start address of the lastKey array.
+  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
+  return original_k_start;
+}
+
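The assert-then-return pair in this helper is deliberate: under ASSERT an unexpected com.sun.crypto.provider.AESCrypt layout fails loudly, while product builds simply decline to intrinsify and fall back to the Java implementation. The pattern, reduced to its shape (sketch):

    // Sketch: fail fast in debug builds, bail out of the intrinsic in product.
    Node* f = load_field_from_object(obj, "lastKey", "[B", /*is_exact*/ false);
    assert(f != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
    if (f == NULL) return (Node *) NULL;  // caller gives up on the intrinsic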
 //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
 // Return node representing slow path of predicate check.
 // the pseudo code we want to emulate with this predicate is:
--- a/src/share/vm/opto/live.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/live.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/live.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/live.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/locknode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/locknode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -43,8 +43,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 //------------------------------BoxLockNode------------------------------------
--- a/src/share/vm/opto/loopPredicate.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/loopPredicate.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/loopTransform.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/loopTransform.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -713,10 +713,6 @@
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
-      case Op_FlagsProj:
-        // Can't handle unrolling of loops containing
-        // nodes that generate a FlagsProj at the moment
-        return false;
       case Op_StrComp:
       case Op_StrEquals:
       case Op_StrIndexOf:
@@ -780,10 +776,6 @@
         continue; // not RC
 
       Node *cmp = bol->in(1);
-      if (cmp->is_FlagsProj()) {
-        continue;
-      }
-
       Node *rc_exp = cmp->in(1);
       Node *limit = cmp->in(2);
 
@@ -2699,27 +2691,38 @@
     _igvn.register_new_node_with_optimizer(store_value);
   }
 
+  if (CCallingConventionRequiresIntsAsLongs &&
+      // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT.
+      (t == T_FLOAT || t == T_INT || is_subword_type(t))) {
+    store_value = new (C) ConvI2LNode(store_value);
+    _igvn.register_new_node_with_optimizer(store_value);
+  }
+
   Node* mem_phi = store->in(MemNode::Memory);
   Node* result_ctrl;
   Node* result_mem;
   const TypeFunc* call_type = OptoRuntime::array_fill_Type();
   CallLeafNode *call = new (C) CallLeafNoFPNode(call_type, fill,
                                                 fill_name, TypeAryPtr::get_array_body_type(t));
-  call->init_req(TypeFunc::Parms+0, from);
-  call->init_req(TypeFunc::Parms+1, store_value);
+  uint cnt = 0;
+  call->init_req(TypeFunc::Parms + cnt++, from);
+  call->init_req(TypeFunc::Parms + cnt++, store_value);
+  if (CCallingConventionRequiresIntsAsLongs) {
+    call->init_req(TypeFunc::Parms + cnt++, C->top());
+  }
 #ifdef _LP64
   len = new (C) ConvI2LNode(len);
   _igvn.register_new_node_with_optimizer(len);
 #endif
-  call->init_req(TypeFunc::Parms+2, len);
+  call->init_req(TypeFunc::Parms + cnt++, len);
 #ifdef _LP64
-  call->init_req(TypeFunc::Parms+3, C->top());
+  call->init_req(TypeFunc::Parms + cnt++, C->top());
 #endif
-  call->init_req( TypeFunc::Control, head->init_control());
-  call->init_req( TypeFunc::I_O    , C->top() )        ;   // does no i/o
-  call->init_req( TypeFunc::Memory ,  mem_phi->in(LoopNode::EntryControl) );
-  call->init_req( TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr) );
-  call->init_req( TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr) );
+  call->init_req(TypeFunc::Control,   head->init_control());
+  call->init_req(TypeFunc::I_O,       C->top());       // Does no I/O.
+  call->init_req(TypeFunc::Memory,    mem_phi->in(LoopNode::EntryControl));
+  call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr));
+  call->init_req(TypeFunc::FramePtr,  C->start()->proj_out(TypeFunc::FramePtr));
   _igvn.register_new_node_with_optimizer(call);
   result_ctrl = new (C) ProjNode(call,TypeFunc::Control);
   _igvn.register_new_node_with_optimizer(result_ctrl);
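With CCallingConventionRequiresIntsAsLongs (introduced for the PPC64 C calling convention), sub-long fill values are widened with ConvI2L and occupy a two-slot long argument, so the stub parameters shift by one. The resulting layouts on LP64 (sketch):

    // Fill-stub call arguments:
    //   default:       Parms+0 from | +1 value          | +2 len | +3 top (long half)
    //   IntsAsLongs:   Parms+0 from | +1 ConvI2L(value) | +2 top | +3 len | +4 top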
--- a/src/share/vm/opto/loopnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/loopnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/loopopts.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/loopopts.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,12 +43,6 @@
     return NULL;
   }
 
-  if (n->is_MathExact()) {
-    // MathExact has projections that are not correctly handled in the code
-    // below.
-    return NULL;
-  }
-
   int wins = 0;
   assert(!n->is_CFG(), "");
   assert(region->is_Region(), "");
@@ -1115,8 +1109,8 @@
     Node *n2 = phi->in(i)->in(1)->in(2);
     phi1->set_req( i, n1 );
     phi2->set_req( i, n2 );
-    phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
-    phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
+    phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
+    phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
   }
   // See if these Phis have been made before.
   // Register with optimizer
@@ -1189,8 +1183,8 @@
     }
     phi1->set_req( j, n1 );
     phi2->set_req( j, n2 );
-    phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
-    phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
+    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
+    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
   }
 
   // See if these Phis have been made before.
@@ -2362,8 +2356,7 @@
         opc == Op_Catch     ||
         opc == Op_CatchProj ||
         opc == Op_Jump      ||
-        opc == Op_JumpProj  ||
-        opc == Op_FlagsProj) {
+        opc == Op_JumpProj) {
 #if !defined(PRODUCT)
       if (TracePartialPeeling) {
         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
--- a/src/share/vm/opto/machnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/machnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -134,6 +134,10 @@
   ShouldNotCallThis();
 }
 
+//---------------------------postalloc_expand----------------------------------
+// Expand node after register allocation.
+void MachNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {}
+
 //------------------------------size-------------------------------------------
 // Size of instruction in bytes
 uint MachNode::size(PhaseRegAlloc *ra_) const {
@@ -393,6 +397,17 @@
   return skipped;
 }
 
+int MachNode::operand_index(const MachOper *oper) const {
+  uint skipped = oper_input_base(); // Sum of leaves skipped so far
+  uint opcnt;
+  for (opcnt = 1; opcnt < num_opnds(); opcnt++) {
+    if (_opnds[opcnt] == oper) break;
+    uint num_edges = _opnds[opcnt]->num_edges(); // leaves for operand
+    skipped += num_edges;
+  }
+  if (_opnds[opcnt] != oper) return -1;
+  return skipped;
+}
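This overload inverts the existing lookup: instead of mapping an operand number to its first input edge, it maps an operand pointer back to that edge. A hypothetical caller (sketch, names invented):

    // Sketch: recover the input edge that feeds a given MachOper.
    int base = mach->operand_index(oper);  // first _in[] index for 'oper', or -1
    if (base >= 0) {
      Node* first_leaf = mach->in(base);   // first leaf input of the operand
    }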
 
 //------------------------------peephole---------------------------------------
 // Apply peephole rule(s) to this instruction
@@ -501,6 +516,9 @@
   return _constant.offset();
 }
 
+int MachConstantNode::constant_offset_unchecked() const {
+  return _constant.offset();
+}
 
 //=============================================================================
 #ifndef PRODUCT
@@ -641,10 +659,15 @@
 
 
 //------------------------------Registers--------------------------------------
-const RegMask &MachCallNode::in_RegMask( uint idx ) const {
+const RegMask &MachCallNode::in_RegMask(uint idx) const {
   // Values in the domain use the users calling convention, embodied in the
   // _in_rms array of RegMasks.
-  if (idx < tf()->domain()->cnt())  return _in_rms[idx];
+  if (idx < tf()->domain()->cnt()) {
+    return _in_rms[idx];
+  }
+  if (idx == mach_constant_base_node_input()) {
+    return MachConstantBaseNode::static_out_RegMask();
+  }
   // Values outside the domain represent debug info
   return *Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()];
 }
@@ -671,7 +694,12 @@
 const RegMask &MachCallJavaNode::in_RegMask(uint idx) const {
   // Values in the domain use the users calling convention, embodied in the
   // _in_rms array of RegMasks.
-  if (idx < tf()->domain()->cnt())  return _in_rms[idx];
+  if (idx < tf()->domain()->cnt()) {
+    return _in_rms[idx];
+  }
+  if (idx == mach_constant_base_node_input()) {
+    return MachConstantBaseNode::static_out_RegMask();
+  }
   // Values outside the domain represent debug info
   Matcher* m = Compile::current()->matcher();
   // If this call is a MethodHandle invoke we have to use a different
--- a/src/share/vm/opto/machnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/machnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,6 +31,7 @@
 #include "opto/node.hpp"
 #include "opto/regmask.hpp"
 
+class BiasedLockingCounters;
 class BufferBlob;
 class CodeBuffer;
 class JVMState;
@@ -102,6 +103,15 @@
     return ::as_XMMRegister(reg(ra_, node, idx));
   }
 #endif
+  // CondRegister reg converter
+#if defined(PPC64)
+  ConditionRegister as_ConditionRegister(PhaseRegAlloc *ra_, const Node *node) const {
+    return ::as_ConditionRegister(reg(ra_, node));
+  }
+  ConditionRegister as_ConditionRegister(PhaseRegAlloc *ra_, const Node *node, int idx) const {
+    return ::as_ConditionRegister(reg(ra_, node, idx));
+  }
+#endif
 
   virtual intptr_t  constant() const;
   virtual relocInfo::relocType constant_reloc() const;
@@ -155,7 +165,15 @@
   virtual void ext_format(PhaseRegAlloc *,const MachNode *node,int idx, outputStream *st) const=0;
 
   virtual void dump_spec(outputStream *st) const; // Print per-operand info
-#endif
+
+  // Check whether o is a valid oper.
+  static bool notAnOper(const MachOper *o) {
+    if (o == NULL)                   return true;
+    if (((intptr_t)o & 1) != 0)      return true;
+    if (*(address*)o == badAddress)  return true;  // kill by Node::destruct
+    return false;
+  }
+#endif // !PRODUCT
 };
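notAnOper() is a debug-only sanity predicate: it flags operands that are NULL, misaligned, or already clobbered by Node::destruct() (which poisons the memory with badAddress). A hypothetical guard using it (sketch):

    // Sketch: skip operands that are no longer valid before dumping them.
    if (MachOper::notAnOper(_opnds[i])) return;  // NULL, misaligned, or destructed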
 
 //------------------------------MachNode---------------------------------------
@@ -173,6 +191,9 @@
   // Number of inputs which come before the first operand.
   // Generally at least 1, to skip the Control input
   virtual uint oper_input_base() const { return 1; }
+  // Position of the constant base node in the node's inputs; -1 if
+  // there is no constant base node input.
+  virtual uint mach_constant_base_node_input() const { return (uint)-1; }
 
   // Copy inputs and operands to new node of instruction.
   // Called from cisc_version() and short_branch_version().
@@ -195,6 +216,7 @@
 
   // First index in _in[] corresponding to operand, or -1 if there is none
   int  operand_index(uint operand) const;
+  int  operand_index(const MachOper *oper) const;
 
   // Register class input is expected in
   virtual const RegMask &in_RegMask(uint) const;
@@ -220,6 +242,12 @@
 
   // Emit bytes into cbuf
   virtual void  emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
+  // Expand node after register allocation.
+  // Node is replaced by several nodes in the postalloc expand phase.
+  // Corresponding methods are generated for nodes if they specify
+  // postalloc_expand. See block.cpp for more documentation.
+  virtual bool requires_postalloc_expand() const { return false; }
+  virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);
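requires_postalloc_expand()/postalloc_expand() are the hooks for the new post-register-allocation expansion: the base-class version is a no-op, and the ADLC generates overrides for instruct definitions that specify postalloc_expand (see block.cpp for the phase itself). Shape of a generated override (sketch, hypothetical machine node):

    // Sketch of an ADLC-generated override:
    virtual bool requires_postalloc_expand() const { return true; }
    virtual void postalloc_expand(GrowableArray<Node*>* nodes, PhaseRegAlloc* ra_) {
      // Append the replacement nodes to 'nodes'; the postalloc expand phase
      // substitutes them for this node after register allocation.
    }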
   // Size of instruction in bytes
   virtual uint  size(PhaseRegAlloc *ra_) const;
   // Helper function that computes size by emitting code
@@ -236,6 +264,9 @@
   // Return number of relocatable values contained in this instruction
   virtual int   reloc() const { return 0; }
 
+  // Return number of words used for double constants in this instruction
+  virtual int   ins_num_consts() const { return 0; }
+
   // Hash and compare over operands.  Used to do GVN on machine Nodes.
   virtual uint  hash() const;
   virtual uint  cmp( const Node &n ) const;
@@ -293,6 +324,9 @@
   static const Pipeline *pipeline_class();
   virtual const Pipeline *pipeline() const;
 
+  // Returns true if this node is a check that can be implemented with a trap.
+  virtual bool is_TrapBasedCheckNode() const { return false; }
+
 #ifndef PRODUCT
   virtual const char *Name() const = 0; // Machine-specific name
   virtual void dump_spec(outputStream *st) const; // Print per-node info
@@ -356,6 +390,9 @@
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual uint oper_input_base() const { return 1; }
 
+  virtual bool requires_postalloc_expand() const;
+  virtual void postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_);
+
   virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const;
   virtual uint size(PhaseRegAlloc* ra_) const;
   virtual bool pinned() const { return UseRDPCForConstantTableBase; }
@@ -395,10 +432,12 @@
   }
 
   // Input edge of MachConstantBaseNode.
-  uint mach_constant_base_node_input() const { return req() - 1; }
+  virtual uint mach_constant_base_node_input() const { return req() - 1; }
 
   int  constant_offset();
   int  constant_offset() const { return ((MachConstantNode*) this)->constant_offset(); }
+  // Unchecked version to avoid assertions in debug output.
+  int  constant_offset_unchecked() const;
 };
 
 //------------------------------MachUEPNode-----------------------------------
@@ -481,12 +520,33 @@
 // Machine SpillCopy Node.  Copies 1 or 2 words from any location to any
 // location (stack or register).
 class MachSpillCopyNode : public MachIdealNode {
+public:
+  enum SpillType {
+    TwoAddress,                        // Inserted when coalescing of a two-address-instruction node and its input fails
+    PhiInput,                          // Inserted when coalescing of a phi node and its input fails
+    DebugUse,                          // Inserted as debug info spills to safepoints in non-frequent blocks
+    LoopPhiInput,                      // Pre-split compares of loop-phis
+    Definition,                        // An lrg marked as spilled will be spilled to memory right after its definition,
+                                       // if in high pressure region or the lrg is bound
+    RegToReg,                          // A register to register move
+    RegToMem,                          // A register to memory move
+    MemToReg,                          // A memory to register move
+    PhiLocationDifferToInputLocation,  // When coalescing phi nodes in PhaseChaitin::Split(), a move spill is inserted if
+                                       // the phi and its input reside at different locations (i.e. reg or mem)
+    BasePointerToMem,                  // Spill base pointer to memory at safepoint
+    InputToRematerialization,          // When rematerializing a node we stretch the inputs' live ranges, and they might be
+                                       // stretched beyond a new definition point, therefore we split out new copies instead
+    CallUse,                           // Spill use at a call
+    Bound                              // An lrg marked as spill that is bound and needs to be spilled at a use
+  };
+private:
   const RegMask *_in;           // RegMask for input
   const RegMask *_out;          // RegMask for output
   const Type *_type;
+  const SpillType _spill_type;
 public:
-  MachSpillCopyNode( Node *n, const RegMask &in, const RegMask &out ) :
-    MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()) {
+  MachSpillCopyNode(SpillType spill_type, Node *n, const RegMask &in, const RegMask &out ) :
+    MachIdealNode(), _spill_type(spill_type), _in(&in), _out(&out), _type(n->bottom_type()) {
     init_class_id(Class_MachSpillCopy);
     init_flags(Flag_is_Copy);
     add_req(NULL);
@@ -505,8 +565,42 @@
   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
   virtual uint size(PhaseRegAlloc *ra_) const;
 
+
 #ifndef PRODUCT
-  virtual const char *Name() const { return "MachSpillCopy"; }
+  virtual const char *Name() const {
+    switch (_spill_type) {
+      case TwoAddress:
+        return "TwoAddressSpillCopy";
+      case PhiInput:
+        return "PhiInputSpillCopy";
+      case DebugUse:
+        return "DebugUseSpillCopy";
+      case LoopPhiInput:
+        return "LoopPhiInputSpillCopy";
+      case Definition:
+        return "DefinitionSpillCopy";
+      case RegToReg:
+        return "RegToRegSpillCopy";
+      case RegToMem:
+        return "RegToMemSpillCopy";
+      case MemToReg:
+        return "MemToRegSpillCopy";
+      case PhiLocationDifferToInputLocation:
+        return "PhiLocationDifferToInputLocationSpillCopy";
+      case BasePointerToMem:
+        return "BasePointerToMemSpillCopy";
+      case InputToRematerialization:
+        return "InputToRematerializationSpillCopy";
+      case CallUse:
+        return "CallUseSpillCopy";
+      case Bound:
+        return "BoundSpillCopy";
+      default:
+        assert(false, "Must have valid spill type");
+        return "MachSpillCopy";
+    }
+  }
+
   virtual void format( PhaseRegAlloc *, outputStream *st ) const;
 #endif
 };
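Every spill copy now records why it was inserted, and the Name() mapping above surfaces that reason in debug dumps. A hypothetical allocator call site (sketch; the spill type is chosen per site from the enum):

    // Sketch: a register-to-memory spill copy created by the allocator.
    MachSpillCopyNode* copy =
        new (C) MachSpillCopyNode(MachSpillCopyNode::RegToMem, def, *in_mask, *out_mask);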
--- a/src/share/vm/opto/macro.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/macro.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1084,7 +1084,7 @@
 Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
-  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt);
+  Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt, MemNode::unordered);
   transform_later(value);
   return value;
 }
@@ -1092,7 +1092,7 @@
 
 Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
   Node* adr = basic_plus_adr(base, offset);
-  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt);
+  mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt, MemNode::unordered);
   transform_later(mem);
   return mem;
 }
@@ -1272,8 +1272,8 @@
     // Load(-locked) the heap top.
     // See note above concerning the control input when using a TLAB
     Node *old_eden_top = UseTLAB
-      ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM)
-      : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr);
+      ? new (C) LoadPNode      (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, MemNode::unordered)
+      : new (C) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr, MemNode::acquire);
 
     transform_later(old_eden_top);
     // Add to heap top to get a new heap top
@@ -1320,7 +1320,7 @@
     if (UseTLAB) {
       Node* store_eden_top =
         new (C) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr,
-                              TypeRawPtr::BOTTOM, new_eden_top);
+                              TypeRawPtr::BOTTOM, new_eden_top, MemNode::unordered);
       transform_later(store_eden_top);
       fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
       fast_oop_rawmem = store_eden_top;
@@ -1700,9 +1700,10 @@
                    _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
       transform_later(eden_pf_adr);
 
-      Node *old_pf_wm = new (C) LoadPNode( needgc_false,
+      Node *old_pf_wm = new (C) LoadPNode(needgc_false,
                                    contended_phi_rawmem, eden_pf_adr,
-                                   TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
+                                   TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,
+                                   MemNode::unordered);
       transform_later(old_pf_wm);
 
       // check against new_eden_top
@@ -1726,9 +1727,10 @@
       transform_later(new_pf_wmt );
       new_pf_wmt->set_req(0, need_pf_true);
 
-      Node *store_new_wmt = new (C) StorePNode( need_pf_true,
+      Node *store_new_wmt = new (C) StorePNode(need_pf_true,
                                        contended_phi_rawmem, eden_pf_adr,
-                                       TypeRawPtr::BOTTOM, new_pf_wmt );
+                                       TypeRawPtr::BOTTOM, new_pf_wmt,
+                                       MemNode::unordered);
       transform_later(store_new_wmt);
 
       // adding prefetches
--- a/src/share/vm/opto/macro.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/macro.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/matcher.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/matcher.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -53,8 +53,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 OptoReg::Name OptoReg::c_frame_pointer;
@@ -842,16 +845,15 @@
 
   // Compute generic short-offset Loads
 #ifdef _LP64
-  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
 #endif
-  MachNode *spillI  = match_tree(new (C) LoadINode(NULL,mem,fp,atp));
-  MachNode *spillL  = match_tree(new (C) LoadLNode(NULL,mem,fp,atp));
-  MachNode *spillF  = match_tree(new (C) LoadFNode(NULL,mem,fp,atp));
-  MachNode *spillD  = match_tree(new (C) LoadDNode(NULL,mem,fp,atp));
-  MachNode *spillP  = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
+  MachNode *spillI  = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
+  MachNode *spillL  = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered,false));
+  MachNode *spillF  = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
+  MachNode *spillD  = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
+  MachNode *spillP  = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
   assert(spillI != NULL && spillL != NULL && spillF != NULL &&
          spillD != NULL && spillP != NULL, "");
-
   // Get the ADLC notion of the right regmask, for each basic type.
 #ifdef _LP64
   idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
@@ -1336,12 +1338,24 @@
   }
 
   // Debug inputs begin just after the last incoming parameter
-  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
-          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );
+  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
+         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
 
   // Move the OopMap
   msfpt->_oop_map = sfpt->_oop_map;
 
+  // Add additional edges.
+  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
+    // For these calls we can not add MachConstantBase in expand(), as the
+    // ins are not complete then.
+    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
+    if (msfpt->jvms() &&
+        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
+      // We added an edge before jvms, so we must adapt the position of the ins.
+      msfpt->jvms()->adapt_position(+1);
+    }
+  }
+
   // Registers killed by the call are set in the local scheduling pass
   // of Global Code Motion.
   return msfpt;
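For calls that carry the MachConstantBase as an explicit input, the edge must be wired here rather than in expand(), where the inputs are not yet complete; because it lands in front of the debug info, the jvms offsets are shifted by one. Resulting input layout (sketch):

    // Call input layout after this change:
    //   [0 .. tf()->domain()->cnt())        call arguments
    //   [mach_constant_base_node_input()]   MachConstantBase edge
    //   [jvms debug_start() .. ]            debug info (positions adapted by +1)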
@@ -1984,7 +1998,6 @@
       case Op_Catch:
       case Op_CatchProj:
       case Op_CProj:
-      case Op_FlagsProj:
       case Op_JumpProj:
       case Op_JProj:
       case Op_NeverBranch:
@@ -2331,7 +2344,7 @@
 bool Matcher::post_store_load_barrier(const Node* vmb) {
   Compile* C = Compile::current();
   assert(vmb->is_MemBar(), "");
-  assert(vmb->Opcode() != Op_MemBarAcquire, "");
+  assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
   const MemBarNode* membar = vmb->as_MemBar();
 
   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
@@ -2376,7 +2389,7 @@
     if (x->is_MemBar()) {
       // We must retain this membar if there is an upcoming volatile
       // load, which will be followed by acquire membar.
-      if (xop == Op_MemBarAcquire) {
+      if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
         return false;
       } else {
         // For other kinds of barriers, check by pretending we
@@ -2393,6 +2406,69 @@
   return false;
 }
 
+// Check whether node n is a branch to an uncommon trap that we could
+// optimize as a test with a very high branch cost in case the
+// uncommon trap is taken. The code must be able to be recompiled to use
+// a cheaper test.
+bool Matcher::branches_to_uncommon_trap(const Node *n) {
+  // Don't do it for natives, adapters, or runtime stubs
+  Compile *C = Compile::current();
+  if (!C->is_method_compilation()) return false;
+
+  assert(n->is_If(), "You should only call this on if nodes.");
+  IfNode *ifn = n->as_If();
+
+  Node *ifFalse = NULL;
+  for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
+    if (ifn->fast_out(i)->is_IfFalse()) {
+      ifFalse = ifn->fast_out(i);
+      break;
+    }
+  }
+  assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
+
+  Node *reg = ifFalse;
+  int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
+               // Alternatively use visited set?  Seems too expensive.
+  while (reg != NULL && cnt > 0) {
+    CallNode *call = NULL;
+    RegionNode *nxt_reg = NULL;
+    for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
+      Node *o = reg->fast_out(i);
+      if (o->is_Call()) {
+        call = o->as_Call();
+      }
+      if (o->is_Region()) {
+        nxt_reg = o->as_Region();
+      }
+    }
+
+    if (call &&
+        call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
+      const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
+      if (trtype->isa_int() && trtype->is_int()->is_con()) {
+        jint tr_con = trtype->is_int()->get_con();
+        Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
+        Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
+        assert((int)reason < (int)BitsPerInt, "recode bit map");
+
+        if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
+            && action != Deoptimization::Action_none) {
+          // This uncommon trap is sure to recompile, eventually.
+          // When that happens, C->too_many_traps will prevent
+          // this transformation from happening again.
+          return true;
+        }
+      }
+    }
+
+    reg = nxt_reg;
+    cnt--;
+  }
+
+  return false;
+}
+
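branches_to_uncommon_trap() is the analysis behind trap-based checks (see is_TrapBasedCheckNode() in machnode.hpp): encoding a check as a trapping instruction only pays off if the failing path is an uncommon trap that is sure to recompile. The control-flow shape the bounded walk accepts (sketch):

    // If --> IfFalse --> (Region -->)* Call(uncommon_trap blob entry),
    // at most four Region hops, where the trap request decodes to an allowed
    // deopt reason with an action other than Deoptimization::Action_none.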
 //=============================================================================
 //---------------------------State---------------------------------------------
 State::State(void) {
--- a/src/share/vm/opto/matcher.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/matcher.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -286,6 +286,9 @@
   // CPU supports misaligned vectors store/load.
   static const bool misaligned_vectors_ok();
 
+  // Should original key array reference be passed to AES stubs
+  static const bool pass_original_key_for_aes();
+
   // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
   // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
   // Depends on the details of 64-bit constant generation on the CPU.
@@ -337,10 +340,6 @@
   // Register for MODL projection of divmodL
   static RegMask modL_proj_mask();
 
-  static const RegMask mathExactI_result_proj_mask();
-  static const RegMask mathExactL_result_proj_mask();
-  static const RegMask mathExactI_flags_proj_mask();
-
   // Use hardware DIV instruction when it is faster than
   // a code which use multiply for division by constant.
   static bool use_asm_for_ldiv_by_con( jlong divisor );
@@ -449,6 +448,10 @@
   // aligned.
   static const bool misaligned_doubles_ok;
 
+  // Does the CPU require postalloc expand (see block.cpp for description of
+  // postalloc expand)?
+  static const bool require_postalloc_expand;
+
   // Perform a platform dependent implicit null fixup.  This is needed
   // on windows95 to take care of some unusual register constraints.
   void pd_implicit_null_fixup(MachNode *load, uint idx);
@@ -481,6 +484,8 @@
   // retain the Node to act as a compiler ordering barrier.
   static bool post_store_load_barrier(const Node* mb);
 
+  // Does n lead to an uncommon trap that can cause deoptimization?
+  static bool branches_to_uncommon_trap(const Node *n);
 
 #ifdef ASSERT
   void dump_old2new_map();      // machine-independent to machine-dependent
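
branches_to_uncommon_trap(), declared just above, walks forward along Region projections with a fixed iteration budget rather than a visited set. A generic sketch of that bounded-walk idiom, using illustrative stand-in types rather than HotSpot's Node classes:

// Bounded forward walk with a small fixed budget as the cycle guard,
// mirroring the cnt counter in the traversal above.
#include <vector>

struct Node { std::vector<Node*> outs; bool is_target = false; };

bool reaches_target(Node* start, int budget = 4) {
  Node* cur = start;
  while (cur != nullptr && budget-- > 0) {
    Node* next = nullptr;
    for (Node* out : cur->outs) {
      if (out->is_target) return true;  // found the node we were after
      next = out;                       // otherwise continue forward
    }
    cur = next;                         // the budget bounds cyclic graphs
  }
  return false;
}
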
--- a/src/share/vm/opto/mathexactnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/mathexactnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,358 +31,93 @@
 #include "opto/mathexactnode.hpp"
 #include "opto/subnode.hpp"
 
-MathExactNode::MathExactNode(Node* ctrl, Node* in1) : MultiNode(2) {
-  init_class_id(Class_MathExact);
-  init_req(0, ctrl);
-  init_req(1, in1);
-}
-
-MathExactNode::MathExactNode(Node* ctrl, Node* in1, Node* in2) : MultiNode(3) {
-  init_class_id(Class_MathExact);
-  init_req(0, ctrl);
-  init_req(1, in1);
-  init_req(2, in2);
-}
-
-BoolNode* MathExactNode::bool_node() const {
-  Node* flags = flags_node();
-  BoolNode* boolnode = flags->unique_out()->as_Bool();
-  assert(boolnode != NULL, "must have BoolNode");
-  return boolnode;
-}
-
-IfNode* MathExactNode::if_node() const {
-  BoolNode* boolnode = bool_node();
-  IfNode* ifnode = boolnode->unique_out()->as_If();
-  assert(ifnode != NULL, "must have IfNode");
-  return ifnode;
-}
-
-Node* MathExactNode::control_node() const {
-  IfNode* ifnode = if_node();
-  return ifnode->in(0);
-}
-
-Node* MathExactNode::non_throwing_branch() const {
-  IfNode* ifnode = if_node();
-  if (bool_node()->_test._test == BoolTest::overflow) {
-    return ifnode->proj_out(0);
-  }
-  return ifnode->proj_out(1);
-}
-
-// If the MathExactNode won't overflow we have to replace the
-// FlagsProjNode and ProjNode that is generated by the MathExactNode
-Node* MathExactNode::no_overflow(PhaseGVN* phase, Node* new_result) {
-  PhaseIterGVN* igvn = phase->is_IterGVN();
-  if (igvn) {
-    ProjNode* result = result_node();
-    ProjNode* flags = flags_node();
-
-    if (result != NULL) {
-      igvn->replace_node(result, new_result);
-    }
+template <typename OverflowOp>
+class AddHelper {
+public:
+  typedef typename OverflowOp::TypeClass TypeClass;
+  typedef typename TypeClass::NativeType NativeType;
 
-    if (flags != NULL) {
-      BoolNode* boolnode = bool_node();
-      switch (boolnode->_test._test) {
-        case BoolTest::overflow:
-          // if the check is for overflow - never taken
-          igvn->replace_node(boolnode, phase->intcon(0));
-          break;
-        case BoolTest::no_overflow:
-          // if the check is for no overflow - always taken
-          igvn->replace_node(boolnode, phase->intcon(1));
-          break;
-        default:
-          fatal("Unexpected value of BoolTest");
-          break;
-      }
-      flags->del_req(0);
+  static bool will_overflow(NativeType value1, NativeType value2) {
+    NativeType result = value1 + value2;
+    // Hacker's Delight 2-12: overflow iff both arguments have the opposite sign of the result.
+    if (((value1 ^ result) & (value2 ^ result)) >= 0) {
+      return false;
     }
-  }
-  return new_result;
-}
-
-Node* MathExactINode::match(const ProjNode* proj, const Matcher* m) {
-  uint ideal_reg = proj->ideal_reg();
-  RegMask rm;
-  if (proj->_con == result_proj_node) {
-    rm = m->mathExactI_result_proj_mask();
-  } else {
-    assert(proj->_con == flags_proj_node, "must be result or flags");
-    assert(ideal_reg == Op_RegFlags, "sanity");
-    rm = m->mathExactI_flags_proj_mask();
-  }
-  return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg);
-}
-
-Node* MathExactLNode::match(const ProjNode* proj, const Matcher* m) {
-  uint ideal_reg = proj->ideal_reg();
-  RegMask rm;
-  if (proj->_con == result_proj_node) {
-    rm = m->mathExactL_result_proj_mask();
-  } else {
-    assert(proj->_con == flags_proj_node, "must be result or flags");
-    assert(ideal_reg == Op_RegFlags, "sanity");
-    rm = m->mathExactI_flags_proj_mask();
-  }
-  return new (m->C) MachProjNode(this, proj->_con, rm, ideal_reg);
-}
-
-Node* AddExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
-
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jint val1 = arg1->get_int();
-    jint val2 = arg2->get_int();
-    jint result = val1 + val2;
-    // Hacker's Delight 2-12 Overflow if both arguments have the opposite sign of the result
-    if ( (((val1 ^ result) & (val2 ^ result)) >= 0)) {
-      Node* con_result = ConINode::make(phase->C, result);
-      return no_overflow(phase, con_result);
-    }
-    return NULL;
+    return true;
   }
 
-  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) { // (Add 0 x) == x
-    Node* add_result = new (phase->C) AddINode(arg1, arg2);
-    return no_overflow(phase, add_result);
-  }
-
-  if (type2->singleton()) {
-    return NULL; // no change - keep constant on the right
+  static bool can_overflow(const Type* type1, const Type* type2) {
+    if (type1 == TypeClass::ZERO || type2 == TypeClass::ZERO) {
+      return false;
+    }
+    return true;
   }
-
-  if (type1->singleton()) {
-    // Make it x + Constant - move constant to the right
-    swap_edges(1, 2);
-    return this;
-  }
-
-  if (arg2->is_Load()) {
-    return NULL; // no change - keep load on the right
-  }
-
-  if (arg1->is_Load()) {
-    // Make it x + Load - move load to the right
-    swap_edges(1, 2);
-    return this;
-  }
+};
 
-  if (arg1->_idx > arg2->_idx) {
-    // Sort the edges
-    swap_edges(1, 2);
-    return this;
-  }
-
-  return NULL;
-}
-
-Node* AddExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
+template <typename OverflowOp>
+class SubHelper {
+public:
+  typedef typename OverflowOp::TypeClass TypeClass;
+  typedef typename TypeClass::NativeType NativeType;
 
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jlong val1 = arg1->get_long();
-    jlong val2 = arg2->get_long();
-    jlong result = val1 + val2;
-    // Hacker's Delight 2-12 Overflow if both arguments have the opposite sign of the result
-    if ( (((val1 ^ result) & (val2 ^ result)) >= 0)) {
-      Node* con_result = ConLNode::make(phase->C, result);
-      return no_overflow(phase, con_result);
+  static bool will_overflow(NativeType value1, NativeType value2) {
+    NativeType result = value1 - value2;
+    // Hacker's Delight 2-12: overflow iff the arguments have different signs and
+    // the sign of the result differs from the sign of value1.
+    if (((value1 ^ value2) & (value1 ^ result)) >= 0) {
+      return false;
     }
-    return NULL;
+    return true;
   }
 
-  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) { // (Add 0 x) == x
-    Node* add_result = new (phase->C) AddLNode(arg1, arg2);
-    return no_overflow(phase, add_result);
+  static bool can_overflow(const Type* type1, const Type* type2) {
+    if (type2 == TypeClass::ZERO) {
+      return false;
+    }
+    return true;
   }
-
-  if (type2->singleton()) {
-    return NULL; // no change - keep constant on the right
-  }
-
-  if (type1->singleton()) {
-    // Make it x + Constant - move constant to the right
-    swap_edges(1, 2);
-    return this;
-  }
+};
 
-  if (arg2->is_Load()) {
-    return NULL; // no change - keep load on the right
-  }
-
-  if (arg1->is_Load()) {
-    // Make it x + Load - move load to the right
-    swap_edges(1, 2);
-    return this;
-  }
-
-  if (arg1->_idx > arg2->_idx) {
-    // Sort the edges
-    swap_edges(1, 2);
-    return this;
-  }
+template <typename OverflowOp>
+class MulHelper {
+public:
+  typedef typename OverflowOp::TypeClass TypeClass;
 
-  return NULL;
-}
-
-Node* SubExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
-
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jint val1 = arg1->get_int();
-    jint val2 = arg2->get_int();
-    jint result = val1 - val2;
+  static bool can_overflow(const Type* type1, const Type* type2) {
+    if (type1 == TypeClass::ZERO || type2 == TypeClass::ZERO) {
+      return false;
+    } else if (type1 == TypeClass::ONE || type2 == TypeClass::ONE) {
+      return false;
+    }
+    return true;
+  }
+};
 
-    // Hacker's Delight 2-12 Overflow iff the arguments have different signs and
-    // the sign of the result is different than the sign of arg1
-    if (((val1 ^ val2) & (val1 ^ result)) >= 0) {
-      Node* con_result = ConINode::make(phase->C, result);
-      return no_overflow(phase, con_result);
-    }
-    return NULL;
-  }
-
-  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) {
-    // Sub with zero is the same as add with zero
-    Node* add_result = new (phase->C) AddINode(arg1, arg2);
-    return no_overflow(phase, add_result);
-  }
-
-  return NULL;
+bool OverflowAddINode::will_overflow(jint v1, jint v2) const {
+  return AddHelper<OverflowAddINode>::will_overflow(v1, v2);
 }
 
-Node* SubExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
-
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jlong val1 = arg1->get_long();
-    jlong val2 = arg2->get_long();
-    jlong result = val1 - val2;
-
-    // Hacker's Delight 2-12 Overflow iff the arguments have different signs and
-    // the sign of the result is different than the sign of arg1
-    if (((val1 ^ val2) & (val1 ^ result)) >= 0) {
-      Node* con_result = ConLNode::make(phase->C, result);
-      return no_overflow(phase, con_result);
-    }
-    return NULL;
-  }
-
-  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) {
-    // Sub with zero is the same as add with zero
-    Node* add_result = new (phase->C) AddLNode(arg1, arg2);
-    return no_overflow(phase, add_result);
-  }
-
-  return NULL;
-}
-
-Node* NegExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node *arg = in(1);
-
-  const Type* type = phase->type(arg);
-  if (type != Type::TOP && type->singleton()) {
-    jint value = arg->get_int();
-    if (value != min_jint) {
-      Node* neg_result = ConINode::make(phase->C, -value);
-      return no_overflow(phase, neg_result);
-    }
-  }
-  return NULL;
+bool OverflowSubINode::will_overflow(jint v1, jint v2) const {
+  return SubHelper<OverflowSubINode>::will_overflow(v1, v2);
 }
 
-Node* NegExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node *arg = in(1);
-
-  const Type* type = phase->type(arg);
-  if (type != Type::TOP && type->singleton()) {
-    jlong value = arg->get_long();
-    if (value != min_jlong) {
-      Node* neg_result = ConLNode::make(phase->C, -value);
-      return no_overflow(phase, neg_result);
+bool OverflowMulINode::will_overflow(jint v1, jint v2) const {
+    jlong result = (jlong) v1 * (jlong) v2;
+    if ((jint) result == result) {
+      return false;
     }
-  }
-  return NULL;
+    return true;
 }
 
-Node* MulExactINode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
-
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jint val1 = arg1->get_int();
-    jint val2 = arg2->get_int();
-    jlong result = (jlong) val1 * (jlong) val2;
-    if ((jint) result == result) {
-      // no overflow
-      Node* mul_result = ConINode::make(phase->C, result);
-      return no_overflow(phase, mul_result);
-    }
-  }
-
-  if (type1 == TypeInt::ZERO || type2 == TypeInt::ZERO) {
-    return no_overflow(phase, ConINode::make(phase->C, 0));
-  }
-
-  if (type1 == TypeInt::ONE) {
-    Node* mul_result = new (phase->C) AddINode(arg2, phase->intcon(0));
-    return no_overflow(phase, mul_result);
-  }
-  if (type2 == TypeInt::ONE) {
-    Node* mul_result = new (phase->C) AddINode(arg1, phase->intcon(0));
-    return no_overflow(phase, mul_result);
-  }
-
-  if (type1 == TypeInt::MINUS_1) {
-    return new (phase->C) NegExactINode(NULL, arg2);
-  }
-
-  if (type2 == TypeInt::MINUS_1) {
-    return new (phase->C) NegExactINode(NULL, arg1);
-  }
-
-  return NULL;
+bool OverflowAddLNode::will_overflow(jlong v1, jlong v2) const {
+  return AddHelper<OverflowAddLNode>::will_overflow(v1, v2);
 }
 
-Node* MulExactLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
-  Node* arg1 = in(1);
-  Node* arg2 = in(2);
+bool OverflowSubLNode::will_overflow(jlong v1, jlong v2) const {
+  return SubHelper<OverflowSubLNode>::will_overflow(v1, v2);
+}
 
-  const Type* type1 = phase->type(arg1);
-  const Type* type2 = phase->type(arg2);
-
-  if (type1 != Type::TOP && type1->singleton() &&
-      type2 != Type::TOP && type2->singleton()) {
-    jlong val1 = arg1->get_long();
-    jlong val2 = arg2->get_long();
-
+bool OverflowMulLNode::will_overflow(jlong val1, jlong val2) const {
     jlong result = val1 * val2;
     jlong ax = (val1 < 0 ? -val1 : val1);
     jlong ay = (val2 < 0 ? -val2 : val2);
@@ -398,33 +133,125 @@
       }
     }
 
-    if (!overflow) {
-      Node* mul_result = ConLNode::make(phase->C, result);
-      return no_overflow(phase, mul_result);
+    return overflow;
+}
+
+bool OverflowAddINode::can_overflow(const Type* t1, const Type* t2) const {
+  return AddHelper<OverflowAddINode>::can_overflow(t1, t2);
+}
+
+bool OverflowSubINode::can_overflow(const Type* t1, const Type* t2) const {
+  if (in(1) == in(2)) {
+    return false;
+  }
+  return SubHelper<OverflowSubINode>::can_overflow(t1, t2);
+}
+
+bool OverflowMulINode::can_overflow(const Type* t1, const Type* t2) const {
+  return MulHelper<OverflowMulINode>::can_overflow(t1, t2);
+}
+
+bool OverflowAddLNode::can_overflow(const Type* t1, const Type* t2) const {
+  return AddHelper<OverflowAddLNode>::can_overflow(t1, t2);
+}
+
+bool OverflowSubLNode::can_overflow(const Type* t1, const Type* t2) const {
+  if (in(1) == in(2)) {
+    return false;
+  }
+  return SubHelper<OverflowSubLNode>::can_overflow(t1, t2);
+}
+
+bool OverflowMulLNode::can_overflow(const Type* t1, const Type* t2) const {
+  return MulHelper<OverflowMulLNode>::can_overflow(t1, t2);
+}
+
+const Type* OverflowNode::sub(const Type* t1, const Type* t2) const {
+  fatal(err_msg_res("sub() should not be called for '%s'", NodeClassNames[this->Opcode()]));
+  return TypeInt::CC;
+}
+
+template <typename OverflowOp>
+struct IdealHelper {
+  typedef typename OverflowOp::TypeClass TypeClass; // TypeInt or TypeLong
+  typedef typename TypeClass::NativeType NativeType;
+
+  static Node* Ideal(const OverflowOp* node, PhaseGVN* phase, bool can_reshape) {
+    Node* arg1 = node->in(1);
+    Node* arg2 = node->in(2);
+    const Type* type1 = phase->type(arg1);
+    const Type* type2 = phase->type(arg2);
+
+    if (type1 == NULL || type2 == NULL) {
+      return NULL;
     }
-  }
 
-  if (type1 == TypeLong::ZERO || type2 == TypeLong::ZERO) {
-    return no_overflow(phase, ConLNode::make(phase->C, 0));
+    if (type1 != Type::TOP && type1->singleton() &&
+        type2 != Type::TOP && type2->singleton()) {
+      NativeType val1 = TypeClass::as_self(type1)->get_con();
+      NativeType val2 = TypeClass::as_self(type2)->get_con();
+      if (node->will_overflow(val1, val2) == false) {
+        Node* con_result = ConINode::make(phase->C, 0);
+        return con_result;
+      }
+      return NULL;
+    }
+    return NULL;
   }
 
-  if (type1 == TypeLong::ONE) {
-    Node* mul_result = new (phase->C) AddLNode(arg2, phase->longcon(0));
-    return no_overflow(phase, mul_result);
-  }
-  if (type2 == TypeLong::ONE) {
-    Node* mul_result = new (phase->C) AddLNode(arg1, phase->longcon(0));
-    return no_overflow(phase, mul_result);
-  }
+  static const Type* Value(const OverflowOp* node, PhaseTransform* phase) {
+    const Type *t1 = phase->type( node->in(1) );
+    const Type *t2 = phase->type( node->in(2) );
+    if( t1 == Type::TOP ) return Type::TOP;
+    if( t2 == Type::TOP ) return Type::TOP;
+
+    const TypeClass* i1 = TypeClass::as_self(t1);
+    const TypeClass* i2 = TypeClass::as_self(t2);
+
+    if (i1 == NULL || i2 == NULL) {
+      return TypeInt::CC;
+    }
 
-  if (type1 == TypeLong::MINUS_1) {
-    return new (phase->C) NegExactLNode(NULL, arg2);
-  }
+    if (t1->singleton() && t2->singleton()) {
+      NativeType val1 = i1->get_con();
+      NativeType val2 = i2->get_con();
+      if (node->will_overflow(val1, val2)) {
+        return TypeInt::CC;
+      }
+      return TypeInt::ZERO;
+    } else if (i1 != TypeClass::TYPE_DOMAIN && i2 != TypeClass::TYPE_DOMAIN) {
+      if (node->will_overflow(i1->_lo, i2->_lo)) {
+        return TypeInt::CC;
+      } else if (node->will_overflow(i1->_lo, i2->_hi)) {
+        return TypeInt::CC;
+      } else if (node->will_overflow(i1->_hi, i2->_lo)) {
+        return TypeInt::CC;
+      } else if (node->will_overflow(i1->_hi, i2->_hi)) {
+        return TypeInt::CC;
+      }
+      return TypeInt::ZERO;
+    }
 
-  if (type2 == TypeLong::MINUS_1) {
-    return new (phase->C) NegExactLNode(NULL, arg1);
+    if (!node->can_overflow(t1, t2)) {
+      return TypeInt::ZERO;
+    }
+    return TypeInt::CC;
   }
+};
 
-  return NULL;
+Node* OverflowINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  return IdealHelper<OverflowINode>::Ideal(this, phase, can_reshape);
 }
 
+Node* OverflowLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+  return IdealHelper<OverflowLNode>::Ideal(this, phase, can_reshape);
+}
+
+const Type* OverflowINode::Value(PhaseTransform* phase) const {
+  return IdealHelper<OverflowINode>::Value(this, phase);
+}
+
+const Type* OverflowLNode::Value(PhaseTransform* phase) const {
+  return IdealHelper<OverflowLNode>::Value(this, phase);
+}
+
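
The Add/Sub helpers above compute overflow with the branch-free sign tests from Hacker's Delight, and the multiply variants either widen to 64 bits or bound the operand magnitudes. A self-contained sketch of the same predicates; the additions are performed in unsigned arithmetic so the sketch stays well-defined where the VM's jint/jlong types rely on wraparound:

// Standalone overflow predicates matching the helpers above.
#include <cstdint>

// Hacker's Delight 2-12: x + y overflows iff both operands have the
// opposite sign of the result.
bool add_will_overflow(int32_t x, int32_t y) {
  int32_t r = (int32_t)((uint32_t)x + (uint32_t)y);
  return ((x ^ r) & (y ^ r)) < 0;
}

// x - y overflows iff the operands have different signs and the sign
// of the result differs from the sign of x.
bool sub_will_overflow(int32_t x, int32_t y) {
  int32_t r = (int32_t)((uint32_t)x - (uint32_t)y);
  return ((x ^ y) & (x ^ r)) < 0;
}

// 32-bit multiply: widen to 64 bits and check that the product
// survives narrowing -- the test OverflowMulINode::will_overflow uses.
bool mul_will_overflow(int32_t x, int32_t y) {
  int64_t r = (int64_t)x * (int64_t)y;
  return (int32_t)r != r;
}
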
--- a/src/share/vm/opto/mathexactnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/mathexactnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -27,128 +27,111 @@
 
 #include "opto/multnode.hpp"
 #include "opto/node.hpp"
+#include "opto/addnode.hpp"
 #include "opto/subnode.hpp"
 #include "opto/type.hpp"
 
-class BoolNode;
-class IfNode;
-class Node;
-
 class PhaseGVN;
 class PhaseTransform;
 
-class MathExactNode : public MultiNode {
+class OverflowNode : public CmpNode {
 public:
-  MathExactNode(Node* ctrl, Node* in1);
-  MathExactNode(Node* ctrl, Node* in1, Node* in2);
-  enum {
-    result_proj_node = 0,
-    flags_proj_node = 1
-  };
-  virtual int Opcode() const;
-  virtual Node* Identity(PhaseTransform* phase) { return this; }
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) { return NULL; }
-  virtual const Type* Value(PhaseTransform* phase) const { return bottom_type(); }
-  virtual uint hash() const { return NO_HASH; }
-  virtual bool is_CFG() const { return false; }
-  virtual uint ideal_reg() const { return NotAMachineReg; }
+  OverflowNode(Node* in1, Node* in2) : CmpNode(in1, in2) {}
 
-  ProjNode* result_node() const { return proj_out(result_proj_node); }
-  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
-  Node* control_node() const;
-  Node* non_throwing_branch() const;
-protected:
-  IfNode* if_node() const;
-  BoolNode* bool_node() const;
-  Node* no_overflow(PhaseGVN *phase, Node* new_result);
-};
-
-class MathExactINode : public MathExactNode {
- public:
-  MathExactINode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {}
-  MathExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* match(const ProjNode* proj, const Matcher* m);
-  virtual const Type* bottom_type() const { return TypeTuple::INT_CC_PAIR; }
-};
-
-class MathExactLNode : public MathExactNode {
-public:
-  MathExactLNode(Node* ctrl, Node* in1) : MathExactNode(ctrl, in1) {}
-  MathExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactNode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* match(const ProjNode* proj, const Matcher* m);
-  virtual const Type* bottom_type() const { return TypeTuple::LONG_CC_PAIR; }
-};
-
-class AddExactINode : public MathExactINode {
-public:
-  AddExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual uint ideal_reg() const { return Op_RegFlags; }
+  virtual const Type* sub(const Type* t1, const Type* t2) const;
 };
 
-class AddExactLNode : public MathExactLNode {
-public:
-  AddExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
-
-class SubExactINode : public MathExactINode {
-public:
-  SubExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
-
-class SubExactLNode : public MathExactLNode {
-public:
-  SubExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
-
-class NegExactINode : public MathExactINode {
-public:
-  NegExactINode(Node* ctrl, Node* in1) : MathExactINode(ctrl, in1) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
-
-class NegExactLNode : public MathExactLNode {
+class OverflowINode : public OverflowNode {
 public:
-  NegExactLNode(Node* ctrl, Node* in1) : MathExactLNode(ctrl, in1) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
-
-class MulExactINode : public MathExactINode {
-public:
-  MulExactINode(Node* ctrl, Node* in1, Node* in2) : MathExactINode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
-  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
+  typedef TypeInt TypeClass;
 
-class MulExactLNode : public MathExactLNode {
-public:
-  MulExactLNode(Node* ctrl, Node* in1, Node* in2) : MathExactLNode(ctrl, in1, in2) {}
-  virtual int Opcode() const;
+  OverflowINode(Node* in1, Node* in2) : OverflowNode(in1, in2) {}
   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
-};
+  virtual const Type* Value(PhaseTransform* phase) const;
 
-class FlagsProjNode : public ProjNode {
-public:
-  FlagsProjNode(Node* src, uint con) : ProjNode(src, con) {
-    init_class_id(Class_FlagsProj);
-  }
-
-  virtual int Opcode() const;
-  virtual bool is_CFG() const { return false; }
-  virtual const Type* bottom_type() const { return TypeInt::CC; }
-  virtual uint ideal_reg() const { return Op_RegFlags; }
+  virtual bool will_overflow(jint v1, jint v2) const = 0;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const = 0;
 };
 
 
+class OverflowLNode : public OverflowNode {
+public:
+  typedef TypeLong TypeClass;
+
+  OverflowLNode(Node* in1, Node* in2) : OverflowNode(in1, in2) {}
+  virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+  virtual const Type* Value(PhaseTransform* phase) const;
+
+  virtual bool will_overflow(jlong v1, jlong v2) const = 0;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const = 0;
+};
+
+class OverflowAddINode : public OverflowINode {
+public:
+  typedef AddINode MathOp;
+
+  OverflowAddINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jint v1, jint v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
+class OverflowSubINode : public OverflowINode {
+public:
+  typedef SubINode MathOp;
+
+  OverflowSubINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jint v1, jint v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
+class OverflowMulINode : public OverflowINode {
+public:
+  typedef MulINode MathOp;
+
+  OverflowMulINode(Node* in1, Node* in2) : OverflowINode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jint v1, jint v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
+class OverflowAddLNode : public OverflowLNode {
+public:
+  typedef AddLNode MathOp;
+
+  OverflowAddLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jlong v1, jlong v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
+class OverflowSubLNode : public OverflowLNode {
+public:
+  typedef SubLNode MathOp;
+
+  OverflowSubLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jlong v1, jlong v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
+class OverflowMulLNode : public OverflowLNode {
+public:
+  typedef MulLNode MathOp;
+
+  OverflowMulLNode(Node* in1, Node* in2) : OverflowLNode(in1, in2) {}
+  virtual int Opcode() const;
+
+  virtual bool will_overflow(jlong v1, jlong v2) const;
+  virtual bool can_overflow(const Type* t1, const Type* t2) const;
+};
+
 #endif
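
Each concrete Overflow* node above publishes a nested TypeClass typedef (TypeInt or TypeLong), which the templated helpers in mathexactnode.cpp consume to derive the native value type. A minimal sketch of that trait-based dispatch, with TypeIntish/TypeLongish as illustrative stand-ins for the VM's type classes:

// Trait-based static dispatch: the helper is parameterized over the op,
// and the op's nested TypeClass supplies the native integer type.
#include <cstdint>
#include <cstdio>
#include <type_traits>

struct TypeIntish  { typedef int32_t NativeType; };
struct TypeLongish { typedef int64_t NativeType; };

template <typename OverflowOp>
struct AddHelper {
  typedef typename OverflowOp::TypeClass::NativeType NativeType;
  typedef typename std::make_unsigned<NativeType>::type Unsigned;
  static bool will_overflow(NativeType a, NativeType b) {
    // Unsigned addition keeps the sketch free of signed-overflow UB.
    NativeType r = (NativeType)((Unsigned)a + (Unsigned)b);
    return ((a ^ r) & (b ^ r)) < 0;
  }
};

struct OverflowAddI { typedef TypeIntish  TypeClass; };
struct OverflowAddL { typedef TypeLongish TypeClass; };

int main() {
  std::printf("%d\n", AddHelper<OverflowAddI>::will_overflow(INT32_MAX, 1)); // 1
  std::printf("%d\n", AddHelper<OverflowAddL>::will_overflow(1, 2));         // 0
}
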
 
--- a/src/share/vm/opto/memnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/memnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -657,7 +657,7 @@
       // disregarding "null"-ness.
       // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
       const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
-      assert(cross_check->meet(tp_notnull) == cross_check,
+      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
              "real address must not escape from expected memory type");
     }
     #endif
@@ -907,7 +907,7 @@
 
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
-Node *LoadNode::make( PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt ) {
+Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) {
   Compile* C = gvn.C;
 
   // sanity check the alias category against the created node type
@@ -923,34 +923,34 @@
           rt->isa_oopptr() || is_immutable_value(adr),
           "raw memory operations should have control edge");
   switch (bt) {
-  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int()    );
-  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long()   );
-  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt              );
-  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt              );
-  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr()    );
+  case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_BYTE:    return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_INT:     return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_CHAR:    return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_SHORT:   return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(),  mo);
+  case T_LONG:    return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo);
+  case T_FLOAT:   return new (C) LoadFNode (ctl, mem, adr, adr_type, rt,            mo);
+  case T_DOUBLE:  return new (C) LoadDNode (ctl, mem, adr, adr_type, rt,            mo);
+  case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(),  mo);
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
-      Node* load  = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop()));
+      Node* load  = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo));
       return new (C) DecodeNNode(load, load->bottom_type()->make_ptr());
     } else
 #endif
     {
       assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop");
-      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr());
+      return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo);
     }
   }
   ShouldNotReachHere();
   return (LoadNode*)NULL;
 }
 
-LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt) {
+LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), require_atomic);
+  return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic);
 }
 
 
@@ -1002,9 +1002,13 @@
     // a synchronized region.
     while (current->is_Proj()) {
       int opc = current->in(0)->Opcode();
-      if ((final && (opc == Op_MemBarAcquire || opc == Op_MemBarAcquireLock)) ||
-          opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder ||
-          opc == Op_MemBarReleaseLock) {
+      if ((final && (opc == Op_MemBarAcquire ||
+                     opc == Op_MemBarAcquireLock ||
+                     opc == Op_LoadFence)) ||
+          opc == Op_MemBarRelease ||
+          opc == Op_StoreFence ||
+          opc == Op_MemBarReleaseLock ||
+          opc == Op_MemBarCPUOrder) {
         Node* mem = current->in(0)->in(TypeFunc::Memory);
         if (mem->is_MergeMem()) {
           MergeMemNode* merge = mem->as_MergeMem();
@@ -1681,7 +1685,7 @@
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
       if (off_beyond_header) {  // is the offset beyond the header?
-        const Type* jt = t->join(_type);
+        const Type* jt = t->join_speculative(_type);
         // In any case, do not allow the join, per se, to empty out the type.
         if (jt->empty() && !t->empty()) {
           // This can happen if a interface-typed array narrows to a class type.
@@ -2032,12 +2036,12 @@
 #ifdef _LP64
   if (adr_type->is_ptr_to_narrowklass()) {
     assert(UseCompressedClassPointers, "no compressed klasses");
-    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
+    Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered));
     return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   }
 #endif
   assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop");
-  return new (C) LoadKlassNode(ctl, mem, adr, at, tk);
+  return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered);
 }
 
 //------------------------------Value------------------------------------------
@@ -2352,45 +2356,46 @@
 //=============================================================================
 //---------------------------StoreNode::make-----------------------------------
 // Polymorphic factory method:
-StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) {
+StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) {
+  assert((mo == unordered || mo == release), "unexpected");
   Compile* C = gvn.C;
-  assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
-          ctl != NULL, "raw memory operations should have control edge");
+  assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw ||
+         ctl != NULL, "raw memory operations should have control edge");
 
   switch (bt) {
   case T_BOOLEAN:
-  case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val);
-  case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val);
+  case T_BYTE:    return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo);
+  case T_INT:     return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo);
   case T_CHAR:
-  case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val);
-  case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val);
-  case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val);
-  case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val);
+  case T_SHORT:   return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo);
+  case T_LONG:    return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo);
+  case T_FLOAT:   return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo);
+  case T_DOUBLE:  return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo);
   case T_METADATA:
   case T_ADDRESS:
   case T_OBJECT:
 #ifdef _LP64
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
-      return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo);
     } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
                (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
                 adr->bottom_type()->isa_rawptr())) {
       val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
-      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
+      return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo);
     }
 #endif
     {
-      return new (C) StorePNode(ctl, mem, adr, adr_type, val);
+      return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo);
     }
   }
   ShouldNotReachHere();
   return (StoreNode*)NULL;
 }
 
-StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val) {
+StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) {
   bool require_atomic = true;
-  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, require_atomic);
+  return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic);
 }
 
 
@@ -2783,12 +2788,12 @@
 
   Node *zero = phase->makecon(TypeLong::ZERO);
   Node *off  = phase->MakeConX(BytesPerLong);
-  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+  mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   count--;
   while( count-- ) {
     mem = phase->transform(mem);
     adr = phase->transform(new (phase->C) AddPNode(base,adr,off));
-    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero);
+    mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false);
   }
   return mem;
 }
@@ -2832,7 +2837,7 @@
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     offset += BytesPerInt;
   }
@@ -2893,7 +2898,7 @@
     Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset));
     adr = phase->transform(adr);
     const TypePtr* atp = TypeRawPtr::BOTTOM;
-    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT);
+    mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered);
     mem = phase->transform(mem);
     done_offset += BytesPerInt;
   }
@@ -2977,15 +2982,17 @@
 //------------------------------make-------------------------------------------
 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
   switch (opcode) {
-  case Op_MemBarAcquire:   return new(C) MemBarAcquireNode(C,  atp, pn);
-  case Op_MemBarRelease:   return new(C) MemBarReleaseNode(C,  atp, pn);
-  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C,  atp, pn);
-  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C,  atp, pn);
-  case Op_MemBarVolatile:  return new(C) MemBarVolatileNode(C, atp, pn);
-  case Op_MemBarCPUOrder:  return new(C) MemBarCPUOrderNode(C, atp, pn);
-  case Op_Initialize:      return new(C) InitializeNode(C,     atp, pn);
-  case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C,  atp, pn);
-  default:                 ShouldNotReachHere(); return NULL;
+  case Op_MemBarAcquire:     return new(C) MemBarAcquireNode(C, atp, pn);
+  case Op_LoadFence:         return new(C) LoadFenceNode(C, atp, pn);
+  case Op_MemBarRelease:     return new(C) MemBarReleaseNode(C, atp, pn);
+  case Op_StoreFence:        return new(C) StoreFenceNode(C, atp, pn);
+  case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn);
+  case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn);
+  case Op_MemBarVolatile:    return new(C) MemBarVolatileNode(C, atp, pn);
+  case Op_MemBarCPUOrder:    return new(C) MemBarCPUOrderNode(C, atp, pn);
+  case Op_Initialize:        return new(C) InitializeNode(C, atp, pn);
+  case Op_MemBarStoreStore:  return new(C) MemBarStoreStoreNode(C, atp, pn);
+  default: ShouldNotReachHere(); return NULL;
   }
 }
 
@@ -3767,14 +3774,14 @@
       ++new_long;
       off[nst] = offset;
       st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                  phase->longcon(con), T_LONG);
+                                  phase->longcon(con), T_LONG, MemNode::unordered);
     } else {
       // Omit either if it is a zero.
       if (con0 != 0) {
         ++new_int;
         off[nst]  = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con0), T_INT);
+                                    phase->intcon(con0), T_INT, MemNode::unordered);
       }
       if (con1 != 0) {
         ++new_int;
@@ -3782,7 +3789,7 @@
         adr = make_raw_address(offset, phase);
         off[nst]  = offset;
         st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp,
-                                    phase->intcon(con1), T_INT);
+                                    phase->intcon(con1), T_INT, MemNode::unordered);
       }
     }
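
The MemOrd argument now threaded through the Load/Store factories corresponds closely to the C++11 memory orders; a rough standalone analogue of the three modes (an analogy only, not the VM's implementation):

// unordered ~ relaxed access; acquire/release constrain reordering
// across the access, exactly what the _mo field records per node.
#include <atomic>

std::atomic<int> guard{0};
int payload = 0;

void publisher() {
  payload = 42;                                    // plain store
  guard.store(1, std::memory_order_release);       // MemNode::release analogue
}

int consumer() {
  while (guard.load(std::memory_order_acquire) == 0) { /* spin */ }  // acquire analogue
  return payload;                                  // guaranteed to observe 42
}

int unordered_read() {
  return guard.load(std::memory_order_relaxed);    // MemNode::unordered analogue
}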
 
--- a/src/share/vm/opto/memnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/memnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,10 @@
          ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
   };
+  typedef enum { unordered = 0,
+                 acquire,       // Load has to acquire or be succeeded by MemBarAcquire.
+                 release        // Store has to release or be preceded by MemBarRelease.
+  } MemOrd;
 protected:
   MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
     : Node(c0,c1,c2   ) {
@@ -134,20 +138,32 @@
 //------------------------------LoadNode---------------------------------------
 // Load value; requires Memory and Address
 class LoadNode : public MemNode {
+private:
+  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
+  // between loads that may be reordered and loads that require acquire
+  // semantics to adhere to the Java specification.  The required behaviour
+  // is stored in this field.
+  const MemOrd _mo;
+
 protected:
-  virtual uint cmp( const Node &n ) const;
+  virtual uint cmp(const Node &n) const;
   virtual uint size_of() const; // Size is bigger
   const Type* const _type;      // What kind of value is loaded?
 public:
 
-  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
-    : MemNode(c,mem,adr,at), _type(rt) {
+  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo)
+    : MemNode(c,mem,adr,at), _type(rt), _mo(mo) {
     init_class_id(Class_Load);
   }
+  inline bool is_unordered() const { return !is_acquire(); }
+  inline bool is_acquire() const {
+    assert(_mo == unordered || _mo == acquire, "unexpected");
+    return _mo == acquire;
+  }
 
   // Polymorphic factory method:
-  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                     const TypePtr* at, const Type *rt, BasicType bt );
+  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+                    const TypePtr* at, const Type *rt, BasicType bt, MemOrd mo);
 
   virtual uint hash()   const;  // Check the type
 
@@ -221,8 +237,8 @@
 // Load a byte (8bits signed) from memory
 class LoadBNode : public LoadNode {
 public:
-  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
-    : LoadNode(c,mem,adr,at,ti) {}
+  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
+    : LoadNode(c, mem, adr, at, ti, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -235,8 +251,8 @@
 // Load a unsigned byte (8bits unsigned) from memory
 class LoadUBNode : public LoadNode {
 public:
-  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
-    : LoadNode(c, mem, adr, at, ti) {}
+  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo)
+    : LoadNode(c, mem, adr, at, ti, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
@@ -249,8 +265,8 @@
 // Load an unsigned short/char (16bits unsigned) from memory
 class LoadUSNode : public LoadNode {
 public:
-  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
-    : LoadNode(c,mem,adr,at,ti) {}
+  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
+    : LoadNode(c, mem, adr, at, ti, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -263,8 +279,8 @@
 // Load a short (16bits signed) from memory
 class LoadSNode : public LoadNode {
 public:
-  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
-    : LoadNode(c,mem,adr,at,ti) {}
+  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
+    : LoadNode(c, mem, adr, at, ti, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
@@ -277,8 +293,8 @@
 // Load an integer from memory
 class LoadINode : public LoadNode {
 public:
-  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
-    : LoadNode(c,mem,adr,at,ti) {}
+  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo)
+    : LoadNode(c, mem, adr, at, ti, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegI; }
   virtual int store_Opcode() const { return Op_StoreI; }
@@ -289,8 +305,8 @@
 // Load an array length from the array
 class LoadRangeNode : public LoadINode {
 public:
-  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
-    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
+  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
+    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
   virtual int Opcode() const;
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual Node *Identity( PhaseTransform *phase );
@@ -309,18 +325,16 @@
   const bool _require_atomic_access;  // is piecewise load forbidden?
 
 public:
-  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
-             const TypeLong *tl = TypeLong::LONG,
-             bool require_atomic_access = false )
-    : LoadNode(c,mem,adr,at,tl)
-    , _require_atomic_access(require_atomic_access)
-  {}
+  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
+            MemOrd mo, bool require_atomic_access = false)
+    : LoadNode(c, mem, adr, at, tl, mo), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegL; }
   virtual int store_Opcode() const { return Op_StoreL; }
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() { return _require_atomic_access; }
-  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
+  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
+                                const Type* rt, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -333,8 +347,8 @@
 // Load a long from unaligned memory
 class LoadL_unalignedNode : public LoadLNode {
 public:
-  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
-    : LoadLNode(c,mem,adr,at) {}
+  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
+    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo) {}
   virtual int Opcode() const;
 };
 
@@ -342,8 +356,8 @@
 // Load a float (32 bits) from memory
 class LoadFNode : public LoadNode {
 public:
-  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
-    : LoadNode(c,mem,adr,at,t) {}
+  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
+    : LoadNode(c, mem, adr, at, t, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegF; }
   virtual int store_Opcode() const { return Op_StoreF; }
@@ -354,8 +368,8 @@
 // Load a double (64 bits) from memory
 class LoadDNode : public LoadNode {
 public:
-  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
-    : LoadNode(c,mem,adr,at,t) {}
+  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo)
+    : LoadNode(c, mem, adr, at, t, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegD; }
   virtual int store_Opcode() const { return Op_StoreD; }
@@ -366,8 +380,8 @@
 // Load a double from unaligned memory
 class LoadD_unalignedNode : public LoadDNode {
 public:
-  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
-    : LoadDNode(c,mem,adr,at) {}
+  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo)
+    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo) {}
   virtual int Opcode() const;
 };
 
@@ -375,8 +389,8 @@
 // Load a pointer from memory (either object or array)
 class LoadPNode : public LoadNode {
 public:
-  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
-    : LoadNode(c,mem,adr,at,t) {}
+  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo)
+    : LoadNode(c, mem, adr, at, t, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegP; }
   virtual int store_Opcode() const { return Op_StoreP; }
@@ -388,8 +402,8 @@
 // Load a narrow oop from memory (either object or array)
 class LoadNNode : public LoadNode {
 public:
-  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
-    : LoadNode(c,mem,adr,at,t) {}
+  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo)
+    : LoadNode(c, mem, adr, at, t, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreN; }
@@ -400,8 +414,8 @@
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
 public:
-  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
-    : LoadPNode(c,mem,adr,at,tk) {}
+  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
+    : LoadPNode(c, mem, adr, at, tk, mo) {}
   virtual int Opcode() const;
   virtual const Type *Value( PhaseTransform *phase ) const;
   virtual Node *Identity( PhaseTransform *phase );
@@ -416,8 +430,8 @@
 // Load a narrow Klass from an object.
 class LoadNKlassNode : public LoadNNode {
 public:
-  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
-    : LoadNNode(c,mem,adr,at,tk) {}
+  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
+    : LoadNNode(c, mem, adr, at, tk, mo) {}
   virtual int Opcode() const;
   virtual uint ideal_reg() const { return Op_RegN; }
   virtual int store_Opcode() const { return Op_StoreNKlass; }
@@ -432,6 +446,14 @@
 //------------------------------StoreNode--------------------------------------
 // Store value; requires Store, Address and Value
 class StoreNode : public MemNode {
+private:
+  // On platforms with weak memory ordering (e.g., PPC, IA64) we distinguish
+  // between stores that may be reordered and stores that require release
+  // semantics to adhere to the Java specification.  The required behaviour
+  // is stored in this field.
+  const MemOrd _mo;
+  // Needed for proper cloning.
+  virtual uint size_of() const { return sizeof(*this); }
 protected:
   virtual uint cmp( const Node &n ) const;
   virtual bool depends_only_on_test() const { return false; }
@@ -440,18 +462,44 @@
   Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);
 
 public:
-  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
-    : MemNode(c,mem,adr,at,val) {
+  // We must ensure that stores of object references will be visible
+  // only after the object's initialization. So the callers of this
+  // procedure must indicate that the store requires `release'
+  // semantics if the stored value is an object reference that might
+  // point to a new object and may become externally visible.
+  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : MemNode(c, mem, adr, at, val), _mo(mo) {
     init_class_id(Class_Store);
   }
-  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
-    : MemNode(c,mem,adr,at,val,oop_store) {
+  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
+    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
     init_class_id(Class_Store);
   }
 
-  // Polymorphic factory method:
-  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
-                          const TypePtr* at, Node *val, BasicType bt );
+  inline bool is_unordered() const { return !is_release(); }
+  inline bool is_release() const {
+    assert((_mo == unordered || _mo == release), "unexpected");
+    return _mo == release;
+  }
+
+  // Conservatively release stores of object references in order to
+  // ensure visibility of object initialization.
+  static inline MemOrd release_if_reference(const BasicType t) {
+    const MemOrd mo = (t == T_ARRAY ||
+                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
+                       t == T_OBJECT) ? release : unordered;
+    return mo;
+  }
+
+  // Polymorphic factory method
+  //
+  // We must ensure that stores of object references will be visible
+  // only after the object's initialization. So the callers of this
+  // procedure must indicate that the store requires `release'
+  // semantics if the stored value is an object reference that might
+  // point to a new object and may become externally visible.
+  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
+                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);
 
   virtual uint hash() const;    // Check the type
 
@@ -482,7 +530,8 @@
 // Store byte to memory
 class StoreBNode : public StoreNode {
 public:
-  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   virtual BasicType memory_type() const { return T_BYTE; }
@@ -492,7 +541,8 @@
 // Store char/short to memory
 class StoreCNode : public StoreNode {
 public:
-  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
   virtual BasicType memory_type() const { return T_CHAR; }
@@ -502,7 +552,8 @@
 // Store int to memory
 class StoreINode : public StoreNode {
 public:
-  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_INT; }
 };
@@ -519,15 +570,12 @@
   const bool _require_atomic_access;  // is piecewise store forbidden?
 
 public:
-  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
-              bool require_atomic_access = false )
-    : StoreNode(c,mem,adr,at,val)
-    , _require_atomic_access(require_atomic_access)
-  {}
+  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
+    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_LONG; }
   bool require_atomic_access() { return _require_atomic_access; }
-  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
+  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     StoreNode::dump_spec(st);
@@ -540,7 +588,8 @@
 // Store float to memory
 class StoreFNode : public StoreNode {
 public:
-  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_FLOAT; }
 };
@@ -549,7 +598,8 @@
 // Store double to memory
 class StoreDNode : public StoreNode {
 public:
-  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_DOUBLE; }
 };
@@ -558,7 +608,8 @@
 // Store pointer to memory
 class StorePNode : public StoreNode {
 public:
-  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_ADDRESS; }
 };
@@ -567,7 +618,8 @@
 // Store narrow oop to memory
 class StoreNNode : public StoreNode {
 public:
-  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
+  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_NARROWOOP; }
 };
@@ -576,7 +628,8 @@
 // Store narrow klass to memory
 class StoreNKlassNode : public StoreNNode {
 public:
-  StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {}
+  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
+    : StoreNNode(c, mem, adr, at, val, mo) {}
   virtual int Opcode() const;
   virtual BasicType memory_type() const { return T_NARROWKLASS; }
 };
@@ -597,7 +650,7 @@
 
 public:
   StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
-    StoreNode(c,mem,adr,at,val,oop_store),
+    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
     _oop_alias_idx(oop_alias_idx) {
     assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
            _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
@@ -617,8 +670,8 @@
 // On PowerPC and friends it's a real load-locked.
 class LoadPLockedNode : public LoadPNode {
 public:
-  LoadPLockedNode( Node *c, Node *mem, Node *adr )
-    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
+  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
+    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
   virtual int Opcode() const;
   virtual int store_Opcode() const { return Op_StorePConditional; }
   virtual bool depends_only_on_test() const { return true; }
@@ -941,6 +994,17 @@
   virtual int Opcode() const;
 };
 
+// "Acquire" - no following ref can move before (but earlier refs can
+// follow, like an early Load stalled in cache).  Requires multi-cpu
+// visibility.  Inserted independent of any load, as required
+// for intrinsic sun.misc.Unsafe.loadFence().
+class LoadFenceNode: public MemBarNode {
+public:
+  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
 // "Release" - no earlier ref can move after (but later refs can move
 // up, like a speculative pipelined cache-hitting Load).  Requires
 // multi-cpu visibility.  Inserted before a volatile store.
@@ -951,6 +1015,17 @@
   virtual int Opcode() const;
 };
 
+// "Release" - no earlier ref can move after (but later refs can move
+// up, like a speculative pipelined cache-hitting Load).  Requires
+// multi-cpu visibility.  Inserted independent of any store, as required
+// for intrinsic sun.misc.Unsafe.storeFence().
+class StoreFenceNode: public MemBarNode {
+public:
+  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
+    : MemBarNode(C, alias_idx, precedent) {}
+  virtual int Opcode() const;
+};
+
 // "Acquire" - no following ref can move before (but earlier refs can
 // follow, like an early Load stalled in cache).  Requires multi-cpu
 // visibility.  Inserted after a FastLock.
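
The LoadFenceNode/StoreFenceNode added above back the sun.misc.Unsafe.loadFence()/storeFence() intrinsics. As a rough analogy only (not HotSpot code), the orderings they provide correspond to C++11 fences:

    #include <atomic>

    // Sketch only: C++11 analogue of the ordering the new nodes provide.
    void load_fence_analogue()  { std::atomic_thread_fence(std::memory_order_acquire); }
    void store_fence_analogue() { std::atomic_thread_fence(std::memory_order_release); }
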
--- a/src/share/vm/opto/mulnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/mulnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -485,7 +485,8 @@
       Node *ldus = new (phase->C) LoadUSNode(load->in(MemNode::Control),
                                              load->in(MemNode::Memory),
                                              load->in(MemNode::Address),
-                                             load->adr_type());
+                                             load->adr_type(),
+                                             TypeInt::CHAR, MemNode::unordered);
       ldus = phase->transform(ldus);
       return new (phase->C) AndINode(ldus, phase->intcon(mask & 0xFFFF));
     }
@@ -496,7 +497,8 @@
       Node* ldub = new (phase->C) LoadUBNode(load->in(MemNode::Control),
                                              load->in(MemNode::Memory),
                                              load->in(MemNode::Address),
-                                             load->adr_type());
+                                             load->adr_type(),
+                                             TypeInt::UBYTE, MemNode::unordered);
       ldub = phase->transform(ldub);
       return new (phase->C) AndINode(ldub, phase->intcon(mask));
     }
@@ -931,9 +933,10 @@
              ld->outcnt() == 1 && ld->unique_out() == shl)
       // Replace zero-extension-load with sign-extension-load
       return new (phase->C) LoadSNode( ld->in(MemNode::Control),
-                                ld->in(MemNode::Memory),
-                                ld->in(MemNode::Address),
-                                ld->adr_type());
+                                       ld->in(MemNode::Memory),
+                                       ld->in(MemNode::Address),
+                                       ld->adr_type(), TypeInt::SHORT,
+                                       MemNode::unordered);
   }
 
   // Check for "(byte[i] <<24)>>24" which simply sign-extends
--- a/src/share/vm/opto/multnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/multnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,11 +54,6 @@
         assert(Opcode() != Op_If || proj->Opcode() == (which_proj?Op_IfTrue:Op_IfFalse), "bad if #2");
         return proj;
       }
-    } else if (p->is_FlagsProj()) {
-      FlagsProjNode *proj = p->as_FlagsProj();
-      if (proj->_con == which_proj) {
-        return proj;
-      }
     } else {
       assert(p == this && this->is_Start(), "else must be proj");
       continue;
@@ -94,7 +89,7 @@
   if ((_con == TypeFunc::Parms) &&
       n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
     // The result of autoboxing is always non-null on normal path.
-    t = t->join(TypePtr::NOTNULL);
+    t = t->join_speculative(TypePtr::NOTNULL);
   }
   return t;
 }
--- a/src/share/vm/opto/multnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/multnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/node.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/node.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1002,13 +1002,13 @@
   if (is_Type()) {
     TypeNode *n = this->as_Type();
     if (VerifyAliases) {
-      assert(new_type->higher_equal(n->type()), "new type must refine old type");
+      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
     }
     n->set_type(new_type);
   } else if (is_Load()) {
     LoadNode *n = this->as_Load();
     if (VerifyAliases) {
-      assert(new_type->higher_equal(n->type()), "new type must refine old type");
+      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
     }
     n->set_type(new_type);
   }
@@ -1530,7 +1530,6 @@
 
 
 #ifndef PRODUCT
-int Node::_in_dump_cnt = 0;
 
 // -----------------------------Name-------------------------------------------
 extern const char *NodeClassNames[];
@@ -1602,7 +1601,7 @@
 void Node::dump(const char* suffix, outputStream *st) const {
   Compile* C = Compile::current();
   bool is_new = C->node_arena()->contains(this);
-  _in_dump_cnt++;
+  C->_in_dump_cnt++;
   st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());
 
   // Dump the required and precedence inputs
@@ -1617,7 +1616,7 @@
     dump_orig(debug_orig(), st);
 #endif
     st->cr();
-    _in_dump_cnt--;
+    C->_in_dump_cnt--;
     return;                     // don't process dead nodes
   }
 
@@ -1669,7 +1668,7 @@
     }
   }
   if (suffix) st->print(suffix);
-  _in_dump_cnt--;
+  C->_in_dump_cnt--;
 }
 
 //------------------------------dump_req--------------------------------------
--- a/src/share/vm/opto/node.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/node.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -69,7 +69,6 @@
 class EncodePKlassNode;
 class FastLockNode;
 class FastUnlockNode;
-class FlagsProjNode;
 class IfNode;
 class IfFalseNode;
 class IfTrueNode;
@@ -100,7 +99,6 @@
 class MachSpillCopyNode;
 class MachTempNode;
 class Matcher;
-class MathExactNode;
 class MemBarNode;
 class MemBarStoreStoreNode;
 class MemNode;
@@ -357,6 +355,8 @@
 
   // Reference to the i'th input Node.  Error if out of bounds.
   Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
+  // Reference to the i'th input Node.  NULL if out of bounds.
+  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
   // Reference to the i'th output Node.  Error if out of bounds.
   // Use this accessor sparingly.  We are going to try to use iterators instead.
   Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
@@ -384,6 +384,10 @@
 
   // Set a required input edge, also updates corresponding output edge
   void add_req( Node *n ); // Append a NEW required input
+  void add_req( Node *n0, Node *n1 ) {
+    add_req(n0); add_req(n1); }
+  void add_req( Node *n0, Node *n1, Node *n2 ) {
+    add_req(n0); add_req(n1); add_req(n2); }
   void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   void del_req( uint idx ); // Delete required edge & compact
   void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
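
A minimal usage sketch of the lookup() accessor and the add_req() overloads added above (hypothetical fragment, HotSpot context assumed):

    // Sketch only: lookup() is the bounds-safe variant of in().
    void example(Node* n, Node* m, Node* a, Node* b) {
      Node* maybe  = n->lookup(5);  // NULL when n has fewer than 6 input slots
      Node* strict = n->in(5);      // asserts in debug builds when out of bounds
      if (maybe != NULL && maybe == strict) {
        m->add_req(a, b);           // new overload: add_req(a); add_req(b);
      }
    }
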
@@ -569,7 +573,6 @@
       DEFINE_CLASS_ID(MemBar,      Multi, 3)
         DEFINE_CLASS_ID(Initialize,       MemBar, 0)
         DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
-      DEFINE_CLASS_ID(MathExact,   Multi, 4)
 
     DEFINE_CLASS_ID(Mach,  Node, 1)
       DEFINE_CLASS_ID(MachReturn, Mach, 0)
@@ -626,7 +629,6 @@
       DEFINE_CLASS_ID(Cmp,   Sub, 0)
         DEFINE_CLASS_ID(FastLock,   Cmp, 0)
         DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
-        DEFINE_CLASS_ID(FlagsProj, Cmp, 2)
 
     DEFINE_CLASS_ID(MergeMem, Node, 7)
     DEFINE_CLASS_ID(Bool,     Node, 8)
@@ -730,7 +732,6 @@
   DEFINE_CLASS_QUERY(EncodePKlass)
   DEFINE_CLASS_QUERY(FastLock)
   DEFINE_CLASS_QUERY(FastUnlock)
-  DEFINE_CLASS_QUERY(FlagsProj)
   DEFINE_CLASS_QUERY(If)
   DEFINE_CLASS_QUERY(IfFalse)
   DEFINE_CLASS_QUERY(IfTrue)
@@ -759,7 +760,6 @@
   DEFINE_CLASS_QUERY(MachSafePoint)
   DEFINE_CLASS_QUERY(MachSpillCopy)
   DEFINE_CLASS_QUERY(MachTemp)
-  DEFINE_CLASS_QUERY(MathExact)
   DEFINE_CLASS_QUERY(Mem)
   DEFINE_CLASS_QUERY(MemBar)
   DEFINE_CLASS_QUERY(MemBarStoreStore)
@@ -1027,8 +1027,7 @@
   // RegMask Print Functions
   void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
   void dump_out_regmask() { out_RegMask().dump(); }
-  static int _in_dump_cnt;
-  static bool in_dump() { return _in_dump_cnt > 0; }
+  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
   void fast_dump() const {
     tty->print("%4d: %-17s", _idx, Name());
     for (uint i = 0; i < len(); i++)
@@ -1350,7 +1349,7 @@
 public:
   Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
   Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
-  bool contains(Node* n) {
+  bool contains(const Node* n) const {
     for (uint e = 0; e < size(); e++) {
       if (at(e) == n) return true;
     }
--- a/src/share/vm/opto/optoreg.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/optoreg.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/output.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/output.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -344,6 +344,11 @@
   uint*      jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
   uint*      jmp_size   = NEW_RESOURCE_ARRAY(uint,nblocks);
   int*       jmp_nidx   = NEW_RESOURCE_ARRAY(int ,nblocks);
+
+  // Collect worst case block paddings
+  int* block_worst_case_pad = NEW_RESOURCE_ARRAY(int, nblocks);
+  memset(block_worst_case_pad, 0, nblocks * sizeof(int));
+
   DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks); )
   DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks); )
 
@@ -460,6 +465,7 @@
           last_avoid_back_to_back_adr += max_loop_pad;
         }
         blk_size += max_loop_pad;
+        block_worst_case_pad[i + 1] = max_loop_pad;
       }
     }
 
@@ -499,9 +505,16 @@
         if (bnum > i) { // adjust following block's offset
           offset -= adjust_block_start;
         }
+
+        // This block can be a loop header; account for the padding
+        // in the previous block.
+        int block_padding = block_worst_case_pad[i];
+        assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least the padding on top");
         // In the following code a nop could be inserted before
         // the branch which will increase the backward distance.
-        bool needs_padding = ((uint)br_offs == last_may_be_short_branch_adr);
+        bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
+        assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
+
         if (needs_padding && offset <= 0)
           offset -= nop_size;
 
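The padding-adjusted distance test above can be walked through with invented numbers; a self-contained sketch:

    #include <cstdint>

    // Sketch only, invented numbers: the needs_padding test with loop padding.
    bool needs_padding_example() {
      uint32_t br_offs = 112;                      // branch offset in current estimate
      int block_padding = 12;                      // worst-case pad at top of this block
      uint32_t last_may_be_short_branch_adr = 100; // address of a prior short branch
      // Stripping the padding exposes that the branch sits right at the
      // may-be-short address, so a nop must be accounted for:
      return (uint32_t)(br_offs - block_padding) == last_may_be_short_branch_adr; // true
    }
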
@@ -1068,7 +1081,7 @@
   // Compute prolog code size
   _method_size = 0;
   _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
-#ifdef IA64
+#if defined(IA64) && !defined(AIX)
   if (save_argument_registers()) {
     // 4815101: this is a stub with implicit and unknown precision fp args.
     // The usual spill mechanism can only generate stfd's in this case, which
@@ -1086,6 +1099,7 @@
   assert(_frame_slots >= 0 && _frame_slots < 1000000, "sanity check");
 
   if (has_mach_constant_base_node()) {
+    uint add_size = 0;
     // Fill the constant table.
     // Note:  This must happen before shorten_branches.
     for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
@@ -1099,6 +1113,9 @@
         if (n->is_MachConstant()) {
           MachConstantNode* machcon = n->as_MachConstant();
           machcon->eval_constant(C);
+        } else if (n->is_Mach()) {
+          // On Power there are more nodes that issue constants.
+          add_size += (n->as_Mach()->ins_num_consts() * 8);
         }
       }
     }
@@ -1106,7 +1123,7 @@
     // Calculate the offsets of the constants and the size of the
     // constant table (including the padding to the next section).
     constant_table().calculate_offsets_and_size();
-    const_req = constant_table().size();
+    const_req = constant_table().size() + add_size;
   }
 
   // Initialize the space for the BufferBlob used to find and verify
@@ -1377,7 +1394,7 @@
             int offset = blk_starts[block_num] - current_offset;
             if (block_num >= i) {
               // Current and following block's offset are not
-              // finilized yet, adjust distance by the difference
+              // finalized yet, adjust distance by the difference
               // between calculated and final offsets of current block.
               offset -= (blk_starts[i] - blk_offset);
             }
@@ -1458,6 +1475,12 @@
           // Intel all the time, with add-to-memory kind of opcodes.
           previous_offset = current_offset;
         }
+
+        // Not an else-if!
+        // If this is a trap based cmp then add its offset to the list.
+        if (mach->is_TrapBasedCheckNode()) {
+          inct_starts[inct_cnt++] = current_offset;
+        }
       }
 
       // Verify that there is sufficient space remaining
@@ -1724,6 +1747,12 @@
       _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
       continue;
     }
+    // Handle implicit exception table updates: trap instructions.
+    if (n->is_Mach() && n->as_Mach()->is_TrapBasedCheckNode()) {
+      uint block_num = block->non_connector_successor(0)->_pre_order;
+      _inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
+      continue;
+    }
   } // End of for all blocks fill in exception table entries
 }
 
--- a/src/share/vm/opto/output.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/output.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,8 +42,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 class Arena;
--- a/src/share/vm/opto/parse.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/parse.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -141,6 +141,13 @@
   GrowableArray<InlineTree*> subtrees() { return _subtrees; }
 
   void print_value_on(outputStream* st) const PRODUCT_RETURN;
+
+  bool        _forced_inline;     // Inlining was forced by CompilerOracle or ciReplay
+  bool        forced_inline()     const { return _forced_inline; }
+  // Count number of nodes in this subtree
+  int         count() const;
+  // Dump inlining replay data to the stream.
+  void dump_replay_data(outputStream* out);
 };
 
 
@@ -330,7 +337,8 @@
 
   GraphKit      _exits;         // Record all normal returns and throws here.
   bool          _wrote_final;   // Did we write a final field?
-  bool          _count_invocations; // update and test invocation counter
+  bool          _wrote_volatile;     // Did we write a volatile field?
+  bool          _count_invocations;  // update and test invocation counter
   bool          _method_data_update; // update method data oop
   Node*         _alloc_with_final;   // An allocation node with final field
 
@@ -373,6 +381,8 @@
   GraphKit&     exits()               { return _exits; }
   bool          wrote_final() const   { return _wrote_final; }
   void      set_wrote_final(bool z)   { _wrote_final = z; }
+  bool          wrote_volatile() const { return _wrote_volatile; }
+  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
   bool          count_invocations() const  { return _count_invocations; }
   bool          method_data_update() const { return _method_data_update; }
   Node*    alloc_with_final() const   { return _alloc_with_final; }
--- a/src/share/vm/opto/parse1.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/parse1.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,24 +106,24 @@
   // Very similar to LoadNode::make, except we handle unaligned longs and
   // doubles on Sparc.  Intel can handle them just fine directly.
   Node *l;
-  switch( bt ) {                // Signature is flattened
-  case T_INT:     l = new (C) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_FLOAT:   l = new (C) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break;
-  case T_ADDRESS: l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM  ); break;
-  case T_OBJECT:  l = new (C) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break;
+  switch (bt) {                // Signature is flattened
+  case T_INT:     l = new (C) LoadINode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInt::INT,        MemNode::unordered); break;
+  case T_FLOAT:   l = new (C) LoadFNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::FLOAT,         MemNode::unordered); break;
+  case T_ADDRESS: l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM,  MemNode::unordered); break;
+  case T_OBJECT:  l = new (C) LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM, MemNode::unordered); break;
   case T_LONG:
   case T_DOUBLE: {
     // Since arguments are in reverse order, the argument address 'adr'
     // refers to the back half of the long/double.  Recompute adr.
-    adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize );
-    if( Matcher::misaligned_doubles_ok ) {
+    adr = basic_plus_adr(local_addrs_base, local_addrs, -(index+1)*wordSize);
+    if (Matcher::misaligned_doubles_ok) {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadDNode(ctl, mem, adr, TypeRawPtr::BOTTOM, Type::DOUBLE, MemNode::unordered)
+        : (Node*)new (C) LoadLNode(ctl, mem, adr, TypeRawPtr::BOTTOM, TypeLong::LONG, MemNode::unordered);
     } else {
       l = (bt == T_DOUBLE)
-        ? (Node*)new (C) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM )
-        : (Node*)new (C) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM );
+        ? (Node*)new (C) LoadD_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered)
+        : (Node*)new (C) LoadL_unalignedNode(ctl, mem, adr, TypeRawPtr::BOTTOM, MemNode::unordered);
     }
     break;
   }
@@ -229,7 +229,7 @@
     Node *displaced_hdr = fetch_interpreter_state((index*2) + 1, T_ADDRESS, monitors_addr, osr_buf);
 
 
-    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw);
+    store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
 
     // Build a bogus FastLockNode (no code will be generated) and push the
     // monitor into our debug info.
@@ -390,6 +390,7 @@
   _expected_uses = expected_uses;
   _depth = 1 + (caller->has_method() ? caller->depth() : 0);
   _wrote_final = false;
+  _wrote_volatile = false;
   _alloc_with_final = NULL;
   _entry_bci = InvocationEntryBci;
   _tf = NULL;
@@ -907,7 +908,13 @@
   Node* iophi = _exits.i_o();
   _exits.set_i_o(gvn().transform(iophi));
 
-  if (wrote_final()) {
+  // On PPC64, also add MemBarRelease for constructors which write
+  // volatile fields. As support_IRIW_for_not_multiple_copy_atomic_cpu
+  // is set on PPC64, no sync instruction is issued after volatile
+// stores. We want to guarantee the same behaviour as on platforms
+  // with total store order, although this is not required by the Java
+  // memory model. So as with finals, we add a barrier here.
+  if (wrote_final() PPC64_ONLY(|| (wrote_volatile() && method()->is_initializer()))) {
     // This method (which must be a constructor by the rules of Java)
     // wrote a final.  The effects of all initializations must be
     // committed to memory before any code after the constructor
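
As a rough analogy only (not the patch's code), the constructor-exit barrier corresponds to a C++11 release fence before publication:

    #include <atomic>

    // Sketch only: a release fence keeps constructor writes (finals, and
    // volatiles on PPC64) from reordering past the publication store.
    struct Box { int payload; };
    std::atomic<Box*> g_box{nullptr};

    void publish() {
      Box* b = new Box();
      b->payload = 42;                                      // constructor write
      std::atomic_thread_fence(std::memory_order_release);  // ~ MemBarRelease at exit
      g_box.store(b, std::memory_order_relaxed);            // publication
    }
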
@@ -1649,7 +1656,7 @@
           assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
           map()->set_req(j, _gvn.transform_no_reclaim(phi));
           debug_only(const Type* bt2 = phi->bottom_type());
-          assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
+          assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
           record_for_igvn(phi);
         }
       }
@@ -1931,7 +1938,7 @@
   Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
 
   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
-  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT);
+  Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
 
   Node* mask  = _gvn.transform(new (C) AndINode(access_flags, intcon(JVM_ACC_HAS_FINALIZER)));
   Node* check = _gvn.transform(new (C) CmpINode(mask, intcon(0)));
@@ -2022,7 +2029,7 @@
           !tp->klass()->is_interface()) {
         // sharpen the type eagerly; this eases certain assert checking
         if (tp->higher_equal(TypeInstPtr::NOTNULL))
-          tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
+          tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
         value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
       }
     }
--- a/src/share/vm/opto/parse2.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/parse2.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
   if (stopped())  return;     // guaranteed null or range check
   dec_sp(2);                  // Pop array and index
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
-  Node* ld = make_load(control(), adr, elem, elem_type, adr_type);
+  Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered);
   push(ld);
 }
 
@@ -62,7 +62,7 @@
   Node* val = pop();
   dec_sp(2);                  // Pop array and index
   const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type);
-  store_to_memory(control(), adr, val, elem_type, adr_type);
+  store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type));
 }
 
 
@@ -88,7 +88,7 @@
       if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
         // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
         const Type* subklass = Type::get_const_type(toop->klass());
-        elemtype = subklass->join(el);
+        elemtype = subklass->join_speculative(el);
       }
     }
   }
@@ -1278,7 +1278,7 @@
        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
        // or the narrowOop equivalent.
        const Type* obj_type = _gvn.type(obj);
-       const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
+       const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
        if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
            tboth->higher_equal(obj_type)) {
           // obj has to be of the exact type Foo if the CmpP succeeds.
@@ -1288,7 +1288,7 @@
               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
             TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
             const Type* tcc = ccast->as_Type()->type();
-            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
+            assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
             // Delay transform() call to allow recovery of pre-cast value
             // at the control merge.
             _gvn.set_type_bottom(ccast);
@@ -1318,7 +1318,7 @@
   switch (btest) {
   case BoolTest::eq:                    // Constant test?
     {
-      const Type* tboth = tcon->join(tval);
+      const Type* tboth = tcon->join_speculative(tval);
       if (tboth == tval)  break;        // Nothing to gain.
       if (tcon->isa_int()) {
         ccast = new (C) CastIINode(val, tboth);
@@ -1352,7 +1352,7 @@
 
   if (ccast != NULL) {
     const Type* tcc = ccast->as_Type()->type();
-    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
+    assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
     // Delay transform() call to allow recovery of pre-cast value
     // at the control merge.
     ccast->set_req(0, control());
@@ -1720,14 +1720,14 @@
     a = array_addressing(T_LONG, 0);
     if (stopped())  return;     // guaranteed null or range check
     dec_sp(2);                  // Pop array and index
-    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS));
+    push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered));
     break;
   }
   case Bytecodes::_daload: {
     a = array_addressing(T_DOUBLE, 0);
     if (stopped())  return;     // guaranteed null or range check
     dec_sp(2);                  // Pop array and index
-    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES));
+    push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered));
     break;
   }
   case Bytecodes::_bastore: array_store(T_BYTE);  break;
@@ -1744,7 +1744,7 @@
     a = pop();                  // the array itself
     const TypeOopPtr* elemtype  = _gvn.type(a)->is_aryptr()->elem()->make_oopptr();
     const TypeAryPtr* adr_type = TypeAryPtr::OOPS;
-    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT);
+    Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT, MemNode::release);
     break;
   }
   case Bytecodes::_lastore: {
@@ -1752,7 +1752,7 @@
     if (stopped())  return;     // guaranteed null or range check
     c = pop_pair();
     dec_sp(2);                  // Pop array and index
-    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS);
+    store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered);
     break;
   }
   case Bytecodes::_dastore: {
@@ -1761,7 +1761,7 @@
     c = pop_pair();
     dec_sp(2);                  // Pop array and index
     c = dstore_rounding(c);
-    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES);
+    store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered);
     break;
   }
   case Bytecodes::_getfield:
--- a/src/share/vm/opto/parse3.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/parse3.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -227,8 +227,13 @@
   } else {
     type = Type::get_const_basic_type(bt);
   }
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
+    insert_mem_bar(Op_MemBarVolatile);   // StoreLoad barrier
+  }
   // Build the load.
-  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);
+  // A volatile load must be ordered as acquire; plain loads are unordered.
+  MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
+  Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, is_vol);
 
   // Adjust Java stack
   if (type2size[bt] == 1)
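
A C++11 analogue (sketch only) of the acquire ordering now attached to volatile getfield:

    #include <atomic>

    // Sketch only: MemNode::acquire keeps later memory ops from moving
    // above the volatile load; plain loads stay MemNode::unordered.
    std::atomic<int> g_vol{0};
    int volatile_get_analogue() { return g_vol.load(std::memory_order_acquire); }
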
@@ -288,6 +293,16 @@
   // Round doubles before storing
   if (bt == T_DOUBLE)  val = dstore_rounding(val);
 
+  // Conservatively release stores of object references.
+  const MemNode::MemOrd mo =
+    is_vol ?
+    // Volatile fields need releasing stores.
+    MemNode::release :
+    // Non-volatile fields also need releasing stores if they hold an
+    // object reference, because the object reference might point to
+    // a freshly created object.
+    StoreNode::release_if_reference(bt);
+
   // Store the value.
   Node* store;
   if (bt == T_OBJECT) {
@@ -297,15 +312,24 @@
     } else {
       field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
     }
-    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
+    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
   } else {
-    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
+    store = store_to_memory(control(), adr, val, bt, adr_type, mo, is_vol);
   }
 
   // If reference is volatile, prevent following volatiles ops from
   // floating up before the volatile write.
   if (is_vol) {
-    insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+    // On IRIW-sensitive platforms the MemBarVolatile is issued before each volatile load instead; see do_get_xxx().
+    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
+      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
+    }
+    // Remember we wrote a volatile field.
+    // On a CPU that is not multiple-copy atomic (e.g. PPC64) a barrier must be issued
+    // in constructors which have such stores. See do_exits() in parse1.cpp.
+    if (is_field) {
+      set_wrote_volatile(true);
+    }
   }
 
   // If the field is final, the rules of Java say we are in <init> or <clinit>.
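
And the matching release side (sketch only): volatile putfield becomes MemNode::release, while release_if_reference(bt) conservatively releases even non-volatile oop stores so a freshly created object is fully initialized before its reference can escape:

    #include <atomic>

    // Sketch only: MemNode::release keeps earlier memory ops from moving
    // below the store, mirroring the volatile putfield above.
    std::atomic<int> g_out{0};
    void volatile_put_analogue(int v) { g_out.store(v, std::memory_order_release); }
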
@@ -337,7 +361,7 @@
     //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
     // An oop is not scavengable if it is in the perm gen.
     if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
-      con_type = con_type->join(stable_type);
+      con_type = con_type->join_speculative(stable_type);
     break;
 
   case T_ILLEGAL:
@@ -414,7 +438,7 @@
       Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
       intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
       Node*    eaddr  = basic_plus_adr(array, offset);
-      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
+      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
     }
   }
   return array;
@@ -503,7 +527,7 @@
       // Fill-in it with values
       for (j = 0; j < ndimensions; j++) {
         Node *dims_elem = array_element_address(dims, intcon(j), T_INT);
-        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS);
+        store_to_memory(control(), dims_elem, length[j], T_INT, TypeAryPtr::INTS, MemNode::unordered);
       }
     }
 
--- a/src/share/vm/opto/parseHelper.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/parseHelper.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -222,7 +222,7 @@
 
   Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
   Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
-  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
+  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
   Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
   IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
   set_control(IfTrue(iff));
@@ -232,7 +232,7 @@
   adr_node = basic_plus_adr(kls, kls, init_state_offset);
   // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
   // can generate code to load it as unsigned byte.
-  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN);
+  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
   Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
   tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
   iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
@@ -354,13 +354,13 @@
   Node *counters_node = makecon(adr_type);
   Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
     MethodCounters::interpreter_invocation_counter_offset_in_bytes());
-  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type);
+  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 
   test_counter_against_threshold(cnt, limit);
 
   // Add one to the counter and store
   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
-  store_to_memory( ctrl, adr_iic_node, incr, T_INT, adr_type );
+  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
 }
 
 //----------------------------method_data_addressing---------------------------
@@ -392,9 +392,9 @@
   Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);
 
   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
-  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);
+  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
   Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
-  store_to_memory(NULL, adr_node, incr, T_INT, adr_type );
+  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
 }
 
 //--------------------------test_for_osr_md_counter_at-------------------------
@@ -402,7 +402,7 @@
   Node* adr_node = method_data_addressing(md, data, counter_offset);
 
   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
-  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);
+  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
 
   test_counter_against_threshold(cnt, limit);
 }
@@ -412,9 +412,9 @@
   Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());
 
   const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
-  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type);
+  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
   Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
-  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type);
+  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
 }
 
 //----------------------------profile_taken_branch-----------------------------
--- a/src/share/vm/opto/phase.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/phase.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "code/nmethod.hpp"
 #include "compiler/compileBroker.hpp"
 #include "opto/compile.hpp"
+#include "opto/matcher.hpp"
 #include "opto/node.hpp"
 #include "opto/phase.hpp"
 
@@ -55,6 +56,7 @@
 elapsedTimer Phase::_t_macroEliminate;
 elapsedTimer Phase::_t_macroExpand;
 elapsedTimer Phase::_t_peephole;
+elapsedTimer Phase::_t_postalloc_expand;
 elapsedTimer Phase::_t_codeGeneration;
 elapsedTimer Phase::_t_registerMethod;
 elapsedTimer Phase::_t_temporaryTimer1;
@@ -144,6 +146,9 @@
   }
   tty->print_cr ("    blockOrdering  : %3.3f sec", Phase::_t_blockOrdering.seconds());
   tty->print_cr ("    peephole       : %3.3f sec", Phase::_t_peephole.seconds());
+  if (Matcher::require_postalloc_expand) {
+    tty->print_cr ("    postalloc_expand: %3.3f sec", Phase::_t_postalloc_expand.seconds());
+  }
   tty->print_cr ("    codeGen        : %3.3f sec", Phase::_t_codeGeneration.seconds());
   tty->print_cr ("    install_code   : %3.3f sec", Phase::_t_registerMethod.seconds());
   tty->print_cr ("    -------------- : ----------");
--- a/src/share/vm/opto/phase.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/phase.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,6 +91,7 @@
   static elapsedTimer _t_macroEliminate;
   static elapsedTimer _t_macroExpand;
   static elapsedTimer _t_peephole;
+  static elapsedTimer _t_postalloc_expand;
   static elapsedTimer _t_codeGeneration;
   static elapsedTimer _t_registerMethod;
   static elapsedTimer _t_temporaryTimer1;
--- a/src/share/vm/opto/phaseX.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/phaseX.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -323,6 +323,23 @@
   }
 }
 
+
+void NodeHash::check_no_speculative_types() {
+#ifdef ASSERT
+  uint max = size();
+  Node *sentinel_node = sentinel();
+  for (uint i = 0; i < max; ++i) {
+    Node *n = at(i);
+    if (n != NULL && n != sentinel_node && n->is_Type()) {
+      TypeNode* tn = n->as_Type();
+      const Type* t = tn->type();
+      const Type* t_no_spec = t->remove_speculative();
+      assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
+    }
+  }
+#endif
+}
+
 #ifndef PRODUCT
 //------------------------------dump-------------------------------------------
 // Dump statistics for the hash table
@@ -1392,11 +1409,11 @@
   assert(UseTypeSpeculation, "speculation is off");
   for (uint i = 0; i < _types.Size(); i++)  {
     const Type* t = _types.fast_lookup(i);
-    if (t != NULL && t->isa_oopptr()) {
-      const TypeOopPtr* to = t->is_oopptr();
-      _types.map(i, to->remove_speculative());
+    if (t != NULL) {
+      _types.map(i, t->remove_speculative());
     }
   }
+  _table.check_no_speculative_types();
 }
 
 //=============================================================================
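The broadened cleanup plus the new debug check enforce a simple fixed-point invariant; a minimal sketch (hypothetical helper, HotSpot context assumed):

    // Sketch only: after remove_speculative_types(), every type in the
    // hash table must be a fixed point of remove_speculative().
    void assert_no_speculative(const Type* t) {
      assert(t->remove_speculative() == t,
             "dead node in hash table or missed node during speculative cleanup");
    }
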
--- a/src/share/vm/opto/phaseX.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/phaseX.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -92,7 +92,8 @@
   }
 
   void   remove_useless_nodes(VectorSet &useful); // replace with sentinel
-  void replace_with(NodeHash* nh);
+  void   replace_with(NodeHash* nh);
+  void   check_no_speculative_types(); // Check no speculative part for type nodes in table
 
   Node  *sentinel() { return _sentinel; }
 
@@ -501,6 +502,9 @@
                                         Deoptimization::DeoptReason reason);
 
   void remove_speculative_types();
+  void check_no_speculative_types() {
+    _table.check_no_speculative_types();
+  }
 
 #ifndef PRODUCT
 protected:
--- a/src/share/vm/opto/postaloc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/postaloc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/reg_split.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/reg_split.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,7 @@
 // Get a SpillCopy node with wide-enough masks.  Use the 'wide-mask', the
 // wide ideal-register spill-mask if possible.  If the 'wide-mask' does
 // not cover the input (or output), use the input (or output) mask instead.
-Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
+Node *PhaseChaitin::get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx ) {
   // If ideal reg doesn't exist we've got a bad schedule happening
   // that is forcing us to spill something that isn't spillable.
   // Bail rather than abort
@@ -93,7 +93,7 @@
       // Here we assume a trip through memory is required.
       w_i_mask = &C->FIRST_STACK_mask();
   }
-  return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
+  return new (C) MachSpillCopyNode(spill_type, def, *w_i_mask, *w_o_mask );
 }
 
 //------------------------------insert_proj------------------------------------
@@ -159,7 +159,7 @@
   assert( loc >= 0, "must insert past block head" );
 
   // Get a def-side SpillCopy
-  Node *spill = get_spillcopy_wide(def,NULL,0);
+  Node *spill = get_spillcopy_wide(MachSpillCopyNode::Definition, def, NULL, 0);
   // Did we fail to split?, then bail
   if (!spill) {
     return 0;
@@ -180,7 +180,7 @@
 //------------------------------split_USE--------------------------------------
 // Splits at uses can involve redeffing the LRG, so no CISC Spilling there.
 // Debug uses want to know if def is already stack enabled.
-uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
+uint PhaseChaitin::split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
 #ifdef ASSERT
   // Increment the counter for this lrg
   splits.at_put(slidx, splits.at(slidx)+1);
@@ -216,7 +216,7 @@
         // DEF is UP, so must copy it DOWN and hook in USE
         // Insert SpillCopy before the USE, which uses DEF as its input,
         // and defs a new live range, which is used by this node.
-        Node *spill = get_spillcopy_wide(def,use,useidx);
+        Node *spill = get_spillcopy_wide(spill_type, def,use,useidx);
         // did we fail to split?
         if (!spill) {
           // Bail
@@ -268,7 +268,7 @@
     bindex = b->find_node(use);
   }
 
-  Node *spill = get_spillcopy_wide( def, use, useidx );
+  Node *spill = get_spillcopy_wide(spill_type, def, use, useidx );
   if( !spill ) return 0;        // Bailed out
   // Insert SpillCopy before the USE, which uses the reaching DEF as
   // its input, and defs a new live range, which is used by this node.
@@ -327,7 +327,7 @@
 
       Block *b_def = _cfg.get_block_for_node(def);
       int idx_def = b_def->find_node(def);
-      Node *in_spill = get_spillcopy_wide( in, def, i );
+      Node *in_spill = get_spillcopy_wide(MachSpillCopyNode::InputToRematerialization, in, def, i );
       if( !in_spill ) return 0; // Bailed out
       insert_proj(b_def,idx_def,in_spill,maxlrg++);
       if( b_def == b )
@@ -935,7 +935,7 @@
                 // This def has been rematerialized a couple of times without
                 // progress. It doesn't care if it lives UP or DOWN, so
                 // spill it down now.
-                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
+                maxlrg = split_USE(MachSpillCopyNode::BasePointerToMem, def,b,n,inpidx,maxlrg,false,false,splits,slidx);
                 // If it wasn't split bail
                 if (!maxlrg) {
                   return 0;
@@ -1015,7 +1015,7 @@
                  !is_vect && umask.is_misaligned_pair())) {
               // These need a Split regardless of overlap or pressure
               // SPLIT - NO DEF - NO CISC SPILL
-              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
+              maxlrg = split_USE(MachSpillCopyNode::Bound, def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
               // If it wasn't split bail
               if (!maxlrg) {
                 return 0;
@@ -1027,7 +1027,7 @@
             if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
               // The use at the call can force the def down so insert
               // a split before the use to allow the def more freedom.
-              maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
+              maxlrg = split_USE(MachSpillCopyNode::CallUse, def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
               // If it wasn't split bail
               if (!maxlrg) {
                 return 0;
@@ -1063,7 +1063,7 @@
               else {  // Both are either up or down, and there is no overlap
                 if( dup ) {  // If UP, reg->reg copy
                   // COPY ACROSS HERE - NO DEF - NO CISC SPILL
-                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
+                  maxlrg = split_USE(MachSpillCopyNode::RegToReg, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                   // If it wasn't split bail
                   if (!maxlrg) {
                     return 0;
@@ -1075,10 +1075,10 @@
                   // First Split-UP to move value into Register
                   uint def_ideal = def->ideal_reg();
                   const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
-                  Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
+                  Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::MemToReg, def, dmask, *tmp_rm);
                   insert_proj( b, insidx, spill, maxlrg );
                   // Then Split-DOWN as if previous Split was DEF
-                  maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
+                  maxlrg = split_USE(MachSpillCopyNode::RegToMem, spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
                   // If it wasn't split bail
                   if (!maxlrg) {
                     return 0;
@@ -1103,7 +1103,7 @@
                   }
                 }
                 // COPY DOWN HERE - NO DEF - NO CISC SPILL
-                maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
+                maxlrg = split_USE(MachSpillCopyNode::RegToMem, def,b,n,inpidx,maxlrg,false,false, splits,slidx);
                 // If it wasn't split bail
                 if (!maxlrg) {
                   return 0;
@@ -1118,7 +1118,7 @@
               else {       // DOWN, Split-UP and check register pressure
                 if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
                   // COPY UP HERE - NO DEF - CISC SPILL
-                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
+                  maxlrg = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,true, splits,slidx);
                   // If it wasn't split bail
                   if (!maxlrg) {
                     return 0;
@@ -1126,7 +1126,7 @@
                   insidx++;  // Reset iterator to skip USE side split
                 } else {                          // LRP
                   // COPY UP HERE - WITH DEF - NO CISC SPILL
-                  maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
+                  maxlrg = split_USE(MachSpillCopyNode::MemToReg, def,b,n,inpidx,maxlrg,true,false, splits,slidx);
                   // If it wasn't split bail
                   if (!maxlrg) {
                     return 0;
@@ -1229,7 +1229,7 @@
               if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {  // Check when generating nodes
                 return 0;
               }
-              Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
+              Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::MemToReg, use,use_rm,def_rm);
               n->set_req(copyidx,spill);
               n->as_MachSpillCopy()->set_in_RegMask(def_rm);
               // Put the spill just before the copy
@@ -1336,7 +1336,7 @@
       // Grab the UP/DOWN sense for the input
       u1 = UP[pidx][slidx];
       if( u1 != (phi_up != 0)) {
-        maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
+        maxlrg = split_USE(MachSpillCopyNode::PhiLocationDifferToInputLocation, def, b, phi, i, maxlrg, !u1, false, splits,slidx);
         // If it wasn't split bail
         if (!maxlrg) {
           return 0;
--- a/src/share/vm/opto/regalloc.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/regalloc.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,7 +77,9 @@
   assert( reg <  _matcher._old_SP ||
           (reg >= OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots()) &&
            reg <  _matcher._in_arg_limit) ||
-          reg >=  OptoReg::add(_matcher._new_SP,C->out_preserve_stack_slots()),
+          reg >=  OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ||
+          // Allow return_addr in the out-preserve area.
+          reg == _matcher.return_addr(),
           "register allocated in a preserve area" );
   return reg2offset_unchecked( reg );
 }
--- a/src/share/vm/opto/regalloc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/regalloc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/regmask.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/regmask.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -40,8 +40,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 #define RM_SIZE _RM_SIZE /* a constant private to the class RegMask */
--- a/src/share/vm/opto/regmask.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/regmask.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -43,8 +43,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
 
 // Some fun naming (textual) substitutions:
--- a/src/share/vm/opto/runtime.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/runtime.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -84,8 +84,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
 
 
@@ -569,8 +572,7 @@
 const TypeFunc *OptoRuntime::uncommon_trap_Type() {
   // create input type (domain)
   const Type **fields = TypeTuple::fields(1);
-  // Symbol* name of class to be loaded
-  fields[TypeFunc::Parms+0] = TypeInt::INT;
+  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
   const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);
 
   // create result type (range)
@@ -794,11 +796,20 @@
 
 
 const TypeFunc* OptoRuntime::array_fill_Type() {
-  // create input type (domain): pointer, int, size_t
-  const Type** fields = TypeTuple::fields(3 LP64_ONLY( + 1));
+  const Type** fields;
   int argp = TypeFunc::Parms;
-  fields[argp++] = TypePtr::NOTNULL;
-  fields[argp++] = TypeInt::INT;
+  if (CCallingConventionRequiresIntsAsLongs) {
+    // create input type (domain): pointer, int, size_t
+    fields = TypeTuple::fields(3 LP64_ONLY( + 2));
+    fields[argp++] = TypePtr::NOTNULL;
+    fields[argp++] = TypeLong::LONG;
+    fields[argp++] = Type::HALF;
+  } else {
+    // create input type (domain): pointer, int, size_t
+    fields = TypeTuple::fields(3 LP64_ONLY( + 1));
+    fields[argp++] = TypePtr::NOTNULL;
+    fields[argp++] = TypeInt::INT;
+  }
   fields[argp++] = TypeX_X;               // size in whatevers (size_t)
   LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
   const TypeTuple *domain = TypeTuple::make(argp, fields);
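
On CCallingConventionRequiresIntsAsLongs targets (the PPC64 port this merge brings in) a C int argument occupies a full 64-bit slot, so the ideal signature widens it to LONG plus a HALF filler. A toy model of the two LP64 domain layouts, where the string tokens merely stand in for TypePtr::NOTNULL, TypeInt::INT, TypeLong::LONG, Type::HALF and TypeX_X:

// Toy model of the two domain layouts for (pointer, int, size_t) on LP64.
#include <cassert>
#include <string>
#include <vector>

static std::vector<std::string> array_fill_domain(bool ints_as_longs) {
  std::vector<std::string> fields;
  fields.push_back("ptr");            // TypePtr::NOTNULL
  if (ints_as_longs) {
    fields.push_back("long");         // TypeLong::LONG (the int, widened)
    fields.push_back("half");         // Type::HALF (second slot of the long)
  } else {
    fields.push_back("int");          // TypeInt::INT
  }
  fields.push_back("size_t");         // TypeX_X
  fields.push_back("half");           // LP64: other half of the long length
  return fields;
}

int main() {
  assert(array_fill_domain(false).size() == 4);  // fields(3 LP64_ONLY(+1))
  assert(array_fill_domain(true).size()  == 5);  // fields(3 LP64_ONLY(+2))
  return 0;
}
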
@@ -815,12 +826,18 @@
 const TypeFunc* OptoRuntime::aescrypt_block_Type() {
   // create input type (domain)
   int num_args      = 3;
+  if (Matcher::pass_original_key_for_aes()) {
+    num_args = 4;
+  }
   int argcnt = num_args;
   const Type** fields = TypeTuple::fields(argcnt);
   int argp = TypeFunc::Parms;
   fields[argp++] = TypePtr::NOTNULL;    // src
   fields[argp++] = TypePtr::NOTNULL;    // dest
   fields[argp++] = TypePtr::NOTNULL;    // k array
+  if (Matcher::pass_original_key_for_aes()) {
+    fields[argp++] = TypePtr::NOTNULL;    // original k array
+  }
   assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
   const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
 
@@ -857,6 +874,9 @@
 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
   // create input type (domain)
   int num_args      = 5;
+  if (Matcher::pass_original_key_for_aes()) {
+    num_args = 6;
+  }
   int argcnt = num_args;
   const Type** fields = TypeTuple::fields(argcnt);
   int argp = TypeFunc::Parms;
@@ -865,13 +885,16 @@
   fields[argp++] = TypePtr::NOTNULL;    // k array
   fields[argp++] = TypePtr::NOTNULL;    // r array
   fields[argp++] = TypeInt::INT;        // src len
+  if (Matcher::pass_original_key_for_aes()) {
+    fields[argp++] = TypePtr::NOTNULL;    // original k array
+  }
   assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
   const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
 
-  // no result type needed
+  // returning cipher len (int)
   fields = TypeTuple::fields(1);
-  fields[TypeFunc::Parms+0] = NULL; // void
-  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
+  fields[TypeFunc::Parms+0] = TypeInt::INT;
+  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
   return TypeFunc::make(domain, range);
 }
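
Two signature changes happen in this hunk: the CBC stub's domain gains an optional trailing "original key" argument when Matcher::pass_original_key_for_aes() is true (platforms whose crypto instructions re-expand the key), and its range changes from void to an int carrying the processed cipher length. A sketch with plain ints standing in for the TypeFunc machinery:

// Sketch of the two signature changes; counts only, no real Type objects.
#include <cassert>

static int cbc_num_args(bool pass_original_key) {
  int num_args = 5;                     // src, dest, k array, r array, src len
  if (pass_original_key) num_args = 6;  // + original k array
  return num_args;
}

static int cbc_result_slots(bool returns_len) {
  // void: TypeTuple::make(Parms, fields);  int: TypeTuple::make(Parms+1, fields)
  return returns_len ? 1 : 0;
}

int main() {
  assert(cbc_num_args(true) == 6);      // original key threaded through
  assert(cbc_result_slots(true) == 1);  // new: returning cipher len (int)
  return 0;
}
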
 
@@ -1036,7 +1059,7 @@
     }
 
     // If we are forcing an unwind because of stack overflow then deopt is
-    // irrelevant sice we are throwing the frame away anyway.
+    // irrelevant since we are throwing the frame away anyway.
 
     if (deopting && !force_unwind) {
       handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
@@ -1079,7 +1102,7 @@
 // Note we enter without the usual JRT wrapper. We will call a helper routine that
 // will do the normal VM entry. We do it this way so that we can see if the nmethod
 // we looked up the handler for has been deoptimized in the meantime. If it has been
-// we must not use the handler and instread return the deopt blob.
+// we must not use the handler and instead return the deopt blob.
 address OptoRuntime::handle_exception_C(JavaThread* thread) {
 //
 // We are in Java not VM and in debug mode we have a NoHandleMark
--- a/src/share/vm/opto/stringopts.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/stringopts.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1122,7 +1122,8 @@
 
   return kit.make_load(NULL, kit.basic_plus_adr(klass_node, field->offset_in_bytes()),
                        type, T_OBJECT,
-                       C->get_alias_index(mirror_type->add_offset(field->offset_in_bytes())));
+                       C->get_alias_index(mirror_type->add_offset(field->offset_in_bytes())),
+                       MemNode::unordered);
 }
 
 Node* PhaseStringOpts::int_stringSize(GraphKit& kit, Node* arg) {
@@ -1314,7 +1315,7 @@
     Node* ch = __ AddI(r, __ intcon('0'));
 
     Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
-                                  ch, T_CHAR, char_adr_idx);
+                                  ch, T_CHAR, char_adr_idx, MemNode::unordered);
 
 
     IfNode* iff = kit.create_and_map_if(head, __ Bool(__ CmpI(q, __ intcon(0)), BoolTest::ne),
@@ -1356,7 +1357,7 @@
     } else {
       Node* m1 = __ SubI(charPos, __ intcon(1));
       Node* st = __ store_to_memory(kit.control(), kit.array_element_address(char_array, m1, T_CHAR),
-                                    sign, T_CHAR, char_adr_idx);
+                                    sign, T_CHAR, char_adr_idx, MemNode::unordered);
 
       final_merge->init_req(1, kit.control());
       final_mem->init_req(1, st);
@@ -1387,7 +1388,8 @@
     ciTypeArray* value_array = t->const_oop()->as_type_array();
     for (int e = 0; e < c; e++) {
       __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
-                         __ intcon(value_array->char_at(o + e)), T_CHAR, char_adr_idx);
+                         __ intcon(value_array->char_at(o + e)), T_CHAR, char_adr_idx,
+                         MemNode::unordered);
       start = __ AddI(start, __ intcon(1));
     }
   } else {
@@ -1607,7 +1609,7 @@
         }
         case StringConcat::CharMode: {
           __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
-                             arg, T_CHAR, char_adr_idx);
+                             arg, T_CHAR, char_adr_idx, MemNode::unordered);
           start = __ AddI(start, __ intcon(1));
           break;
         }
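
Across this merge, make_load and store_to_memory gained an explicit memory-ordering argument; the string-concat char-array accesses are plain Java-level accesses, so every call site here passes MemNode::unordered rather than an acquire/release ordering. A toy illustration of the explicit-ordering style (all names are stand-ins, not GraphKit's real API):

// Toy store helper that always spells out its ordering, cf. MemNode::MemOrd.
#include <cassert>

enum MemOrd { unordered, acquire, release };

struct Store { int adr; int val; MemOrd mo; };

static Store store_to_memory(int adr, int val, MemOrd mo) {
  Store s = { adr, val, mo };  // ordering is part of every call, no default
  return s;
}

int main() {
  Store st = store_to_memory(/*adr*/ 16, /*val*/ '0', unordered);
  assert(st.mo == unordered);  // plain (non-volatile) array store
  return 0;
}
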
--- a/src/share/vm/opto/subnode.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/subnode.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1126,11 +1126,15 @@
   Node *cmp = in(1);
   if( !cmp->is_Sub() ) return NULL;
   int cop = cmp->Opcode();
-  if( cop == Op_FastLock || cop == Op_FastUnlock || cop == Op_FlagsProj) return NULL;
+  if( cop == Op_FastLock || cop == Op_FastUnlock) return NULL;
   Node *cmp1 = cmp->in(1);
   Node *cmp2 = cmp->in(2);
   if( !cmp1 ) return NULL;
 
+  if (_test._test == BoolTest::overflow || _test._test == BoolTest::no_overflow) {
+    return NULL;
+  }
+
   // Constant on left?
   Node *con = cmp1;
   uint op2 = cmp2->Opcode();
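
BoolNode::Ideal canonicalizes comparisons, for instance by moving a constant to the right-hand side and flipping the test; an overflow/no_overflow condition reads the math node's flags rather than ordering two values, so no such rewriting is meaningful and the transform now bails out early. A toy sketch of that shape (the sketch returns the test unchanged where the real code returns NULL):

// Commuting operands mirrors lt<->gt, but an overflow test has no
// mirrored form, so canonicalization must leave it alone.
#include <cassert>

enum Test { lt, gt, overflow, no_overflow };

static Test commute(Test t) {
  if (t == overflow || t == no_overflow) return t;  // bail: nothing to mirror
  return (t == lt) ? gt : lt;
}

int main() {
  assert(commute(lt) == gt);
  assert(commute(overflow) == overflow);  // untouched, as after the new guard
  return 0;
}
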
--- a/src/share/vm/opto/subnode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/subnode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/superword.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/superword.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -441,6 +441,7 @@
     return true;   // no induction variable
   }
   CountedLoopEndNode* pre_end = get_pre_loop_end(lp()->as_CountedLoop());
+  assert(pre_end != NULL, "we must have a correct pre-loop");
   assert(pre_end->stride_is_con(), "pre loop stride is constant");
   int preloop_stride = pre_end->stride_con();
 
@@ -1981,7 +1982,7 @@
   CountedLoopNode *main_head = lp()->as_CountedLoop();
   assert(main_head->is_main_loop(), "");
   CountedLoopEndNode* pre_end = get_pre_loop_end(main_head);
-  assert(pre_end != NULL, "");
+  assert(pre_end != NULL, "we must have a correct pre-loop");
   Node *pre_opaq1 = pre_end->limit();
   assert(pre_opaq1->Opcode() == Op_Opaque1, "");
   Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1;
@@ -2145,7 +2146,8 @@
   if (!p_f->is_IfFalse()) return NULL;
   if (!p_f->in(0)->is_CountedLoopEnd()) return NULL;
   CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
-  if (!pre_end->loopnode()->is_pre_loop()) return NULL;
+  CountedLoopNode* loop_node = pre_end->loopnode();
+  if (loop_node == NULL || !loop_node->is_pre_loop()) return NULL;
   return pre_end;
 }
 
--- a/src/share/vm/opto/superword.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/superword.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/opto/type.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/type.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -61,17 +61,22 @@
   { Bad,             T_ILLEGAL,    "tuple:",        false, Node::NotAMachineReg, relocInfo::none          },  // Tuple
   { Bad,             T_ARRAY,      "array:",        false, Node::NotAMachineReg, relocInfo::none          },  // Array
 
-#ifndef SPARC
+#ifdef SPARC
+  { Bad,             T_ILLEGAL,    "vectors:",      false, 0,                    relocInfo::none          },  // VectorS
+  { Bad,             T_ILLEGAL,    "vectord:",      false, Op_RegD,              relocInfo::none          },  // VectorD
+  { Bad,             T_ILLEGAL,    "vectorx:",      false, 0,                    relocInfo::none          },  // VectorX
+  { Bad,             T_ILLEGAL,    "vectory:",      false, 0,                    relocInfo::none          },  // VectorY
+#elif defined(PPC64)
+  { Bad,             T_ILLEGAL,    "vectors:",      false, 0,                    relocInfo::none          },  // VectorS
+  { Bad,             T_ILLEGAL,    "vectord:",      false, Op_RegL,              relocInfo::none          },  // VectorD
+  { Bad,             T_ILLEGAL,    "vectorx:",      false, 0,                    relocInfo::none          },  // VectorX
+  { Bad,             T_ILLEGAL,    "vectory:",      false, 0,                    relocInfo::none          },  // VectorY
+#else // all other architectures
   { Bad,             T_ILLEGAL,    "vectors:",      false, Op_VecS,              relocInfo::none          },  // VectorS
   { Bad,             T_ILLEGAL,    "vectord:",      false, Op_VecD,              relocInfo::none          },  // VectorD
   { Bad,             T_ILLEGAL,    "vectorx:",      false, Op_VecX,              relocInfo::none          },  // VectorX
   { Bad,             T_ILLEGAL,    "vectory:",      false, Op_VecY,              relocInfo::none          },  // VectorY
-#else
-  { Bad,             T_ILLEGAL,    "vectors:",      false, 0,                    relocInfo::none          },  // VectorS
-  { Bad,             T_ILLEGAL,    "vectord:",      false, Op_RegD,              relocInfo::none          },  // VectorD
-  { Bad,             T_ILLEGAL,    "vectorx:",      false, 0,                    relocInfo::none          },  // VectorX
-  { Bad,             T_ILLEGAL,    "vectory:",      false, 0,                    relocInfo::none          },  // VectorY
-#endif // IA32 || AMD64
+#endif
   { Bad,             T_ADDRESS,    "anyptr:",       false, Op_RegP,              relocInfo::none          },  // AnyPtr
   { Bad,             T_ADDRESS,    "rawptr:",       false, Op_RegP,              relocInfo::none          },  // RawPtr
   { Bad,             T_OBJECT,     "oop:",          true,  Op_RegP,              relocInfo::oop_type      },  // OopPtr
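
On PPC64 this port has no dedicated vector nodes yet, so an 8-byte vector is carried in a scalar long register: the table above maps VectorD to Op_RegL, and TypeVect::make (further down in this file) gains a matching Op_RegL case so ideal-reg dispatch still yields a TypeVectD. A reduced sketch of that dispatch, with hypothetical enum values standing in for the Op_* constants:

// Reduced ideal-reg -> vector-type dispatch after the change.
#include <cassert>

enum IdealReg { Op_VecS, Op_VecD, Op_RegD, Op_RegL, Op_Unknown };
enum VectKind { VectS, VectD, VectNone };

static VectKind vect_for(IdealReg r) {
  switch (r) {
  case Op_VecS:  return VectS;
  case Op_RegL:  // PPC64: 8-byte vector lives in a long register
  case Op_VecD:
  case Op_RegD:  return VectD;
  default:       return VectNone;
  }
}

int main() {
  assert(vect_for(Op_RegL) == VectD);  // new PPC64 case
  assert(vect_for(Op_RegD) == VectD);  // existing SPARC case
  return 0;
}
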
@@ -236,6 +241,13 @@
   return !t1->eq(t2);           // Return ZERO if equal
 }
 
+const Type* Type::maybe_remove_speculative(bool include_speculative) const {
+  if (!include_speculative) {
+    return remove_speculative();
+  }
+  return this;
+}
+
 //------------------------------hash-------------------------------------------
 int Type::uhash( const Type *const t ) {
   return t->hash();
@@ -294,6 +306,7 @@
   TypeInt::POS1    = TypeInt::make(1,max_jint,   WidenMin); // Positive values
   TypeInt::INT     = TypeInt::make(min_jint,max_jint, WidenMax); // 32-bit integers
   TypeInt::SYMINT  = TypeInt::make(-max_jint,max_jint,WidenMin); // symmetric range
+  TypeInt::TYPE_DOMAIN  = TypeInt::INT;
   // CmpL is overloaded both as the bytecode computation returning
   // a trinary (-1,0,+1) integer result AND as an efficient long
   // compare returning optimizer ideal-type flags.
@@ -310,6 +323,7 @@
   TypeLong::LONG    = TypeLong::make(min_jlong,max_jlong,WidenMax); // 64-bit integers
   TypeLong::INT     = TypeLong::make((jlong)min_jint,(jlong)max_jint,WidenMin);
   TypeLong::UINT    = TypeLong::make(0,(jlong)max_juint,WidenMin);
+  TypeLong::TYPE_DOMAIN  = TypeLong::LONG;
 
   const Type **fboth =(const Type**)shared_type_arena->Amalloc_4(2*sizeof(Type*));
   fboth[0] = Type::CONTROL;
@@ -628,41 +642,44 @@
 //------------------------------meet-------------------------------------------
 // Compute the MEET of two types.  NOT virtual.  It enforces that meet is
 // commutative and the lattice is symmetric.
-const Type *Type::meet( const Type *t ) const {
+const Type *Type::meet_helper(const Type *t, bool include_speculative) const {
   if (isa_narrowoop() && t->isa_narrowoop()) {
-    const Type* result = make_ptr()->meet(t->make_ptr());
+    const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
     return result->make_narrowoop();
   }
   if (isa_narrowklass() && t->isa_narrowklass()) {
-    const Type* result = make_ptr()->meet(t->make_ptr());
+    const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
     return result->make_narrowklass();
   }
 
-  const Type *mt = xmeet(t);
+  const Type *this_t = maybe_remove_speculative(include_speculative);
+  t = t->maybe_remove_speculative(include_speculative);
+
+  const Type *mt = this_t->xmeet(t);
   if (isa_narrowoop() || t->isa_narrowoop()) return mt;
   if (isa_narrowklass() || t->isa_narrowklass()) return mt;
 #ifdef ASSERT
-  assert( mt == t->xmeet(this), "meet not commutative" );
+  assert(mt == t->xmeet(this_t), "meet not commutative");
   const Type* dual_join = mt->_dual;
   const Type *t2t    = dual_join->xmeet(t->_dual);
-  const Type *t2this = dual_join->xmeet(   _dual);
+  const Type *t2this = dual_join->xmeet(this_t->_dual);
 
   // Interface meet Oop is Not Symmetric:
   // Interface:AnyNull meet Oop:AnyNull == Interface:AnyNull
   // Interface:NotNull meet Oop:NotNull == java/lang/Object:NotNull
 
-  if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != _dual) ) {
+  if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != this_t->_dual) ) {
     tty->print_cr("=== Meet Not Symmetric ===");
-    tty->print("t   =                   ");         t->dump(); tty->cr();
-    tty->print("this=                   ");            dump(); tty->cr();
-    tty->print("mt=(t meet this)=       ");        mt->dump(); tty->cr();
-
-    tty->print("t_dual=                 ");  t->_dual->dump(); tty->cr();
-    tty->print("this_dual=              ");     _dual->dump(); tty->cr();
-    tty->print("mt_dual=                "); mt->_dual->dump(); tty->cr();
-
-    tty->print("mt_dual meet t_dual=    "); t2t      ->dump(); tty->cr();
-    tty->print("mt_dual meet this_dual= "); t2this   ->dump(); tty->cr();
+    tty->print("t   =                   ");              t->dump(); tty->cr();
+    tty->print("this=                   ");         this_t->dump(); tty->cr();
+    tty->print("mt=(t meet this)=       ");             mt->dump(); tty->cr();
+
+    tty->print("t_dual=                 ");       t->_dual->dump(); tty->cr();
+    tty->print("this_dual=              ");  this_t->_dual->dump(); tty->cr();
+    tty->print("mt_dual=                ");      mt->_dual->dump(); tty->cr();
+
+    tty->print("mt_dual meet t_dual=    "); t2t           ->dump(); tty->cr();
+    tty->print("mt_dual meet this_dual= "); t2this        ->dump(); tty->cr();
 
     fatal("meet not symmetric" );
   }
@@ -754,8 +771,8 @@
 }
 
 //-----------------------------filter------------------------------------------
-const Type *Type::filter( const Type *kills ) const {
-  const Type* ft = join(kills);
+const Type *Type::filter_helper(const Type *kills, bool include_speculative) const {
+  const Type* ft = join_helper(kills, include_speculative);
   if (ft->empty())
     return Type::TOP;           // Canonical empty value
   return ft;
@@ -1146,6 +1163,7 @@
 const TypeInt *TypeInt::POS1;   // Positive 32-bit integers
 const TypeInt *TypeInt::INT;    // 32-bit integers
 const TypeInt *TypeInt::SYMINT; // symmetric range [-max_jint..max_jint]
+const TypeInt *TypeInt::TYPE_DOMAIN; // alias for TypeInt::INT
 
 //------------------------------TypeInt----------------------------------------
 TypeInt::TypeInt( jint lo, jint hi, int w ) : Type(Int), _lo(lo), _hi(hi), _widen(w) {
@@ -1309,8 +1327,8 @@
 }
 
 //-----------------------------filter------------------------------------------
-const Type *TypeInt::filter( const Type *kills ) const {
-  const TypeInt* ft = join(kills)->isa_int();
+const Type *TypeInt::filter_helper(const Type *kills, bool include_speculative) const {
+  const TypeInt* ft = join_helper(kills, include_speculative)->isa_int();
   if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
@@ -1403,6 +1421,7 @@
 const TypeLong *TypeLong::LONG; // 64-bit integers
 const TypeLong *TypeLong::INT;  // 32-bit subrange
 const TypeLong *TypeLong::UINT; // 32-bit unsigned subrange
+const TypeLong *TypeLong::TYPE_DOMAIN; // alias for TypeLong::LONG
 
 //------------------------------TypeLong---------------------------------------
 TypeLong::TypeLong( jlong lo, jlong hi, int w ) : Type(Long), _lo(lo), _hi(hi), _widen(w) {
@@ -1570,8 +1589,8 @@
 }
 
 //-----------------------------filter------------------------------------------
-const Type *TypeLong::filter( const Type *kills ) const {
-  const TypeLong* ft = join(kills)->isa_long();
+const Type *TypeLong::filter_helper(const Type *kills, bool include_speculative) const {
+  const TypeLong* ft = join_helper(kills, include_speculative)->isa_long();
   if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   if (ft->_widen < this->_widen) {
@@ -1726,7 +1745,7 @@
     total_fields++;
     field_array = fields(total_fields);
     // Use get_const_type here because it respects UseUniqueSubclasses:
-    field_array[pos++] = get_const_type(recv)->join(TypePtr::NOTNULL);
+    field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
   } else {
     field_array = fields(total_fields);
   }
@@ -1916,7 +1935,7 @@
 
   case Array: {                 // Meeting 2 arrays?
     const TypeAry *a = t->is_ary();
-    return TypeAry::make(_elem->meet(a->_elem),
+    return TypeAry::make(_elem->meet_speculative(a->_elem),
                          _size->xmeet(a->_size)->is_int(),
                          _stable & a->_stable);
   }
@@ -1949,6 +1968,13 @@
   return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
 }
 
+/**
+ * Return same type without a speculative part in the element
+ */
+const Type* TypeAry::remove_speculative() const {
+  return make(_elem->remove_speculative(), _size, _stable);
+}
+
 //----------------------interface_vs_oop---------------------------------------
 #ifdef ASSERT
 bool TypeAry::interface_vs_oop(const Type *t) const {
@@ -2035,6 +2061,7 @@
   switch (Matcher::vector_ideal_reg(size)) {
   case Op_VecS:
     return (TypeVect*)(new TypeVectS(elem, length))->hashcons();
+  case Op_RegL:
   case Op_VecD:
   case Op_RegD:
     return (TypeVect*)(new TypeVectD(elem, length))->hashcons();
@@ -2436,7 +2463,7 @@
 const TypeOopPtr *TypeOopPtr::BOTTOM;
 
 //------------------------------TypeOopPtr-------------------------------------
-TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative)
+TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth)
   : TypePtr(t, ptr, offset),
     _const_oop(o), _klass(k),
     _klass_is_exact(xk),
@@ -2444,7 +2471,8 @@
     _is_ptr_to_narrowklass(false),
     _is_ptr_to_boxed_value(false),
     _instance_id(instance_id),
-    _speculative(speculative) {
+    _speculative(speculative),
+    _inline_depth(inline_depth) {
   if (Compile::current()->eliminate_boxing() && (t == InstPtr) &&
       (offset > 0) && xk && (k != 0) && k->is_instance_klass()) {
     _is_ptr_to_boxed_value = k->as_instance_klass()->is_boxed_value_offset(offset);
@@ -2511,12 +2539,12 @@
 
 //------------------------------make-------------------------------------------
 const TypeOopPtr *TypeOopPtr::make(PTR ptr,
-                                   int offset, int instance_id, const TypeOopPtr* speculative) {
+                                   int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth) {
   assert(ptr != Constant, "no constant generic pointers");
   ciKlass*  k = Compile::current()->env()->Object_klass();
   bool      xk = false;
   ciObject* o = NULL;
-  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id, speculative))->hashcons();
+  return (TypeOopPtr*)(new TypeOopPtr(OopPtr, ptr, k, xk, o, offset, instance_id, speculative, inline_depth))->hashcons();
 }
 
 
@@ -2524,7 +2552,7 @@
 const Type *TypeOopPtr::cast_to_ptr_type(PTR ptr) const {
   assert(_base == OopPtr, "subclass must override cast_to_ptr_type");
   if( ptr == _ptr ) return this;
-  return make(ptr, _offset, _instance_id, _speculative);
+  return make(ptr, _offset, _instance_id, _speculative, _inline_depth);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
@@ -2560,14 +2588,14 @@
     return res;
   }
 
-  if (res->isa_oopptr() != NULL) {
+  const TypeOopPtr* res_oopptr = res->is_oopptr();
+  if (res_oopptr->speculative() != NULL) {
     // type->speculative() == NULL means that speculation is no better
     // than type, i.e. type->speculative() == type. So there are 2
     // ways to represent the fact that we have no useful speculative
     // data and we should use a single one to be able to test for
     // equality between types. Check whether type->speculative() ==
     // type and set speculative to NULL if it is the case.
-    const TypeOopPtr* res_oopptr = res->is_oopptr();
     if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
       return res_oopptr->remove_speculative();
     }
@@ -2621,7 +2649,7 @@
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
       const TypeOopPtr* speculative = _speculative;
-      return make(ptr, offset, instance_id, speculative);
+      return make(ptr, offset, instance_id, speculative, _inline_depth);
     }
     case BotPTR:
     case NotNull:
@@ -2633,8 +2661,9 @@
   case OopPtr: {                 // Meeting to other OopPtrs
     const TypeOopPtr *tp = t->is_oopptr();
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tp);
-    return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative);
+    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
+    return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative, depth);
   }
 
   case InstPtr:                  // For these, flip the call around to cut down
@@ -2651,7 +2680,7 @@
 const Type *TypeOopPtr::xdual() const {
   assert(klass() == Compile::current()->env()->Object_klass(), "no klasses here");
   assert(const_oop() == NULL,             "no constants here");
-  return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative());
+  return new TypeOopPtr(_base, dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative(), dual_inline_depth());
 }
 
 //--------------------------make_from_klass_common-----------------------------
@@ -2742,7 +2771,7 @@
     } else if (!o->should_be_constant()) {
       return TypeAryPtr::make(TypePtr::NotNull, arr0, klass, true, 0);
     }
-    const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, NULL, is_autobox_cache);
+    const TypeAryPtr* arr = TypeAryPtr::make(TypePtr::Constant, o, arr0, klass, true, 0, InstanceBot, NULL, InlineDepthBottom, is_autobox_cache);
     return arr;
   } else if (klass->is_type_array_klass()) {
     // Element is a typeArray
@@ -2787,9 +2816,9 @@
 
 //-----------------------------filter------------------------------------------
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeOopPtr::filter(const Type *kills) const {
-
-  const Type* ft = join(kills);
+const Type *TypeOopPtr::filter_helper(const Type *kills, bool include_speculative) const {
+
+  const Type* ft = join_helper(kills, include_speculative);
   const TypeInstPtr* ftip = ft->isa_instptr();
   const TypeInstPtr* ktip = kills->isa_instptr();
 
@@ -2831,7 +2860,8 @@
   const TypeOopPtr *a = (const TypeOopPtr*)t;
   if (_klass_is_exact != a->_klass_is_exact ||
       _instance_id != a->_instance_id ||
-      !eq_speculative(a))  return false;
+      !eq_speculative(a) ||
+      _inline_depth != a->_inline_depth)  return false;
   ciObject* one = const_oop();
   ciObject* two = a->const_oop();
   if (one == NULL || two == NULL) {
@@ -2849,6 +2879,7 @@
     _klass_is_exact +
     _instance_id +
     hash_speculative() +
+    _inline_depth +
     TypePtr::hash();
 }
 
@@ -2869,6 +2900,7 @@
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
 
+  dump_inline_depth(st);
   dump_speculative(st);
 }
 
@@ -2882,6 +2914,16 @@
     st->print(")");
   }
 }
+
+void TypeOopPtr::dump_inline_depth(outputStream *st) const {
+  if (_inline_depth != InlineDepthBottom) {
+    if (_inline_depth == InlineDepthTop) {
+      st->print(" (inline_depth=InlineDepthTop)");
+    } else {
+      st->print(" (inline_depth=%d)", _inline_depth);
+    }
+  }
+}
 #endif
 
 //------------------------------singleton--------------------------------------
@@ -2895,14 +2937,62 @@
 
 //------------------------------add_offset-------------------------------------
 const TypePtr *TypeOopPtr::add_offset(intptr_t offset) const {
-  return make(_ptr, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
+  return make(_ptr, xadd_offset(offset), _instance_id, add_offset_speculative(offset), _inline_depth);
 }
 
 /**
  * Return same type without a speculative part
  */
-const TypeOopPtr* TypeOopPtr::remove_speculative() const {
-  return make(_ptr, _offset, _instance_id, NULL);
+const Type* TypeOopPtr::remove_speculative() const {
+  if (_speculative == NULL) {
+    return this;
+  }
+  assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
+  return make(_ptr, _offset, _instance_id, NULL, _inline_depth);
+}
+
+/**
+ * Return same type but with a different inline depth (used for speculation)
+ *
+ * @param depth  the new inline depth
+ */
+const TypeOopPtr* TypeOopPtr::with_inline_depth(int depth) const {
+  if (!UseInlineDepthForSpeculativeTypes) {
+    return this;
+  }
+  return make(_ptr, _offset, _instance_id, _speculative, depth);
+}
+
+/**
+ * Check whether new profiling would improve speculative type
+ *
+ * @param   exact_kls    class from profiling
+ * @param   inline_depth inlining depth of profile point
+ *
+ * @return  true if type profile is valuable
+ */
+bool TypeOopPtr::would_improve_type(ciKlass* exact_kls, int inline_depth) const {
+  // no way to improve an already exact type
+  if (klass_is_exact()) {
+    return false;
+  }
+  // no profiling?
+  if (exact_kls == NULL) {
+    return false;
+  }
+  // no speculative type or non exact speculative type?
+  if (speculative_type() == NULL) {
+    return true;
+  }
+  // If the node already has an exact speculative type keep it,
+  // unless it was provided by profiling that is at a deeper
+  // inlining level. Profiling at a higher inlining depth is
+  // expected to be less accurate.
+  if (_speculative->inline_depth() == InlineDepthBottom) {
+    return false;
+  }
+  assert(_speculative->inline_depth() != InlineDepthTop, "can't do the comparison");
+  return inline_depth < _speculative->inline_depth();
 }
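
would_improve_type boils down to four rules: an already exact type cannot improve; no profile data cannot help; any speculation beats none; and between two exact speculative types the shallower (closer-to-the-root, hence more accurate) profile wins. A reduced model with booleans and ints standing in for klass_is_exact(), the profiled class and the speculative inline depth; the INT_MAX sentinel is an assumption of this sketch:

// Reduced decision model of would_improve_type.
#include <cassert>
#include <climits>

static const int kInlineDepthBottom = INT_MAX;  // assumed "unknown depth" sentinel

static bool would_improve(bool already_exact, bool have_profile,
                          bool have_spec, int spec_depth, int profile_depth) {
  if (already_exact)  return false;  // nothing to gain
  if (!have_profile)  return false;  // nothing to gain from
  if (!have_spec)     return true;   // any speculation beats none
  if (spec_depth == kInlineDepthBottom) return false;  // depths not comparable
  return profile_depth < spec_depth;  // shallower profile is more accurate
}

int main() {
  assert( would_improve(false, true, false, 0, 3));  // first speculative type
  assert( would_improve(false, true, true,  3, 1));  // shallower profile wins
  assert(!would_improve(false, true, true,  1, 3));  // deeper profile loses
  return 0;
}
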
 
 //------------------------------meet_instance_id--------------------------------
@@ -2927,7 +3017,7 @@
  *
  * @param other  type to meet with
  */
-const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
+const TypeOopPtr* TypeOopPtr::xmeet_speculative(const TypeOopPtr* other) const {
   bool this_has_spec = (_speculative != NULL);
   bool other_has_spec = (other->speculative() != NULL);
 
@@ -2952,7 +3042,7 @@
     other_spec = other;
   }
 
-  return this_spec->meet(other_spec)->is_oopptr();
+  return this_spec->meet_speculative(other_spec)->is_oopptr();
 }
 
 /**
@@ -3005,6 +3095,21 @@
   return _speculative->hash();
 }
 
+/**
+ * dual of the inline depth for this type (used for speculation)
+ */
+int TypeOopPtr::dual_inline_depth() const {
+  return -inline_depth();
+}
+
+/**
+ * meet of 2 inline depths (used for speculation)
+ *
+ * @param depth  depth to meet with
+ */
+int TypeOopPtr::meet_inline_depth(int depth) const {
+  return MAX2(inline_depth(), depth);
+}
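
Depths meet with MAX2 and dualize by negation, so the derived join picks the smaller value: the meet of two speculative types is only as trustworthy as its deepest contributor, while a join keeps the shallower depth. A small demo of that arithmetic:

// Depth lattice demo: meet = MAX2, dual = negation, so the derived
// join (dual of meet of duals) is the minimum.
#include <algorithm>
#include <cassert>

static int dual_depth(int d)        { return -d; }
static int meet_depth(int a, int b) { return std::max(a, b); }  // MAX2
static int join_depth(int a, int b) {
  return dual_depth(meet_depth(dual_depth(a), dual_depth(b)));  // == min(a, b)
}

int main() {
  assert(meet_depth(2, 5) == 5);  // meet keeps the deeper depth
  assert(join_depth(2, 5) == 2);  // join keeps the shallower depth
  return 0;
}
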
 
 //=============================================================================
 // Convenience common pre-built types.
@@ -3015,8 +3120,8 @@
 const TypeInstPtr *TypeInstPtr::KLASS;
 
 //------------------------------TypeInstPtr-------------------------------------
-TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id, const TypeOopPtr* speculative)
-  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative), _name(k->name()) {
+TypeInstPtr::TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int off, int instance_id, const TypeOopPtr* speculative, int inline_depth)
+  : TypeOopPtr(InstPtr, ptr, k, xk, o, off, instance_id, speculative, inline_depth), _name(k->name()) {
    assert(k != NULL &&
           (k->is_loaded() || o == NULL),
           "cannot have constants with non-loaded klass");
@@ -3029,7 +3134,8 @@
                                      ciObject* o,
                                      int offset,
                                      int instance_id,
-                                     const TypeOopPtr* speculative) {
+                                     const TypeOopPtr* speculative,
+                                     int inline_depth) {
   assert( !k->is_loaded() || k->is_instance_klass(), "Must be for instance");
   // Either const_oop() is NULL or else ptr is Constant
   assert( (!o && ptr != Constant) || (o && ptr == Constant),
@@ -3050,7 +3156,7 @@
 
   // Now hash this baby
   TypeInstPtr *result =
+    (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o, offset, instance_id, speculative, inline_depth))->hashcons();
+    (TypeInstPtr*)(new TypeInstPtr(ptr, k, xk, o ,offset, instance_id, speculative, inline_depth))->hashcons();
 
   return result;
 }
@@ -3083,7 +3189,7 @@
   if( ptr == _ptr ) return this;
   // Reconstruct _sig info here since not a problem with later lazy
   // construction, _sig will show up on demand.
-  return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative);
+  return make(ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative, _inline_depth);
 }
 
 
@@ -3095,13 +3201,13 @@
   ciInstanceKlass* ik = _klass->as_instance_klass();
   if( (ik->is_final() || _const_oop) )  return this;  // cannot clear xk
   if( ik->is_interface() )              return this;  // cannot set xk
-  return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id, _speculative);
+  return make(ptr(), klass(), klass_is_exact, const_oop(), _offset, _instance_id, _speculative, _inline_depth);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
 const TypeOopPtr *TypeInstPtr::cast_to_instance_id(int instance_id) const {
   if( instance_id == _instance_id ) return this;
-  return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative);
+  return make(_ptr, klass(), _klass_is_exact, const_oop(), _offset, instance_id, _speculative, _inline_depth);
 }
 
 //------------------------------xmeet_unloaded---------------------------------
@@ -3111,7 +3217,8 @@
     int off = meet_offset(tinst->offset());
     PTR ptr = meet_ptr(tinst->ptr());
     int instance_id = meet_instance_id(tinst->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tinst);
+    const TypeOopPtr* speculative = xmeet_speculative(tinst);
+    int depth = meet_inline_depth(tinst->inline_depth());
 
     const TypeInstPtr *loaded    = is_loaded() ? this  : tinst;
     const TypeInstPtr *unloaded  = is_loaded() ? tinst : this;
@@ -3132,7 +3239,7 @@
       assert(loaded->ptr() != TypePtr::Null, "insanity check");
       //
       if(      loaded->ptr() == TypePtr::TopPTR ) { return unloaded; }
-      else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make(ptr, unloaded->klass(), false, NULL, off, instance_id, speculative); }
+      else if (loaded->ptr() == TypePtr::AnyNull) { return TypeInstPtr::make(ptr, unloaded->klass(), false, NULL, off, instance_id, speculative, depth); }
       else if (loaded->ptr() == TypePtr::BotPTR ) { return TypeInstPtr::BOTTOM; }
       else if (loaded->ptr() == TypePtr::Constant || loaded->ptr() == TypePtr::NotNull) {
         if (unloaded->ptr() == TypePtr::BotPTR  ) { return TypeInstPtr::BOTTOM;  }
@@ -3188,7 +3295,8 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tp);
+    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (ptr) {
     case TopPTR:
     case AnyNull:                // Fall 'down' to dual of object klass
@@ -3196,12 +3304,12 @@
       // below the centerline when the superclass is exact. We need to
       // do the same here.
       if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) {
-        return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative);
+        return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative, depth);
       } else {
         // cannot subclass, so the meet has to fall badly below the centerline
         ptr = NotNull;
         instance_id = InstanceBot;
-        return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
+        return TypeInstPtr::make( ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative, depth);
       }
     case Constant:
     case NotNull:
@@ -3216,7 +3324,7 @@
         if (klass()->equals(ciEnv::current()->Object_klass()) && !klass_is_exact()) {
           // that is, tp's array type is a subtype of my klass
           return TypeAryPtr::make(ptr, (ptr == Constant ? tp->const_oop() : NULL),
-                                  tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative);
+                                  tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id, speculative, depth);
         }
       }
       // The other case cannot happen, since I cannot be a subtype of an array.
@@ -3224,7 +3332,7 @@
       if( ptr == Constant )
          ptr = NotNull;
       instance_id = InstanceBot;
-      return make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative);
+      return make(ptr, ciEnv::current()->Object_klass(), false, NULL, offset, instance_id, speculative, depth);
     default: typerr(t);
     }
   }
@@ -3238,15 +3346,17 @@
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = meet_speculative(tp);
+      const TypeOopPtr* speculative = xmeet_speculative(tp);
+      int depth = meet_inline_depth(tp->inline_depth());
       return make(ptr, klass(), klass_is_exact(),
-                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
+                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, depth);
     }
     case NotNull:
     case BotPTR: {
       int instance_id = meet_instance_id(tp->instance_id());
-      const TypeOopPtr* speculative = meet_speculative(tp);
-      return TypeOopPtr::make(ptr, offset, instance_id, speculative);
+      const TypeOopPtr* speculative = xmeet_speculative(tp);
+      int depth = meet_inline_depth(tp->inline_depth());
+      return TypeOopPtr::make(ptr, offset, instance_id, speculative, depth);
     }
     default: typerr(t);
     }
@@ -3266,7 +3376,7 @@
       int instance_id = meet_instance_id(InstanceTop);
       const TypeOopPtr* speculative = _speculative;
       return make(ptr, klass(), klass_is_exact(),
-                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
+                  (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative, _inline_depth);
     }
     case NotNull:
     case BotPTR:
@@ -3297,14 +3407,15 @@
     int off = meet_offset( tinst->offset() );
     PTR ptr = meet_ptr( tinst->ptr() );
     int instance_id = meet_instance_id(tinst->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tinst);
+    const TypeOopPtr* speculative = xmeet_speculative(tinst);
+    int depth = meet_inline_depth(tinst->inline_depth());
 
     // Check for easy case; klasses are equal (and perhaps not loaded!)
     // If we have constants, then we created oops so classes are loaded
     // and we can handle the constants further down.  This case handles
     // both-not-loaded or both-loaded classes
     if (ptr != Constant && klass()->equals(tinst->klass()) && klass_is_exact() == tinst->klass_is_exact()) {
-      return make(ptr, klass(), klass_is_exact(), NULL, off, instance_id, speculative);
+      return make(ptr, klass(), klass_is_exact(), NULL, off, instance_id, speculative, depth);
     }
 
     // Classes require inspection in the Java klass hierarchy.  Must be loaded.
@@ -3368,7 +3479,7 @@
         // Find out which constant.
         o = (this_klass == klass()) ? const_oop() : tinst->const_oop();
       }
-      return make(ptr, k, xk, o, off, instance_id, speculative);
+      return make(ptr, k, xk, o, off, instance_id, speculative, depth);
     }
 
     // Either oop vs oop or interface vs interface or interface vs Object
@@ -3445,7 +3556,7 @@
         else
           ptr = NotNull;
       }
-      return make(ptr, this_klass, this_xk, o, off, instance_id, speculative);
+      return make(ptr, this_klass, this_xk, o, off, instance_id, speculative, depth);
     } // Else classes are not equal
 
     // Since klasses are different, we require a LCA in the Java
@@ -3456,7 +3567,7 @@
 
     // Now we find the LCA of Java classes
     ciKlass* k = this_klass->least_common_ancestor(tinst_klass);
-    return make(ptr, k, false, NULL, off, instance_id, speculative);
+    return make(ptr, k, false, NULL, off, instance_id, speculative, depth);
   } // End of case InstPtr
 
   } // End of switch
@@ -3480,7 +3591,7 @@
 // Dual: do NOT dual on klasses.  This means I do NOT understand the Java
 // inheritance mechanism.
 const Type *TypeInstPtr::xdual() const {
-  return new TypeInstPtr(dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative());
+  return new TypeInstPtr(dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id(), dual_speculative(), dual_inline_depth());
 }
 
 //------------------------------eq---------------------------------------------
@@ -3537,6 +3648,7 @@
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
 
+  dump_inline_depth(st);
   dump_speculative(st);
 }
 #endif
@@ -3546,8 +3658,19 @@
   return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
 }
 
-const TypeOopPtr *TypeInstPtr::remove_speculative() const {
-  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL);
+const Type *TypeInstPtr::remove_speculative() const {
+  if (_speculative == NULL) {
+    return this;
+  }
+  assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL, _inline_depth);
+}
+
+const TypeOopPtr *TypeInstPtr::with_inline_depth(int depth) const {
+  if (!UseInlineDepthForSpeculativeTypes) {
+    return this;
+  }
+  return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, _speculative, depth);
 }
 
 //=============================================================================
@@ -3564,30 +3687,30 @@
 const TypeAryPtr *TypeAryPtr::DOUBLES;
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   if (!xk)  xk = ary->ary_must_be_exact();
   assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
   if (!UseExactTypes)  xk = (ptr == Constant);
-  return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false, speculative))->hashcons();
+  return (TypeAryPtr*)(new TypeAryPtr(ptr, NULL, ary, k, xk, offset, instance_id, false, speculative, inline_depth))->hashcons();
 }
 
 //------------------------------make-------------------------------------------
-const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, bool is_autobox_cache) {
+const TypeAryPtr *TypeAryPtr::make(PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth, bool is_autobox_cache) {
   assert(!(k == NULL && ary->_elem->isa_int()),
          "integral arrays must be pre-equipped with a class");
   assert( (ptr==Constant && o) || (ptr!=Constant && !o), "" );
   if (!xk)  xk = (o != NULL) || ary->ary_must_be_exact();
   assert(instance_id <= 0 || xk || !UseExactTypes, "instances are always exactly typed");
   if (!UseExactTypes)  xk = (ptr == Constant);
-  return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, speculative))->hashcons();
+  return (TypeAryPtr*)(new TypeAryPtr(ptr, o, ary, k, xk, offset, instance_id, is_autobox_cache, speculative, inline_depth))->hashcons();
 }
 
 //------------------------------cast_to_ptr_type-------------------------------
 const Type *TypeAryPtr::cast_to_ptr_type(PTR ptr) const {
   if( ptr == _ptr ) return this;
-  return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative);
+  return make(ptr, const_oop(), _ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth);
 }
 
 
@@ -3596,13 +3719,13 @@
   if( klass_is_exact == _klass_is_exact ) return this;
   if (!UseExactTypes)  return this;
   if (_ary->ary_must_be_exact())  return this;  // cannot clear xk
-  return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id, _speculative);
+  return make(ptr(), const_oop(), _ary, klass(), klass_is_exact, _offset, _instance_id, _speculative, _inline_depth);
 }
 
 //-----------------------------cast_to_instance_id----------------------------
 const TypeOopPtr *TypeAryPtr::cast_to_instance_id(int instance_id) const {
   if( instance_id == _instance_id ) return this;
-  return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative);
+  return make(_ptr, const_oop(), _ary, klass(), _klass_is_exact, _offset, instance_id, _speculative, _inline_depth);
 }
 
 //-----------------------------narrow_size_type-------------------------------
@@ -3665,7 +3788,7 @@
   new_size = narrow_size_type(new_size);
   if (new_size == size())  return this;
   const TypeAry* new_ary = TypeAry::make(elem(), new_size, is_stable());
-  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative);
+  return make(ptr(), const_oop(), new_ary, klass(), klass_is_exact(), _offset, _instance_id, _speculative, _inline_depth);
 }
 
 
@@ -3744,19 +3867,20 @@
     const TypeOopPtr *tp = t->is_oopptr();
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (tp->ptr()) {
     case TopPTR:
     case AnyNull: {
       int instance_id = meet_instance_id(InstanceTop);
-      const TypeOopPtr* speculative = meet_speculative(tp);
+      const TypeOopPtr* speculative = xmeet_speculative(tp);
       return make(ptr, (ptr == Constant ? const_oop() : NULL),
-                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
+                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
     }
     case BotPTR:
     case NotNull: {
       int instance_id = meet_instance_id(tp->instance_id());
-      const TypeOopPtr* speculative = meet_speculative(tp);
-      return TypeOopPtr::make(ptr, offset, instance_id, speculative);
+      const TypeOopPtr* speculative = xmeet_speculative(tp);
+      return TypeOopPtr::make(ptr, offset, instance_id, speculative, depth);
     }
     default: ShouldNotReachHere();
     }
@@ -3780,7 +3904,7 @@
       int instance_id = meet_instance_id(InstanceTop);
       const TypeOopPtr* speculative = _speculative;
       return make(ptr, (ptr == Constant ? const_oop() : NULL),
-                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
+                  _ary, _klass, _klass_is_exact, offset, instance_id, speculative, _inline_depth);
     }
     default: ShouldNotReachHere();
     }
@@ -3793,10 +3917,11 @@
   case AryPtr: {                // Meeting 2 references?
     const TypeAryPtr *tap = t->is_aryptr();
     int off = meet_offset(tap->offset());
-    const TypeAry *tary = _ary->meet(tap->_ary)->is_ary();
+    const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary();
     PTR ptr = meet_ptr(tap->ptr());
     int instance_id = meet_instance_id(tap->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tap);
+    const TypeOopPtr* speculative = xmeet_speculative(tap);
+    int depth = meet_inline_depth(tap->inline_depth());
     ciKlass* lazy_klass = NULL;
     if (tary->_elem->isa_int()) {
       // Integral array element types have irrelevant lattice relations.
@@ -3812,17 +3937,17 @@
         tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
       }
     } else // Non integral arrays.
-    // Must fall to bottom if exact klasses in upper lattice
-    // are not equal or super klass is exact.
-    if ( above_centerline(ptr) && klass() != tap->klass() &&
-         // meet with top[] and bottom[] are processed further down:
-         tap ->_klass != NULL  && this->_klass != NULL   &&
-         // both are exact and not equal:
-        ((tap ->_klass_is_exact && this->_klass_is_exact) ||
-         // 'tap'  is exact and super or unrelated:
-         (tap ->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
-         // 'this' is exact and super or unrelated:
-         (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
+      // Must fall to bottom if exact klasses in upper lattice
+      // are not equal or super klass is exact.
+      if ((above_centerline(ptr) || ptr == Constant) && klass() != tap->klass() &&
+          // meet with top[] and bottom[] are processed further down:
+          tap->_klass != NULL  && this->_klass != NULL   &&
+          // both are exact and not equal:
+          ((tap->_klass_is_exact && this->_klass_is_exact) ||
+           // 'tap'  is exact and super or unrelated:
+           (tap->_klass_is_exact && !tap->klass()->is_subtype_of(klass())) ||
+           // 'this' is exact and super or unrelated:
+           (this->_klass_is_exact && !klass()->is_subtype_of(tap->klass())))) {
       tary = TypeAry::make(Type::BOTTOM, tary->_size, tary->_stable);
       return make(NotNull, NULL, tary, lazy_klass, false, off, InstanceBot);
     }
@@ -3837,7 +3962,7 @@
       } else {
         xk = (tap->_klass_is_exact | this->_klass_is_exact);
       }
-      return make(ptr, const_oop(), tary, lazy_klass, xk, off, instance_id, speculative);
+      return make(ptr, const_oop(), tary, lazy_klass, xk, off, instance_id, speculative, depth);
     case Constant: {
       ciObject* o = const_oop();
       if( _ptr == Constant ) {
@@ -3856,7 +3981,7 @@
         // Only precise for identical arrays
         xk = this->_klass_is_exact && (klass() == tap->klass());
       }
-      return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, instance_id, speculative);
+      return TypeAryPtr::make(ptr, o, tary, lazy_klass, xk, off, instance_id, speculative, depth);
     }
     case NotNull:
     case BotPTR:
@@ -3865,7 +3990,7 @@
             xk = tap->_klass_is_exact;
       else  xk = (tap->_klass_is_exact & this->_klass_is_exact) &&
               (klass() == tap->klass()); // Only precise for identical arrays
-      return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, instance_id, speculative);
+      return TypeAryPtr::make(ptr, NULL, tary, lazy_klass, xk, off, instance_id, speculative, depth);
     default: ShouldNotReachHere();
     }
   }
@@ -3876,7 +4001,8 @@
     int offset = meet_offset(tp->offset());
     PTR ptr = meet_ptr(tp->ptr());
     int instance_id = meet_instance_id(tp->instance_id());
-    const TypeOopPtr* speculative = meet_speculative(tp);
+    const TypeOopPtr* speculative = xmeet_speculative(tp);
+    int depth = meet_inline_depth(tp->inline_depth());
     switch (ptr) {
     case TopPTR:
     case AnyNull:                // Fall 'down' to dual of object klass
@@ -3884,12 +4010,12 @@
       // below the centerline when the superclass is exact. We need to
       // do the same here.
       if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) {
-        return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
+        return TypeAryPtr::make(ptr, _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
       } else {
         // cannot subclass, so the meet has to fall badly below the centerline
         ptr = NotNull;
         instance_id = InstanceBot;
-        return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative);
+        return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative, depth);
       }
     case Constant:
     case NotNull:
@@ -3904,7 +4030,7 @@
         if (tp->klass()->equals(ciEnv::current()->Object_klass()) && !tp->klass_is_exact()) {
           // that is, my array type is a subtype of 'tp' klass
           return make(ptr, (ptr == Constant ? const_oop() : NULL),
-                      _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
+                      _ary, _klass, _klass_is_exact, offset, instance_id, speculative, depth);
         }
       }
       // The other case cannot happen, since t cannot be a subtype of an array.
@@ -3912,7 +4038,7 @@
       if( ptr == Constant )
          ptr = NotNull;
       instance_id = InstanceBot;
-      return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative);
+      return TypeInstPtr::make(ptr, ciEnv::current()->Object_klass(), false, NULL,offset, instance_id, speculative, depth);
     default: typerr(t);
     }
   }
@@ -3923,7 +4049,7 @@
 //------------------------------xdual------------------------------------------
 // Dual: compute field-by-field dual
 const Type *TypeAryPtr::xdual() const {
-  return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative());
+  return new TypeAryPtr(dual_ptr(), _const_oop, _ary->dual()->is_ary(),_klass, _klass_is_exact, dual_offset(), dual_instance_id(), is_autobox_cache(), dual_speculative(), dual_inline_depth());
 }
 
 //----------------------interface_vs_oop---------------------------------------
@@ -3976,6 +4102,7 @@
   else if (_instance_id != InstanceBot)
     st->print(",iid=%d",_instance_id);
 
+  dump_inline_depth(st);
   dump_speculative(st);
 }
 #endif
@@ -3987,11 +4114,22 @@
 
 //------------------------------add_offset-------------------------------------
 const TypePtr *TypeAryPtr::add_offset(intptr_t offset) const {
-  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
-}
-
-const TypeOopPtr *TypeAryPtr::remove_speculative() const {
-  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL);
+  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset), _inline_depth);
+}
+
+const Type *TypeAryPtr::remove_speculative() const {
+  if (_speculative == NULL) {
+    return this;
+  }
+  assert(_inline_depth == InlineDepthTop || _inline_depth == InlineDepthBottom, "non speculative type shouldn't have inline depth");
+  return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL, _inline_depth);
+}
+
+const TypeOopPtr *TypeAryPtr::with_inline_depth(int depth) const {
+  if (!UseInlineDepthForSpeculativeTypes) {
+    return this;
+  }
+  return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, _speculative, depth);
 }
 
 //=============================================================================
@@ -4031,9 +4169,9 @@
 }
 
 
-const Type *TypeNarrowPtr::filter( const Type *kills ) const {
+const Type *TypeNarrowPtr::filter_helper(const Type *kills, bool include_speculative) const {
   if (isa_same_narrowptr(kills)) {
-    const Type* ft =_ptrtype->filter(is_same_narrowptr(kills)->_ptrtype);
+    const Type* ft =_ptrtype->filter_helper(is_same_narrowptr(kills)->_ptrtype, include_speculative);
     if (ft->empty())
       return Type::TOP;           // Canonical empty value
     if (ft->isa_ptr()) {
@@ -4041,7 +4179,7 @@
     }
     return ft;
   } else if (kills->isa_ptr()) {
-    const Type* ft = _ptrtype->join(kills);
+    const Type* ft = _ptrtype->join_helper(kills, include_speculative);
     if (ft->empty())
       return Type::TOP;           // Canonical empty value
     return ft;
@@ -4171,8 +4309,8 @@
 
 //-----------------------------filter------------------------------------------
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeMetadataPtr::filter( const Type *kills ) const {
-  const TypeMetadataPtr* ft = join(kills)->isa_metadataptr();
+const Type *TypeMetadataPtr::filter_helper(const Type *kills, bool include_speculative) const {
+  const TypeMetadataPtr* ft = join_helper(kills, include_speculative)->isa_metadataptr();
   if (ft == NULL || ft->empty())
     return Type::TOP;           // Canonical empty value
   return ft;
@@ -4374,10 +4512,10 @@
 }
 
 // Do not allow interface-vs.-noninterface joins to collapse to top.
-const Type *TypeKlassPtr::filter(const Type *kills) const {
+const Type *TypeKlassPtr::filter_helper(const Type *kills, bool include_speculative) const {
   // logic here mirrors the one from TypeOopPtr::filter. See comments
   // there.
-  const Type* ft = join(kills);
+  const Type* ft = join_helper(kills, include_speculative);
   const TypeKlassPtr* ftkp = ft->isa_klassptr();
   const TypeKlassPtr* ktkp = kills->isa_klassptr();
 
--- a/src/share/vm/opto/type.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/type.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -164,6 +164,8 @@
   virtual bool interface_vs_oop_helper(const Type *t) const;
 #endif
 
+  const Type *meet_helper(const Type *t, bool include_speculative) const;
+
 protected:
   // Each class of type is also identified by its base.
   const TYPES _base;            // Enum of Types type
@@ -171,6 +173,10 @@
   Type( TYPES t ) : _dual(NULL),  _base(t) {} // Simple types
   // ~Type();                   // Use fast deallocation
   const Type *hashcons();       // Hash-cons the type
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
+  const Type *join_helper(const Type *t, bool include_speculative) const {
+    return dual()->meet_helper(t->dual(), include_speculative)->dual();
+  }
 
 public:
 
@@ -202,10 +208,24 @@
   // Test for equivalence of types
   static int cmp( const Type *const t1, const Type *const t2 );
   // Test for higher or equal in lattice
-  int higher_equal( const Type *t ) const { return !cmp(meet(t),t); }
+  // Variant that drops the speculative part of the types
+  int higher_equal(const Type *t) const {
+    return !cmp(meet(t),t->remove_speculative());
+  }
+  // Variant that keeps the speculative part of the types
+  int higher_equal_speculative(const Type *t) const {
+    return !cmp(meet_speculative(t),t);
+  }
 
   // MEET operation; lower in lattice.
-  const Type *meet( const Type *t ) const;
+  // Variant that drops the speculative part of the types
+  const Type *meet(const Type *t) const {
+    return meet_helper(t, false);
+  }
+  // Variant that keeps the speculative part of the types
+  const Type *meet_speculative(const Type *t) const {
+    return meet_helper(t, true);
+  }
   // WIDEN: 'widens' for Ints and other range types
   virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
   // NARROW: complement for widen, used by pessimistic phases
@@ -221,13 +241,26 @@
 
   // JOIN operation; higher in lattice.  Done by finding the dual of the
   // meet of the dual of the 2 inputs.
-  const Type *join( const Type *t ) const {
-    return dual()->meet(t->dual())->dual(); }
+  // Variant that drops the speculative part of the types
+  const Type *join(const Type *t) const {
+    return join_helper(t, false);
+  }
+  // Variant that keeps the speculative part of the types
+  const Type *join_speculative(const Type *t) const {
+    return join_helper(t, true);
+  }
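
As the replaced one-liner shows, join is never computed directly: it is the
dual of the meet of the duals, and join_helper merely threads
include_speculative through that same identity. On a toy totally ordered
lattice where meet moves downward, the identity reduces to max:

    #include <algorithm>
    #include <cassert>

    // Toy lattice over ints: meet = min ("lower"), dual flips the order,
    // so join = dual(meet(dual, dual)) falls out as max.
    int meet(int a, int b) { return std::min(a, b); }
    int dual(int a)        { return -a; }
    int join(int a, int b) { return dual(meet(dual(a), dual(b))); }

    int main() {
        assert(join(3, 7) == 7);    // same as std::max(3, 7)
        assert(join(-2, 5) == 5);
    }
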
 
 // Modified version of JOIN adapted to the needs of Node::Value.
   // Normalizes all empty values to TOP.  Does not kill _widen bits.
   // Currently, it also works around limitations involving interface types.
-  virtual const Type *filter( const Type *kills ) const;
+  // Variant that drops the speculative part of the types
+  const Type *filter(const Type *kills) const {
+    return filter_helper(kills, false);
+  }
+  // Variant that keeps the speculative part of the types
+  const Type *filter_speculative(const Type *kills) const {
+    return filter_helper(kills, true);
+  }
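
The overall shape of this header change is the non-virtual-interface idiom:
meet, join, higher_equal and filter become public non-virtual wrappers that
pin down the include_speculative policy, while subclasses (TypeInt,
TypeLong, TypeOopPtr, TypeMetadataPtr, TypeKlassPtr, TypeNarrowPtr below)
override only the protected *_helper hooks. The idiom in isolation:

    #include <cstdio>

    class Base {
    protected:
        // Subclasses customize only this; the flag is policy, fixed by the wrappers.
        virtual const Base* filter_helper(const Base* kills, bool include_speculative) const {
            (void)include_speculative;
            return kills;
        }
    public:
        const Base* filter(const Base* kills) const             { return filter_helper(kills, false); }
        const Base* filter_speculative(const Base* kills) const { return filter_helper(kills, true); }
        virtual ~Base() {}
    };

    class Derived : public Base {
    protected:
        const Base* filter_helper(const Base* kills, bool include_speculative) const override {
            std::printf("include_speculative=%d\n", (int)include_speculative);
            return kills;
        }
    };

    int main() {
        Derived d; Base b;
        d.filter(&b);              // prints 0: the public API drops speculation
        d.filter_speculative(&b);  // prints 1
    }
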
 
 #ifdef ASSERT
   // One type is interface, the other is oop
@@ -382,7 +415,14 @@
                                         bool is_autobox_cache = false);
 
   // Speculative type. See TypeInstPtr
+  virtual const TypeOopPtr* speculative() const { return NULL; }
   virtual ciKlass* speculative_type() const { return NULL; }
+  const Type* maybe_remove_speculative(bool include_speculative) const;
+  virtual const Type* remove_speculative() const { return this; }
+
+  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const {
+    return exact_kls != NULL;
+  }
 
 private:
   // support arrays
@@ -450,12 +490,15 @@
 // upper bound, inclusive.
 class TypeInt : public Type {
   TypeInt( jint lo, jint hi, int w );
+protected:
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
+
 public:
+  typedef jint NativeType;
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
   virtual bool empty(void) const;        // TRUE if type is vacuous
-public:
   const jint _lo, _hi;          // Lower bound, upper bound
   const short _widen;           // Limit on times we widen this sucker
 
@@ -475,7 +518,6 @@
   virtual const Type *widen( const Type *t, const Type* limit_type ) const;
   virtual const Type *narrow( const Type *t ) const;
   // Do not kill _widen bits.
-  virtual const Type *filter( const Type *kills ) const;
   // Convenience common pre-built types.
   static const TypeInt *MINUS_1;
   static const TypeInt *ZERO;
@@ -495,6 +537,9 @@
   static const TypeInt *POS1;
   static const TypeInt *INT;
   static const TypeInt *SYMINT; // symmetric range [-max_jint..max_jint]
+  static const TypeInt *TYPE_DOMAIN; // alias for TypeInt::INT
+
+  static const TypeInt *as_self(const Type *t) { return t->is_int(); }
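
The new typedef jint NativeType (mirrored by jlong in TypeLong below),
together with TYPE_DOMAIN and as_self, gives the two integer range classes a
common compile-time shape, so a transform that is identical for 32-bit and
64-bit ranges can be written once as a template. A standalone sketch of the
pattern with toy range types:

    #include <cstdint>
    #include <cstdio>

    struct TyInt  { typedef int32_t NativeType; NativeType _lo, _hi; };
    struct TyLong { typedef int64_t NativeType; NativeType _lo, _hi; };

    // One implementation serves both domains, which is what the
    // NativeType/as_self additions are there to enable.
    template <class CT>
    bool contains(const CT& t, typename CT::NativeType v) {
        return t._lo <= v && v <= t._hi;
    }

    int main() {
        TyInt  i = {0, 100};
        TyLong l = {0, (int64_t)1 << 40};
        std::printf("%d %d\n", (int)contains(i, 42), (int)contains(l, (int64_t)1 << 39)); // 1 1
    }
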
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
 #endif
@@ -506,7 +551,11 @@
 // an upper bound, inclusive.
 class TypeLong : public Type {
   TypeLong( jlong lo, jlong hi, int w );
+protected:
+  // Do not kill _widen bits.
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
 public:
+  typedef jlong NativeType;
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
   virtual bool singleton(void) const;    // TRUE if type is a singleton
@@ -524,14 +573,16 @@
   bool is_con(int i) const { return is_con() && _lo == i; }
   jlong get_con() const { assert( is_con(), "" ); return _lo; }
 
+  // Check for positive 32-bit value.
+  int is_positive_int() const { return _lo >= 0 && _hi <= (jlong)max_jint; }
+
   virtual bool        is_finite() const;  // Has a finite value
 
+
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
   virtual const Type *widen( const Type *t, const Type* limit_type ) const;
   virtual const Type *narrow( const Type *t ) const;
-  // Do not kill _widen bits.
-  virtual const Type *filter( const Type *kills ) const;
   // Convenience common pre-built types.
   static const TypeLong *MINUS_1;
   static const TypeLong *ZERO;
@@ -540,6 +591,11 @@
   static const TypeLong *LONG;
   static const TypeLong *INT;    // 32-bit subrange [min_jint..max_jint]
   static const TypeLong *UINT;   // 32-bit unsigned [0..max_juint]
+  static const TypeLong *TYPE_DOMAIN; // alias for TypeLong::LONG
+
+  // static convenience methods.
+  static const TypeLong *as_self(const Type *t) { return t->is_long(); }
+
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint, outputStream *st  ) const;// Specialized per-Type dumping
 #endif
@@ -622,6 +678,7 @@
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
   bool ary_must_be_exact() const;  // true if arrays of such are never generic
+  virtual const Type* remove_speculative() const;
 #ifdef ASSERT
   // One type is interface, the other is oop
   virtual bool interface_vs_oop(const Type *t) const;
@@ -793,7 +850,7 @@
 // Some kind of oop (Java pointer), either klass or instance or array.
 class TypeOopPtr : public TypePtr {
 protected:
-  TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
+  TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -804,6 +861,10 @@
   };
 protected:
 
+  enum {
+    InlineDepthBottom = INT_MAX,
+    InlineDepthTop = -InlineDepthBottom
+  };
   // Oop is NULL, unless this is a constant oop.
   ciObject*     _const_oop;   // Constant oop
   // If _klass is NULL, then so is _sig.  This is an unloaded klass.
@@ -824,6 +885,11 @@
   // use it, then we have to emit a guard: this part of the type is
   // not something we know but something we speculate about the type.
   const TypeOopPtr*   _speculative;
+  // For speculative types, we record the inlining depth of the
+  // profiling point that provided the data. We want to favor
+  // profile data coming from outer scopes, which is likely better for
+  // the current compilation.
+  int _inline_depth;
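
The depth forms its own small lattice next to the type: InlineDepthBottom
(INT_MAX) means "no useful depth", InlineDepthTop is its mirror image,
dual_inline_depth presumably negates, and meet_inline_depth presumably moves
toward bottom by keeping the larger depth; the preference for outer-scope
(smaller-depth) profile data then surfaces in would_improve_type. A sketch
of that assumed behavior (the exact HotSpot bodies may differ):

    #include <algorithm>
    #include <cassert>
    #include <climits>

    enum { InlineDepthBottom = INT_MAX, InlineDepthTop = -InlineDepthBottom };

    int dual_inline_depth(int depth)    { return -depth; }          // assumed body
    int meet_inline_depth(int a, int b) { return std::max(a, b); }  // assumed body

    int main() {
        assert(meet_inline_depth(1, 3) == 3);               // deeper data is less useful
        assert(meet_inline_depth(2, InlineDepthTop) == 2);  // top is the identity
        assert(dual_inline_depth(InlineDepthTop) == InlineDepthBottom);
    }
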
 
   static const TypeOopPtr* make_from_klass_common(ciKlass* klass, bool klass_change, bool try_for_exact);
 
@@ -832,13 +898,22 @@
 
   // utility methods to work on the speculative part of the type
   const TypeOopPtr* dual_speculative() const;
-  const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
+  const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
   bool eq_speculative(const TypeOopPtr* other) const;
   int hash_speculative() const;
   const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
 #ifndef PRODUCT
   void dump_speculative(outputStream *st) const;
 #endif
+  // utility methods to work on the inline depth of the type
+  int dual_inline_depth() const;
+  int meet_inline_depth(int depth) const;
+#ifndef PRODUCT
+  void dump_inline_depth(outputStream *st) const;
+#endif
+
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
 
 public:
   // Creates a type given a klass. Correctly handles multi-dimensional arrays
@@ -866,7 +941,7 @@
                                               bool not_null_elements = false);
 
   // Make a generic (unclassed) pointer to an oop.
-  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative);
+  static const TypeOopPtr* make(PTR ptr, int offset, int instance_id, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
 
   ciObject* const_oop()    const { return _const_oop; }
   virtual ciKlass* klass() const { return _klass;     }
@@ -880,7 +955,7 @@
   bool is_known_instance()       const { return _instance_id > 0; }
   int  instance_id()             const { return _instance_id; }
   bool is_known_instance_field() const { return is_known_instance() && _offset >= 0; }
-  const TypeOopPtr* speculative() const { return _speculative; }
+  virtual const TypeOopPtr* speculative() const { return _speculative; }
 
   virtual intptr_t get_con() const;
 
@@ -895,16 +970,13 @@
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
   // Return same type without a speculative part
-  virtual const TypeOopPtr* remove_speculative() const;
+  virtual const Type* remove_speculative() const;
 
   virtual const Type *xmeet(const Type *t) const;
   virtual const Type *xdual() const;    // Compute dual right now.
   // the core of the computation of the meet for TypeOopPtr and for its subclasses
   virtual const Type *xmeet_helper(const Type *t) const;
 
-  // Do not allow interface-vs.-noninterface joins to collapse to top.
-  virtual const Type *filter( const Type *kills ) const;
-
   // Convenience common pre-built type.
   static const TypeOopPtr *BOTTOM;
 #ifndef PRODUCT
@@ -916,18 +988,23 @@
     if (_speculative != NULL) {
       const TypeOopPtr* speculative = _speculative->join(this)->is_oopptr();
       if (speculative->klass_is_exact()) {
-       return speculative->klass();
+        return speculative->klass();
       }
     }
     return NULL;
   }
+  int inline_depth() const {
+    return _inline_depth;
+  }
+  virtual const TypeOopPtr* with_inline_depth(int depth) const;
+  virtual bool would_improve_type(ciKlass* exact_kls, int inline_depth) const;
 };
 
 //------------------------------TypeInstPtr------------------------------------
 // Class of Java object pointers, pointing either to non-array Java instances
 // or to a Klass* (including array klasses).
 class TypeInstPtr : public TypeOopPtr {
-  TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative);
+  TypeInstPtr(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id, const TypeOopPtr* speculative, int inline_depth);
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
 
@@ -963,7 +1040,7 @@
   }
 
   // Make a pointer to an oop.
-  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
+  static const TypeInstPtr *make(PTR ptr, ciKlass* k, bool xk, ciObject* o, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
 
   /** Create constant type for a constant boxed value */
   const Type* get_const_boxed_value() const;
@@ -981,7 +1058,8 @@
 
   virtual const TypePtr *add_offset( intptr_t offset ) const;
   // Return same type without a speculative part
-  virtual const TypeOopPtr* remove_speculative() const;
+  virtual const Type* remove_speculative() const;
+  virtual const TypeOopPtr* with_inline_depth(int depth) const;
 
   // the core of the computation of the meet of 2 types
   virtual const Type *xmeet_helper(const Type *t) const;
@@ -1003,8 +1081,8 @@
 // Class of Java array pointers
 class TypeAryPtr : public TypeOopPtr {
   TypeAryPtr( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk,
-              int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative)
-    : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative),
+              int offset, int instance_id, bool is_autobox_cache, const TypeOopPtr* speculative, int inline_depth)
+    : TypeOopPtr(AryPtr,ptr,k,xk,o,offset, instance_id, speculative, inline_depth),
     _ary(ary),
     _is_autobox_cache(is_autobox_cache)
  {
@@ -1042,9 +1120,9 @@
 
   bool is_autobox_cache() const { return _is_autobox_cache; }
 
-  static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL);
+  static const TypeAryPtr *make( PTR ptr, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom);
   // Constant pointer to array
-  static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, bool is_autobox_cache = false);
+  static const TypeAryPtr *make( PTR ptr, ciObject* o, const TypeAry *ary, ciKlass* k, bool xk, int offset, int instance_id = InstanceBot, const TypeOopPtr* speculative = NULL, int inline_depth = InlineDepthBottom, bool is_autobox_cache = false);
 
   // Return a 'ptr' version of this type
   virtual const Type *cast_to_ptr_type(PTR ptr) const;
@@ -1059,7 +1137,8 @@
   virtual bool empty(void) const;        // TRUE if type is vacuous
   virtual const TypePtr *add_offset( intptr_t offset ) const;
   // Return same type without a speculative part
-  virtual const TypeOopPtr* remove_speculative() const;
+  virtual const Type* remove_speculative() const;
+  virtual const TypeOopPtr* with_inline_depth(int depth) const;
 
   // the core of the computation of the meet of 2 types
   virtual const Type *xmeet_helper(const Type *t) const;
@@ -1100,6 +1179,8 @@
 class TypeMetadataPtr : public TypePtr {
 protected:
   TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset);
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -1125,9 +1206,6 @@
 
   virtual intptr_t get_con() const;
 
-  // Do not allow interface-vs.-noninterface joins to collapse to top.
-  virtual const Type *filter( const Type *kills ) const;
-
   // Convenience common pre-built types.
   static const TypeMetadataPtr *BOTTOM;
 
@@ -1141,6 +1219,8 @@
 class TypeKlassPtr : public TypePtr {
   TypeKlassPtr( PTR ptr, ciKlass* klass, int offset );
 
+protected:
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  public:
   virtual bool eq( const Type *t ) const;
   virtual int hash() const;             // Type specific hashing
@@ -1202,9 +1282,6 @@
 
   virtual intptr_t get_con() const;
 
-  // Do not allow interface-vs.-noninterface joins to collapse to top.
-  virtual const Type *filter( const Type *kills ) const;
-
   // Convenience common pre-built types.
   static const TypeKlassPtr* OBJECT; // Not-null object klass or below
   static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
@@ -1228,6 +1305,8 @@
   virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0;
   virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0;
   virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0;
+  // Do not allow interface-vs.-noninterface joins to collapse to top.
+  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
 public:
   virtual bool eq( const Type *t ) const;
   virtual int  hash() const;             // Type specific hashing
@@ -1238,9 +1317,6 @@
 
   virtual intptr_t get_con() const;
 
-  // Do not allow interface-vs.-noninterface joins to collapse to top.
-  virtual const Type *filter( const Type *kills ) const;
-
   virtual bool empty(void) const;        // TRUE if type is vacuous
 
   // returns the equivalent ptr type for this compressed pointer
@@ -1291,6 +1367,10 @@
   static const TypeNarrowOop *BOTTOM;
   static const TypeNarrowOop *NULL_PTR;
 
+  virtual const Type* remove_speculative() const {
+    return make(_ptrtype->remove_speculative()->is_ptr());
+  }
+
 #ifndef PRODUCT
   virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
 #endif
--- a/src/share/vm/opto/vectornode.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/opto/vectornode.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -356,7 +356,7 @@
 class LoadVectorNode : public LoadNode {
  public:
   LoadVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeVect* vt)
-    : LoadNode(c, mem, adr, at, vt) {
+    : LoadNode(c, mem, adr, at, vt, MemNode::unordered) {
     init_class_id(Class_LoadVector);
   }
 
@@ -380,7 +380,7 @@
 class StoreVectorNode : public StoreNode {
  public:
   StoreVectorNode(Node* c, Node* mem, Node* adr, const TypePtr* at, Node* val)
-    : StoreNode(c, mem, adr, at, val) {
+    : StoreNode(c, mem, adr, at, val, MemNode::unordered) {
     assert(val->is_Vector() || val->is_LoadVector(), "sanity");
     init_class_id(Class_StoreVector);
   }
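
These two constructor changes track an interface change to LoadNode and
StoreNode made elsewhere in this changeset: memory nodes now take an
explicit memory-ordering argument, and vector accesses opt into
MemNode::unordered since SIMD loads and stores carry no acquire/release
semantics. The idea in miniature (toy classes, not the HotSpot node
hierarchy):

    #include <cstdio>

    struct MemNode {
        enum MemOrd { unordered, acquire, release };
    };

    struct LoadNode {
        MemNode::MemOrd _ord;
        // The ordering is a required constructor argument, so every
        // subclass must state its intent explicitly.
        explicit LoadNode(MemNode::MemOrd ord) : _ord(ord) {}
    };

    struct LoadVectorNode : LoadNode {
        LoadVectorNode() : LoadNode(MemNode::unordered) {}  // vectors: plain memory
    };

    int main() {
        LoadVectorNode lv;
        std::printf("%d\n", (int)(lv._ord == MemNode::unordered));  // 1
    }
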
--- a/src/share/vm/precompiled/precompiled.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/precompiled/precompiled.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/forte.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/forte.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,7 +71,7 @@
 // Native interfaces for use by Forte tools.
 
 
-#ifndef IA64
+#if !defined(IA64) && !defined(PPC64)
 
 class vframeStreamForte : public vframeStreamCommon {
  public:
@@ -613,7 +613,7 @@
 #ifdef __APPLE__
 // XXXDARWIN: Link errors occur even when __attribute__((weak_import))
 // is added
-#define collector_func_load(x0,x1,x2,x3,x4,x5,x6) (0)
+#define collector_func_load(x0,x1,x2,x3,x4,x5,x6) ((void) 0)
 #else
 void    collector_func_load(char* name,
                             void* null_argument_1,
@@ -629,16 +629,16 @@
 #endif // !_WINDOWS
 
 } // end extern "C"
-#endif // !IA64
+#endif // !IA64 && !PPC64
 
 void Forte::register_stub(const char* name, address start, address end) {
-#if !defined(_WINDOWS) && !defined(IA64)
+#if !defined(_WINDOWS) && !defined(IA64) && !defined(PPC64)
   assert(pointer_delta(end, start, sizeof(jbyte)) < INT_MAX,
          "Code size exceeds maximum range");
 
   collector_func_load((char*)name, NULL, NULL, start,
     pointer_delta(end, start, sizeof(jbyte)), 0, NULL);
-#endif // !_WINDOWS && !IA64
+#endif // !_WINDOWS && !IA64 && !PPC64
 }
 
 #else // INCLUDE_JVMTI
--- a/src/share/vm/prims/jni.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jni.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -117,33 +117,6 @@
 //   return_value = 5;
 //   return return_value;
 // JNI_END
-#ifndef USDT2
-#define DT_RETURN_MARK_DECL(name, type)                                    \
-  HS_DTRACE_PROBE_DECL1(hotspot_jni, name##__return, type);                \
-  DTRACE_ONLY(                                                             \
-    class DTraceReturnProbeMark_##name {                                   \
-     public:                                                               \
-      const type& _ret_ref;                                                \
-      DTraceReturnProbeMark_##name(const type& v) : _ret_ref(v) {}         \
-      ~DTraceReturnProbeMark_##name() {                                    \
-        HS_DTRACE_PROBE1(hotspot_jni, name##__return, _ret_ref);           \
-      }                                                                    \
-    }                                                                      \
-  )
-// Void functions are simpler since there's no return value
-#define DT_VOID_RETURN_MARK_DECL(name)                                     \
-  HS_DTRACE_PROBE_DECL0(hotspot_jni, name##__return);                      \
-  DTRACE_ONLY(                                                             \
-    class DTraceReturnProbeMark_##name {                                   \
-     public:                                                               \
-      ~DTraceReturnProbeMark_##name() {                                    \
-        HS_DTRACE_PROBE0(hotspot_jni, name##__return);                     \
-      }                                                                    \
-    }                                                                      \
-  )
-
-#else /* USDT2 */
-
 #define DT_RETURN_MARK_DECL(name, type, probe)                             \
   DTRACE_ONLY(                                                             \
     class DTraceReturnProbeMark_##name {                                   \
@@ -165,7 +138,6 @@
       }                                                                    \
     }                                                                      \
   )
-#endif /* USDT2 */
 
 // Place these macros in the function to mark the return.  Non-void
 // functions need the type and address of the return value.
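
The macros kept here (the old pre-USDT2 twins are deleted above) rest on an
RAII trick: DT_RETURN_MARK declares a local object holding a reference to
the return slot, and its destructor fires the *_RETURN probe, so the probe
runs on every exit path, early returns and exceptions included, with the
slot's final value. A standalone illustration with printf standing in for
the dtrace probe:

    #include <cstdio>

    // Stand-in for HOTSPOT_JNI_*_RETURN(...): report the value at scope exit.
    struct ReturnMark {
        const int& _ret_ref;                  // a reference, so it sees later writes
        explicit ReturnMark(const int& v) : _ret_ref(v) {}
        ~ReturnMark() { std::printf("return probe: %d\n", _ret_ref); }
    };

    int f(bool early) {
        int ret = -1;
        ReturnMark mark(ret);                 // like DT_RETURN_MARK(name, type, ret)
        if (early) return ret;                // probe still fires, reports -1
        ret = 42;
        return ret;                           // probe fires, reports 42
    }

    int main() { f(true); f(false); }
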
@@ -194,15 +166,9 @@
 
 // Choose DT_RETURN_MARK macros  based on the type: float/double -> void
 // (dtrace doesn't do FP yet)
-#ifndef USDT2
-#define DT_RETURN_MARK_DECL_FOR(TypeName, name, type) \
-  FP_SELECT(TypeName, \
-    DT_RETURN_MARK_DECL(name, type), DT_VOID_RETURN_MARK_DECL(name) )
-#else /* USDT2 */
 #define DT_RETURN_MARK_DECL_FOR(TypeName, name, type, probe)    \
   FP_SELECT(TypeName, \
     DT_RETURN_MARK_DECL(name, type, probe), DT_VOID_RETURN_MARK_DECL(name, probe) )
-#endif /* USDT2 */
 #define DT_RETURN_MARK_FOR(TypeName, name, type, ref) \
   FP_SELECT(TypeName, \
     DT_RETURN_MARK(name, type, ref), DT_VOID_RETURN_MARK(name) )
@@ -361,24 +327,16 @@
 
 // Implementation of JNI entries
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(DefineClass, jclass);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(DefineClass, jclass
                     , HOTSPOT_JNI_DEFINECLASS_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderRef,
                                   const jbyte *buf, jsize bufLen))
   JNIWrapper("DefineClass");
 
-#ifndef USDT2
-  DTRACE_PROBE5(hotspot_jni, DefineClass__entry,
-    env, name, loaderRef, buf, bufLen);
-#else /* USDT2 */
   HOTSPOT_JNI_DEFINECLASS_ENTRY(
     env, (char*) name, loaderRef, (char*) buf, bufLen);
-#endif /* USDT2 */
+
   jclass cls = NULL;
   DT_RETURN_MARK(DefineClass, jclass, (const jclass&)cls);
 
@@ -424,21 +382,13 @@
 
 static bool first_time_FindClass = true;
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(FindClass, jclass);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(FindClass, jclass
                     , HOTSPOT_JNI_FINDCLASS_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jclass, jni_FindClass(JNIEnv *env, const char *name))
   JNIWrapper("FindClass");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, FindClass__entry, env, name);
-#else /* USDT2 */
-  HOTSPOT_JNI_FINDCLASS_ENTRY(
-                              env, (char *)name);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_FINDCLASS_ENTRY(env, (char *)name);
 
   jclass result = NULL;
   DT_RETURN_MARK(FindClass, jclass, (const jclass&)result);
@@ -502,21 +452,14 @@
   return result;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(FromReflectedMethod, jmethodID);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(FromReflectedMethod, jmethodID
                     , HOTSPOT_JNI_FROMREFLECTEDMETHOD_RETURN((uintptr_t)_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jmethodID, jni_FromReflectedMethod(JNIEnv *env, jobject method))
   JNIWrapper("FromReflectedMethod");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, FromReflectedMethod__entry, env, method);
-#else /* USDT2 */
-  HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(
-                                        env, method);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(env, method);
+
   jmethodID ret = NULL;
   DT_RETURN_MARK(FromReflectedMethod, jmethodID, (const jmethodID&)ret);
 
@@ -543,21 +486,14 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(FromReflectedField, jfieldID);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(FromReflectedField, jfieldID
                     , HOTSPOT_JNI_FROMREFLECTEDFIELD_RETURN((uintptr_t)_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jfieldID, jni_FromReflectedField(JNIEnv *env, jobject field))
   JNIWrapper("FromReflectedField");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, FromReflectedField__entry, env, field);
-#else /* USDT2 */
-  HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(
-                                       env, field);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(env, field);
+
   jfieldID ret = NULL;
   DT_RETURN_MARK(FromReflectedField, jfieldID, (const jfieldID&)ret);
 
@@ -592,21 +528,15 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(ToReflectedMethod, jobject);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(ToReflectedMethod, jobject
                     , HOTSPOT_JNI_TOREFLECTEDMETHOD_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_ToReflectedMethod(JNIEnv *env, jclass cls, jmethodID method_id, jboolean isStatic))
   JNIWrapper("ToReflectedMethod");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, ToReflectedMethod__entry, env, cls, method_id, isStatic);
-#else /* USDT2 */
-  HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(
-                                      env, cls, (uintptr_t) method_id, isStatic);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(env, cls, (uintptr_t) method_id, isStatic);
+
   jobject ret = NULL;
   DT_RETURN_MARK(ToReflectedMethod, jobject, (const jobject&)ret);
 
@@ -622,21 +552,14 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetSuperclass, jclass);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetSuperclass, jclass
                     , HOTSPOT_JNI_GETSUPERCLASS_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jclass, jni_GetSuperclass(JNIEnv *env, jclass sub))
   JNIWrapper("GetSuperclass");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetSuperclass__entry, env, sub);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSUPERCLASS_ENTRY(
-                                  env, sub);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETSUPERCLASS_ENTRY(env, sub);
+
   jclass obj = NULL;
   DT_RETURN_MARK(GetSuperclass, jclass, (const jclass&)obj);
 
@@ -665,23 +588,16 @@
 
 JNI_QUICK_ENTRY(jboolean, jni_IsAssignableFrom(JNIEnv *env, jclass sub, jclass super))
   JNIWrapper("IsSubclassOf");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, IsAssignableFrom__entry, env, sub, super);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(
-                                     env, sub, super);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(env, sub, super);
+
   oop sub_mirror   = JNIHandles::resolve_non_null(sub);
   oop super_mirror = JNIHandles::resolve_non_null(super);
   if (java_lang_Class::is_primitive(sub_mirror) ||
       java_lang_Class::is_primitive(super_mirror)) {
     jboolean ret = (sub_mirror == super_mirror);
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
-#else /* USDT2 */
-    HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(
-                                        ret);
-#endif /* USDT2 */
+
+    HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
     return ret;
   }
   Klass* sub_klass   = java_lang_Class::as_Klass(sub_mirror);
@@ -689,30 +605,20 @@
   assert(sub_klass != NULL && super_klass != NULL, "invalid arguments to jni_IsAssignableFrom");
   jboolean ret = sub_klass->is_subtype_of(super_klass) ?
                    JNI_TRUE : JNI_FALSE;
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, IsAssignableFrom__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(
-                                      ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(ret);
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(Throw, jint);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(Throw, jint
                     , HOTSPOT_JNI_THROW_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jint, jni_Throw(JNIEnv *env, jthrowable obj))
   JNIWrapper("Throw");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, Throw__entry, env, obj);
-#else /* USDT2 */
-  HOTSPOT_JNI_THROW_ENTRY(
- env, obj);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_THROW_ENTRY(env, obj);
+
   jint ret = JNI_OK;
   DT_RETURN_MARK(Throw, jint, (const jint&)ret);
 
@@ -720,21 +626,15 @@
   ShouldNotReachHere();
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(ThrowNew, jint);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(ThrowNew, jint
                     , HOTSPOT_JNI_THROWNEW_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jint, jni_ThrowNew(JNIEnv *env, jclass clazz, const char *message))
   JNIWrapper("ThrowNew");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, ThrowNew__entry, env, clazz, message);
-#else /* USDT2 */
-  HOTSPOT_JNI_THROWNEW_ENTRY(
-                             env, clazz, (char *) message);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_THROWNEW_ENTRY(env, clazz, (char *) message);
+
   jint ret = JNI_OK;
   DT_RETURN_MARK(ThrowNew, jint, (const jint&)ret);
 
@@ -763,33 +663,23 @@
 
 JNI_ENTRY_NO_PRESERVE(jthrowable, jni_ExceptionOccurred(JNIEnv *env))
   JNIWrapper("ExceptionOccurred");
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__entry, env);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(
-                                      env);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(env);
+
   jni_check_async_exceptions(thread);
   oop exception = thread->pending_exception();
   jthrowable ret = (jthrowable) JNIHandles::make_local(env, exception);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionOccurred__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(
-                                       ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY_NO_PRESERVE(void, jni_ExceptionDescribe(JNIEnv *env))
   JNIWrapper("ExceptionDescribe");
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionDescribe__entry, env);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(
-                                      env);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(env);
+
   if (thread->has_pending_exception()) {
     Handle ex(thread, thread->pending_exception());
     thread->clear_pending_exception();
@@ -825,23 +715,15 @@
       }
     }
   }
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ExceptionDescribe__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN(
-                                       );
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN();
 JNI_END
 
 
 JNI_QUICK_ENTRY(void, jni_ExceptionClear(JNIEnv *env))
   JNIWrapper("ExceptionClear");
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionClear__entry, env);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(
-                                   env);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(env);
 
   // The jni code might be using this API to clear java thrown exception.
   // So just mark jvmti thread exception state as exception caught.
@@ -850,23 +732,16 @@
     state->set_exception_caught();
   }
   thread->clear_pending_exception();
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ExceptionClear__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN(
-                                    );
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN();
 JNI_END
 
 
 JNI_ENTRY(void, jni_FatalError(JNIEnv *env, const char *msg))
   JNIWrapper("FatalError");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, FatalError__entry, env, msg);
-#else /* USDT2 */
-  HOTSPOT_JNI_FATALERROR_ENTRY(
-                               env, (char *) msg);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_FATALERROR_ENTRY(env, (char *) msg);
+
   tty->print_cr("FATAL ERROR in native method: %s", msg);
   thread->print_stack();
   os::abort(); // Dump core and abort
@@ -875,20 +750,12 @@
 
 JNI_ENTRY(jint, jni_PushLocalFrame(JNIEnv *env, jint capacity))
   JNIWrapper("PushLocalFrame");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, PushLocalFrame__entry, env, capacity);
-#else /* USDT2 */
-  HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(
-                                   env, capacity);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(env, capacity);
+
   //%note jni_11
   if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) {
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR);
-#else /* USDT2 */
-    HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(
-                                      (uint32_t)JNI_ERR);
-#endif /* USDT2 */
+    HOTSPOT_JNI_PUSHLOCALFRAME_RETURN((uint32_t)JNI_ERR);
     return JNI_ERR;
   }
   JNIHandleBlock* old_handles = thread->active_handles();
@@ -897,24 +764,16 @@
   new_handles->set_pop_frame_link(old_handles);
   thread->set_active_handles(new_handles);
   jint ret = JNI_OK;
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(
-                                    ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY(jobject, jni_PopLocalFrame(JNIEnv *env, jobject result))
   JNIWrapper("PopLocalFrame");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, PopLocalFrame__entry, env, result);
-#else /* USDT2 */
-  HOTSPOT_JNI_POPLOCALFRAME_ENTRY(
-                                  env, result);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_POPLOCALFRAME_ENTRY(env, result);
+
   //%note jni_11
   Handle result_handle(thread, JNIHandles::resolve(result));
   JNIHandleBlock* old_handles = thread->active_handles();
@@ -929,141 +788,91 @@
     JNIHandleBlock::release_block(old_handles, thread); // may block
     result = JNIHandles::make_local(thread, result_handle());
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, PopLocalFrame__return, result);
-#else /* USDT2 */
-  HOTSPOT_JNI_POPLOCALFRAME_RETURN(
-                                   result);
-#endif /* USDT2 */
+  HOTSPOT_JNI_POPLOCALFRAME_RETURN(result);
   return result;
 JNI_END
 
 
 JNI_ENTRY(jobject, jni_NewGlobalRef(JNIEnv *env, jobject ref))
   JNIWrapper("NewGlobalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, NewGlobalRef__entry, env, ref);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWGLOBALREF_ENTRY(
-                                 env, ref);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWGLOBALREF_ENTRY(env, ref);
+
   Handle ref_handle(thread, JNIHandles::resolve(ref));
   jobject ret = JNIHandles::make_global(ref_handle);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, NewGlobalRef__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWGLOBALREF_RETURN(
-                                  ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWGLOBALREF_RETURN(ret);
   return ret;
 JNI_END
 
 // Must be JNI_ENTRY (with HandleMark)
 JNI_ENTRY_NO_PRESERVE(void, jni_DeleteGlobalRef(JNIEnv *env, jobject ref))
   JNIWrapper("DeleteGlobalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, DeleteGlobalRef__entry, env, ref);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(
-                                    env, ref);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(env, ref);
+
   JNIHandles::destroy_global(ref);
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, DeleteGlobalRef__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETEGLOBALREF_RETURN(
-                                     );
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_DELETEGLOBALREF_RETURN();
 JNI_END
 
 JNI_QUICK_ENTRY(void, jni_DeleteLocalRef(JNIEnv *env, jobject obj))
   JNIWrapper("DeleteLocalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, DeleteLocalRef__entry, env, obj);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETELOCALREF_ENTRY(
-                                   env, obj);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_DELETELOCALREF_ENTRY(env, obj);
+
   JNIHandles::destroy_local(obj);
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, DeleteLocalRef__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETELOCALREF_RETURN(
-                                    );
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_DELETELOCALREF_RETURN();
 JNI_END
 
 JNI_QUICK_ENTRY(jboolean, jni_IsSameObject(JNIEnv *env, jobject r1, jobject r2))
   JNIWrapper("IsSameObject");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, IsSameObject__entry, env, r1, r2);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(
-                                 env, r1, r2);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(env, r1, r2);
+
   oop a = JNIHandles::resolve(r1);
   oop b = JNIHandles::resolve(r2);
   jboolean ret = (a == b) ? JNI_TRUE : JNI_FALSE;
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, IsSameObject__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISSAMEOBJECT_RETURN(
-                                  ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISSAMEOBJECT_RETURN(ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY(jobject, jni_NewLocalRef(JNIEnv *env, jobject ref))
   JNIWrapper("NewLocalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, NewLocalRef__entry, env, ref);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWLOCALREF_ENTRY(
-                                env, ref);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWLOCALREF_ENTRY(env, ref);
+
   jobject ret = JNIHandles::make_local(env, JNIHandles::resolve(ref));
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, NewLocalRef__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWLOCALREF_RETURN(
-                                 ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWLOCALREF_RETURN(ret);
   return ret;
 JNI_END
 
 JNI_LEAF(jint, jni_EnsureLocalCapacity(JNIEnv *env, jint capacity))
   JNIWrapper("EnsureLocalCapacity");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, EnsureLocalCapacity__entry, env, capacity);
-#else /* USDT2 */
-  HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(
-                                        env, capacity);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(env, capacity);
+
   jint ret;
   if (capacity >= 0 && capacity <= MAX_REASONABLE_LOCAL_CAPACITY) {
     ret = JNI_OK;
   } else {
     ret = JNI_ERR;
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, EnsureLocalCapacity__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(
-                                         ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(ret);
   return ret;
 JNI_END
 
 // Return the Handle Type
 JNI_LEAF(jobjectRefType, jni_GetObjectRefType(JNIEnv *env, jobject obj))
   JNIWrapper("GetObjectRefType");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetObjectRefType__entry, env, obj);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(
-                                     env, obj);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(env, obj);
+
   jobjectRefType ret;
   if (JNIHandles::is_local_handle(thread, obj) ||
       JNIHandles::is_frame_handle(thread, obj))
@@ -1074,12 +883,8 @@
     ret = JNIWeakGlobalRefType;
   else
     ret = JNIInvalidRefType;
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetObjectRefType__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN(
-                                      (void *) ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN((void *) ret);
   return ret;
 JNI_END
 
@@ -1359,9 +1164,13 @@
       // interface call
       KlassHandle h_holder(THREAD, holder);
 
-      int itbl_index = m->itable_index();
-      Klass* k = h_recv->klass();
-      selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
+      if (call_type == JNI_VIRTUAL) {
+        int itbl_index = m->itable_index();
+        Klass* k = h_recv->klass();
+        selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
+      } else {
+        selected_method = m;
+      }
     }
   }
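
The guard added above fixes dispatch selection: the itable lookup is only
valid for JNI_VIRTUAL calls on interface-declared methods, whereas
CallNonvirtual<Type>Method must execute exactly the resolved Method*, which
is what the new else branch restores. From the JNI side, the two entry
points differ like this (illustrative helper; mid is assumed to name an
interface method and cls its declaring interface):

    #include <jni.h>

    void call_both_ways(JNIEnv* env, jobject obj, jclass cls, jmethodID mid) {
        // Virtual dispatch: selected through the receiver's itable.
        env->CallVoidMethod(obj, mid);
        // Nonvirtual dispatch: runs exactly the resolved method, no itable.
        env->CallNonvirtualVoidMethod(obj, cls, mid);
    }
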
 
@@ -1394,28 +1203,24 @@
 
 static instanceOop alloc_object(jclass clazz, TRAPS) {
   KlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz)));
+  if (k == NULL) {
+    ResourceMark rm(THREAD);
+    THROW_(vmSymbols::java_lang_InstantiationException(), NULL);
+  }
   k()->check_valid_for_instantiation(false, CHECK_NULL);
   InstanceKlass::cast(k())->initialize(CHECK_NULL);
   instanceOop ih = InstanceKlass::cast(k())->allocate_instance(THREAD);
   return ih;
 }
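
java_lang_Class::as_Klass returns NULL for primitive mirrors such as
int.class, so the check added above turns what used to be a crash into the
InstantiationException that the JNI specification prescribes for
AllocObject. Seen from a caller (sketch; error checks omitted, and
Integer.TYPE is the standard route to the primitive mirror):

    #include <jni.h>

    jobject try_alloc_primitive(JNIEnv* env) {
        jclass integerCls = env->FindClass("java/lang/Integer");
        jfieldID typeFld  = env->GetStaticFieldID(integerCls, "TYPE", "Ljava/lang/Class;");
        jclass intMirror  = (jclass)env->GetStaticObjectField(integerCls, typeFld);
        // Returns NULL and leaves an InstantiationException pending.
        return env->AllocObject(intMirror);
    }
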
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(AllocObject, jobject);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(AllocObject, jobject
                     , HOTSPOT_JNI_ALLOCOBJECT_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_AllocObject(JNIEnv *env, jclass clazz))
   JNIWrapper("AllocObject");
 
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, AllocObject__entry, env, clazz);
-#else /* USDT2 */
-  HOTSPOT_JNI_ALLOCOBJECT_ENTRY(
-                                env, clazz);
-#endif /* USDT2 */
+  HOTSPOT_JNI_ALLOCOBJECT_ENTRY(env, clazz);
+
   jobject ret = NULL;
   DT_RETURN_MARK(AllocObject, jobject, (const jobject&)ret);
 
@@ -1424,21 +1229,14 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewObjectA, jobject);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(NewObjectA, jobject
                     , HOTSPOT_JNI_NEWOBJECTA_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_NewObjectA(JNIEnv *env, jclass clazz, jmethodID methodID, const jvalue *args))
   JNIWrapper("NewObjectA");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, NewObjectA__entry, env, clazz, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECTA_ENTRY(
-                               env, clazz, (uintptr_t) methodID);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWOBJECTA_ENTRY(env, clazz, (uintptr_t) methodID);
+
   jobject obj = NULL;
   DT_RETURN_MARK(NewObjectA, jobject, (const jobject)obj);
 
@@ -1450,21 +1248,15 @@
   return obj;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewObjectV, jobject);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(NewObjectV, jobject
                     , HOTSPOT_JNI_NEWOBJECTV_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_NewObjectV(JNIEnv *env, jclass clazz, jmethodID methodID, va_list args))
   JNIWrapper("NewObjectV");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, NewObjectV__entry, env, clazz, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECTV_ENTRY(
-                               env, clazz, (uintptr_t) methodID);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWOBJECTV_ENTRY(env, clazz, (uintptr_t) methodID);
+
   jobject obj = NULL;
   DT_RETURN_MARK(NewObjectV, jobject, (const jobject&)obj);
 
@@ -1476,21 +1268,15 @@
   return obj;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewObject, jobject);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(NewObject, jobject
                     , HOTSPOT_JNI_NEWOBJECT_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_NewObject(JNIEnv *env, jclass clazz, jmethodID methodID, ...))
   JNIWrapper("NewObject");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, NewObject__entry, env, clazz, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWOBJECT_ENTRY(
-                              env, clazz, (uintptr_t) methodID);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_NEWOBJECT_ENTRY(env, clazz, (uintptr_t) methodID);
+
   jobject obj = NULL;
   DT_RETURN_MARK(NewObject, jobject, (const jobject&)obj);
 
@@ -1508,32 +1294,22 @@
 
 JNI_ENTRY(jclass, jni_GetObjectClass(JNIEnv *env, jobject obj))
   JNIWrapper("GetObjectClass");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetObjectClass__entry, env, obj);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(
-                                   env, obj);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(env, obj);
+
   Klass* k = JNIHandles::resolve_non_null(obj)->klass();
   jclass ret =
     (jclass) JNIHandles::make_local(env, k->java_mirror());
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetObjectClass__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTCLASS_RETURN(
-                                    ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETOBJECTCLASS_RETURN(ret);
   return ret;
 JNI_END
 
 JNI_QUICK_ENTRY(jboolean, jni_IsInstanceOf(JNIEnv *env, jobject obj, jclass clazz))
   JNIWrapper("IsInstanceOf");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, IsInstanceOf__entry, env, obj, clazz);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISINSTANCEOF_ENTRY(
-                                 env, obj, clazz);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISINSTANCEOF_ENTRY(env, obj, clazz);
+
   jboolean ret = JNI_TRUE;
   if (obj != NULL) {
     ret = JNI_FALSE;
@@ -1543,12 +1319,8 @@
       ret = JNIHandles::resolve_non_null(obj)->is_a(k) ? JNI_TRUE : JNI_FALSE;
     }
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, IsInstanceOf__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ISINSTANCEOF_RETURN(
-                                  ret);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_ISINSTANCEOF_RETURN(ret);
   return ret;
 JNI_END
 
@@ -1608,19 +1380,9 @@
 JNI_ENTRY(jmethodID, jni_GetMethodID(JNIEnv *env, jclass clazz,
           const char *name, const char *sig))
   JNIWrapper("GetMethodID");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, GetMethodID__entry, env, clazz, name, sig);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETMETHODID_ENTRY(
-                                env, clazz, (char *) name, (char *) sig);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETMETHODID_ENTRY(env, clazz, (char *) name, (char *) sig);
   jmethodID ret = get_method_id(env, clazz, name, sig, false, thread);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetMethodID__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETMETHODID_RETURN(
-                                 (uintptr_t) ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETMETHODID_RETURN((uintptr_t) ret);
   return ret;
 JNI_END
 
@@ -1628,19 +1390,9 @@
 JNI_ENTRY(jmethodID, jni_GetStaticMethodID(JNIEnv *env, jclass clazz,
           const char *name, const char *sig))
   JNIWrapper("GetStaticMethodID");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, GetStaticMethodID__entry, env, clazz, name, sig);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(
-                                      env, (char *) clazz, (char *) name, (char *)sig);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(env, (char *) clazz, (char *) name, (char *)sig);
   jmethodID ret = get_method_id(env, clazz, name, sig, true, thread);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStaticMethodID__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICMETHODID_RETURN(
-                                       (uintptr_t) ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTATICMETHODID_RETURN((uintptr_t) ret);
   return ret;
 JNI_END
 
@@ -1650,82 +1402,6 @@
 // Calling Methods
 //
 
-#ifndef USDT2
-#define DEFINE_CALLMETHOD(ResultType, Result, Tag) \
-\
-  DT_RETURN_MARK_DECL_FOR(Result, Call##Result##Method, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, Call##Result##MethodV, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, Call##Result##MethodA, ResultType);\
-\
-JNI_ENTRY(ResultType, \
-          jni_Call##Result##Method(JNIEnv *env, jobject obj, jmethodID methodID, ...)) \
-  JNIWrapper("Call" XSTR(Result) "Method"); \
-\
-  DTRACE_PROBE3(hotspot_jni, Call##Result##Method__entry, env, obj, methodID); \
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, Call##Result##Method, ResultType, \
-                     (const ResultType&)ret);\
-\
-  va_list args; \
-  va_start(args, methodID); \
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \
-  va_end(args); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-\
-JNI_ENTRY(ResultType, \
-          jni_Call##Result##MethodV(JNIEnv *env, jobject obj, jmethodID methodID, va_list args)) \
-  JNIWrapper("Call" XSTR(Result) "MethodV"); \
-\
-  DTRACE_PROBE3(hotspot_jni, Call##Result##MethodV__entry, env, obj, methodID); \
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, Call##Result##MethodV, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-\
-JNI_ENTRY(ResultType, \
-          jni_Call##Result##MethodA(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args)) \
-  JNIWrapper("Call" XSTR(Result) "MethodA"); \
-  DTRACE_PROBE3(hotspot_jni, Call##Result##MethodA__entry, env, obj, methodID); \
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, Call##Result##MethodA, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherArray ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_VIRTUAL, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END
-
-// the runtime type of subword integral basic types is integer
-DEFINE_CALLMETHOD(jboolean, Boolean, T_BOOLEAN)
-DEFINE_CALLMETHOD(jbyte,    Byte,    T_BYTE)
-DEFINE_CALLMETHOD(jchar,    Char,    T_CHAR)
-DEFINE_CALLMETHOD(jshort,   Short,   T_SHORT)
-
-DEFINE_CALLMETHOD(jobject,  Object,  T_OBJECT)
-DEFINE_CALLMETHOD(jint,     Int,     T_INT)
-DEFINE_CALLMETHOD(jlong,    Long,    T_LONG)
-DEFINE_CALLMETHOD(jfloat,   Float,   T_FLOAT)
-DEFINE_CALLMETHOD(jdouble,  Double,  T_DOUBLE)
-
-DT_VOID_RETURN_MARK_DECL(CallVoidMethod);
-DT_VOID_RETURN_MARK_DECL(CallVoidMethodV);
-DT_VOID_RETURN_MARK_DECL(CallVoidMethodA);
-
-#else /* USDT2 */
 
 #define DEFINE_CALLMETHOD(ResultType, Result, Tag \
                           , EntryProbe, ReturnProbe)    \
@@ -1807,34 +1483,34 @@
 
 // the runtime type of subword integral basic types is integer
 DEFINE_CALLMETHODV(jboolean, Boolean, T_BOOLEAN
-                  , HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN(_ret_ref))
 DEFINE_CALLMETHODV(jbyte,    Byte,    T_BYTE
-                  , HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLBYTEMETHODV_RETURN(_ret_ref))
 DEFINE_CALLMETHODV(jchar,    Char,    T_CHAR
-                  , HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLCHARMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLCHARMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLCHARMETHODV_RETURN(_ret_ref))
 DEFINE_CALLMETHODV(jshort,   Short,   T_SHORT
-                  , HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLSHORTMETHODV_RETURN(_ret_ref))
 
 DEFINE_CALLMETHODV(jobject,  Object,  T_OBJECT
-                  , HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN(_ret_ref))
 DEFINE_CALLMETHODV(jint,     Int,     T_INT,
-                  HOTSPOT_JNI_CALLINTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLINTMETHOD_RETURN(_ret_ref))
+                  HOTSPOT_JNI_CALLINTMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLINTMETHODV_RETURN(_ret_ref))
 DEFINE_CALLMETHODV(jlong,    Long,    T_LONG
-                  , HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLLONGMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLLONGMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLLONGMETHODV_RETURN(_ret_ref))
 // Float and double probes don't return a value because dtrace doesn't currently support it
 DEFINE_CALLMETHODV(jfloat,   Float,   T_FLOAT
-                  , HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLFLOATMETHOD_RETURN())
+                  , HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLFLOATMETHODV_RETURN())
 DEFINE_CALLMETHODV(jdouble,  Double,  T_DOUBLE
-                  , HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN())
+                  , HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN())
 
 #define DEFINE_CALLMETHODA(ResultType, Result, Tag \
                           , EntryProbe, ReturnProbe)    \
@@ -1859,49 +1535,43 @@
 
 // the runtime type of subword integral basic types is integer
 DEFINE_CALLMETHODA(jboolean, Boolean, T_BOOLEAN
-                  , HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN(_ret_ref))
 DEFINE_CALLMETHODA(jbyte,    Byte,    T_BYTE
-                  , HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLBYTEMETHODA_RETURN(_ret_ref))
 DEFINE_CALLMETHODA(jchar,    Char,    T_CHAR
-                  , HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLCHARMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLCHARMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLCHARMETHODA_RETURN(_ret_ref))
 DEFINE_CALLMETHODA(jshort,   Short,   T_SHORT
-                  , HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLSHORTMETHODA_RETURN(_ret_ref))
 
 DEFINE_CALLMETHODA(jobject,  Object,  T_OBJECT
-                  , HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN(_ret_ref))
 DEFINE_CALLMETHODA(jint,     Int,     T_INT,
-                  HOTSPOT_JNI_CALLINTMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLINTMETHOD_RETURN(_ret_ref))
+                  HOTSPOT_JNI_CALLINTMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLINTMETHODA_RETURN(_ret_ref))
 DEFINE_CALLMETHODA(jlong,    Long,    T_LONG
-                  , HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLLONGMETHOD_RETURN(_ret_ref))
+                  , HOTSPOT_JNI_CALLLONGMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLLONGMETHODA_RETURN(_ret_ref))
-// Float and double probes don't return value because dtrace doesn't currently support it
+// Float and double probes don't return a value because dtrace doesn't currently support it
 DEFINE_CALLMETHODA(jfloat,   Float,   T_FLOAT
-                  , HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLFLOATMETHOD_RETURN())
+                  , HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLFLOATMETHODA_RETURN())
 DEFINE_CALLMETHODA(jdouble,  Double,  T_DOUBLE
-                  , HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(env, obj, (uintptr_t)methodID),
-                  HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN())
+                  , HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY(env, obj, (uintptr_t)methodID),
+                  HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN())
 
 DT_VOID_RETURN_MARK_DECL(CallVoidMethod, HOTSPOT_JNI_CALLVOIDMETHOD_RETURN());
 DT_VOID_RETURN_MARK_DECL(CallVoidMethodV, HOTSPOT_JNI_CALLVOIDMETHODV_RETURN());
 DT_VOID_RETURN_MARK_DECL(CallVoidMethodA, HOTSPOT_JNI_CALLVOIDMETHODA_RETURN());
 
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_CallVoidMethod(JNIEnv *env, jobject obj, jmethodID methodID, ...))
   JNIWrapper("CallVoidMethod");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallVoidMethod__entry, env, obj, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(
-                                   env, obj, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(env, obj, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallVoidMethod);
 
   va_list args;
@@ -1915,12 +1585,7 @@
 
 JNI_ENTRY(void, jni_CallVoidMethodV(JNIEnv *env, jobject obj, jmethodID methodID, va_list args))
   JNIWrapper("CallVoidMethodV");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallVoidMethodV__entry, env, obj, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(
-                                    env, obj, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(env, obj, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallVoidMethodV);
 
   JavaValue jvalue(T_VOID);
@@ -1931,12 +1596,7 @@
 
 JNI_ENTRY(void, jni_CallVoidMethodA(JNIEnv *env, jobject obj, jmethodID methodID, const jvalue *args))
   JNIWrapper("CallVoidMethodA");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallVoidMethodA__entry, env, obj, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(
-                                    env, obj, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(env, obj, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallVoidMethodA);
 
   JavaValue jvalue(T_VOID);
@@ -1945,80 +1605,6 @@
 JNI_END
 
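A minimal sketch of the ...MethodA flavour defined above, which takes a caller-built jvalue array instead of varargs; the listener method here is hypothetical.

#include <jni.h>

// Invoke a hypothetical void onEvent(int, boolean) through the array-based
// entry point; each argument is boxed into one jvalue slot.
static void fire_event(JNIEnv* env, jobject listener, jmethodID on_event) {
  jvalue args[2];
  args[0].i = 42;        // int argument
  args[1].z = JNI_TRUE;  // boolean argument
  env->CallVoidMethodA(listener, on_event, args);  // lands in jni_CallVoidMethodA
}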
 
-#ifndef USDT2
-#define DEFINE_CALLNONVIRTUALMETHOD(ResultType, Result, Tag) \
-\
-  DT_RETURN_MARK_DECL_FOR(Result, CallNonvirtual##Result##Method, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, CallNonvirtual##Result##MethodV, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, CallNonvirtual##Result##MethodA, ResultType);\
-\
-JNI_ENTRY(ResultType, \
-          jni_CallNonvirtual##Result##Method(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, ...)) \
-  JNIWrapper("CallNonvitual" XSTR(Result) "Method"); \
-\
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtual##Result##Method__entry, env, obj, cls, methodID);\
-  ResultType ret;\
-  DT_RETURN_MARK_FOR(Result, CallNonvirtual##Result##Method, ResultType, \
-                     (const ResultType&)ret);\
-\
-  va_list args; \
-  va_start(args, methodID); \
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \
-  va_end(args); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-JNI_ENTRY(ResultType, \
-          jni_CallNonvirtual##Result##MethodV(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, va_list args)) \
-  JNIWrapper("CallNonvitual" XSTR(Result) "#MethodV"); \
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtual##Result##MethodV__entry, env, obj, cls, methodID);\
-  ResultType ret;\
-  DT_RETURN_MARK_FOR(Result, CallNonvirtual##Result##MethodV, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-JNI_ENTRY(ResultType, \
-          jni_CallNonvirtual##Result##MethodA(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, const jvalue *args)) \
-  JNIWrapper("CallNonvitual" XSTR(Result) "MethodA"); \
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtual##Result##MethodA__entry, env, obj, cls, methodID);\
-  ResultType ret;\
-  DT_RETURN_MARK_FOR(Result, CallNonvirtual##Result##MethodA, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherArray ap(methodID, args); \
-  jni_invoke_nonstatic(env, &jvalue, obj, JNI_NONVIRTUAL, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END
-
-// the runtime type of subword integral basic types is integer
-DEFINE_CALLNONVIRTUALMETHOD(jboolean, Boolean, T_BOOLEAN)
-DEFINE_CALLNONVIRTUALMETHOD(jbyte,    Byte,    T_BYTE)
-DEFINE_CALLNONVIRTUALMETHOD(jchar,    Char,    T_CHAR)
-DEFINE_CALLNONVIRTUALMETHOD(jshort,   Short,   T_SHORT)
-
-DEFINE_CALLNONVIRTUALMETHOD(jobject,  Object,  T_OBJECT)
-DEFINE_CALLNONVIRTUALMETHOD(jint,     Int,     T_INT)
-DEFINE_CALLNONVIRTUALMETHOD(jlong,    Long,    T_LONG)
-DEFINE_CALLNONVIRTUALMETHOD(jfloat,   Float,   T_FLOAT)
-DEFINE_CALLNONVIRTUALMETHOD(jdouble,  Double,  T_DOUBLE)
-
-
-DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethod);
-DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethodV);
-DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethodA);
-
-#else /* USDT2 */
 
 #define DEFINE_CALLNONVIRTUALMETHOD(ResultType, Result, Tag \
                                     , EntryProbe, ReturnProbe)      \
@@ -2188,18 +1774,11 @@
                          , HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_RETURN());
 DT_VOID_RETURN_MARK_DECL(CallNonvirtualVoidMethodA
                          , HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_RETURN());
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_CallNonvirtualVoidMethod(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, ...))
   JNIWrapper("CallNonvirtualVoidMethod");
 
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethod__entry,
-               env, obj, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(
-               env, obj, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(env, obj, cls, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethod);
 
   va_list args;
@@ -2214,13 +1793,8 @@
 JNI_ENTRY(void, jni_CallNonvirtualVoidMethodV(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, va_list args))
   JNIWrapper("CallNonvirtualVoidMethodV");
 
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodV__entry,
-               env, obj, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY(
-               env, obj, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY(env, obj, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethodV);
 
   JavaValue jvalue(T_VOID);
@@ -2231,13 +1805,8 @@
 
 JNI_ENTRY(void, jni_CallNonvirtualVoidMethodA(JNIEnv *env, jobject obj, jclass cls, jmethodID methodID, const jvalue *args))
   JNIWrapper("CallNonvirtualVoidMethodA");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, CallNonvirtualVoidMethodA__entry,
-                env, obj, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY(
-                env, obj, cls, (uintptr_t) methodID);
+  HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY(env, obj, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
   DT_VOID_RETURN_MARK(CallNonvirtualVoidMethodA);
   JavaValue jvalue(T_VOID);
   JNI_ArgumentPusherArray ap(methodID, args);
@@ -2245,80 +1814,6 @@
 JNI_END
 
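The nonvirtual entry points above dispatch on an explicit class rather than the receiver's dynamic type (the JNI_NONVIRTUAL path), the native analogue of a super call. An illustrative sketch; the class name is made up.

#include <jni.h>

// Call Base.close() on obj even if a subclass overrides close().
static void close_as_base(JNIEnv* env, jobject obj) {
  jclass base = env->FindClass("com/example/Base");  // hypothetical class
  if (base == NULL) return;                          // exception pending
  jmethodID mid = env->GetMethodID(base, "close", "()V");
  if (mid == NULL) return;
  env->CallNonvirtualVoidMethod(obj, base, mid);     // JNI_NONVIRTUAL dispatch
}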
 
-#ifndef USDT2
-#define DEFINE_CALLSTATICMETHOD(ResultType, Result, Tag) \
-\
-  DT_RETURN_MARK_DECL_FOR(Result, CallStatic##Result##Method, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, CallStatic##Result##MethodV, ResultType);\
-  DT_RETURN_MARK_DECL_FOR(Result, CallStatic##Result##MethodA, ResultType);\
-\
-JNI_ENTRY(ResultType, \
-          jni_CallStatic##Result##Method(JNIEnv *env, jclass cls, jmethodID methodID, ...)) \
-  JNIWrapper("CallStatic" XSTR(Result) "Method"); \
-\
-  DTRACE_PROBE3(hotspot_jni, CallStatic##Result##Method__entry, env, cls, methodID);\
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, CallStatic##Result##Method, ResultType, \
-                     (const ResultType&)ret);\
-\
-  va_list args; \
-  va_start(args, methodID); \
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_static(env, &jvalue, NULL, JNI_STATIC, methodID, &ap, CHECK_0); \
-  va_end(args); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-JNI_ENTRY(ResultType, \
-          jni_CallStatic##Result##MethodV(JNIEnv *env, jclass cls, jmethodID methodID, va_list args)) \
-  JNIWrapper("CallStatic" XSTR(Result) "MethodV"); \
-  DTRACE_PROBE3(hotspot_jni, CallStatic##Result##MethodV__entry, env, cls, methodID);\
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, CallStatic##Result##MethodV, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherVaArg ap(methodID, args); \
-  jni_invoke_static(env, &jvalue, NULL, JNI_STATIC, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END \
-\
-JNI_ENTRY(ResultType, \
-          jni_CallStatic##Result##MethodA(JNIEnv *env, jclass cls, jmethodID methodID, const jvalue *args)) \
-  JNIWrapper("CallStatic" XSTR(Result) "MethodA"); \
-  DTRACE_PROBE3(hotspot_jni, CallStatic##Result##MethodA__entry, env, cls, methodID);\
-  ResultType ret = 0;\
-  DT_RETURN_MARK_FOR(Result, CallStatic##Result##MethodA, ResultType, \
-                     (const ResultType&)ret);\
-\
-  JavaValue jvalue(Tag); \
-  JNI_ArgumentPusherArray ap(methodID, args); \
-  jni_invoke_static(env, &jvalue, NULL, JNI_STATIC, methodID, &ap, CHECK_0); \
-  ret = jvalue.get_##ResultType(); \
-  return ret;\
-JNI_END
-
-// the runtime type of subword integral basic types is integer
-DEFINE_CALLSTATICMETHOD(jboolean, Boolean, T_BOOLEAN)
-DEFINE_CALLSTATICMETHOD(jbyte,    Byte,    T_BYTE)
-DEFINE_CALLSTATICMETHOD(jchar,    Char,    T_CHAR)
-DEFINE_CALLSTATICMETHOD(jshort,   Short,   T_SHORT)
-
-DEFINE_CALLSTATICMETHOD(jobject,  Object,  T_OBJECT)
-DEFINE_CALLSTATICMETHOD(jint,     Int,     T_INT)
-DEFINE_CALLSTATICMETHOD(jlong,    Long,    T_LONG)
-DEFINE_CALLSTATICMETHOD(jfloat,   Float,   T_FLOAT)
-DEFINE_CALLSTATICMETHOD(jdouble,  Double,  T_DOUBLE)
-
-
-DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethod);
-DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethodV);
-DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethodA);
-
-#else /* USDT2 */
 
 #define DEFINE_CALLSTATICMETHOD(ResultType, Result, Tag \
                                 , EntryProbe, ResultProbe) \
@@ -2492,16 +1987,10 @@
                          , HOTSPOT_JNI_CALLSTATICVOIDMETHODV_RETURN());
 DT_VOID_RETURN_MARK_DECL(CallStaticVoidMethodA
                          , HOTSPOT_JNI_CALLSTATICVOIDMETHODA_RETURN());
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_CallStaticVoidMethod(JNIEnv *env, jclass cls, jmethodID methodID, ...))
   JNIWrapper("CallStaticVoidMethod");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethod__entry, env, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(
-                                         env, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(env, cls, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallStaticVoidMethod);
 
   va_list args;
@@ -2515,12 +2004,7 @@
 
 JNI_ENTRY(void, jni_CallStaticVoidMethodV(JNIEnv *env, jclass cls, jmethodID methodID, va_list args))
   JNIWrapper("CallStaticVoidMethodV");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodV__entry, env, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(
-                                          env, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(env, cls, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallStaticVoidMethodV);
 
   JavaValue jvalue(T_VOID);
@@ -2531,12 +2015,7 @@
 
 JNI_ENTRY(void, jni_CallStaticVoidMethodA(JNIEnv *env, jclass cls, jmethodID methodID, const jvalue *args))
   JNIWrapper("CallStaticVoidMethodA");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, CallStaticVoidMethodA__entry, env, cls, methodID);
-#else /* USDT2 */
-  HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(
-                                          env, cls, (uintptr_t) methodID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(env, cls, (uintptr_t) methodID);
   DT_VOID_RETURN_MARK(CallStaticVoidMethodA);
 
   JavaValue jvalue(T_VOID);
@@ -2550,22 +2029,13 @@
 //
 
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetFieldID, jfieldID);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetFieldID, jfieldID
                     , HOTSPOT_JNI_GETFIELDID_RETURN((uintptr_t)_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jfieldID, jni_GetFieldID(JNIEnv *env, jclass clazz,
           const char *name, const char *sig))
   JNIWrapper("GetFieldID");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, GetFieldID__entry, env, clazz, name, sig);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETFIELDID_ENTRY(
-                               env, clazz, (char *) name, (char *) sig);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig);
   jfieldID ret = 0;
   DT_RETURN_MARK(GetFieldID, jfieldID, (const jfieldID&)ret);
 
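A sketch of the lookup-then-access sequence these entry points serve; the field name and type are illustrative, and the ID is resolved once and reused.

#include <jni.h>

static jobject read_name_field(JNIEnv* env, jobject person) {
  jclass cls = env->GetObjectClass(person);
  jfieldID fid = env->GetFieldID(cls, "name", "Ljava/lang/String;");
  if (fid == NULL) return NULL;             // NoSuchFieldError is pending
  return env->GetObjectField(person, fid);  // reads through the cached offset
}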
@@ -2597,12 +2067,7 @@
 
 JNI_ENTRY(jobject, jni_GetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID))
   JNIWrapper("GetObjectField");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetObjectField__entry, env, obj, fieldID);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(
-                                   env, obj, (uintptr_t) fieldID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID);
   oop o = JNIHandles::resolve_non_null(obj);
   Klass* k = o->klass();
   int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);
@@ -2632,51 +2097,11 @@
     }
   }
 #endif // INCLUDE_ALL_GCS
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetObjectField__return, ret);
-#else /* USDT2 */
-HOTSPOT_JNI_GETOBJECTFIELD_RETURN(
-                                  ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETOBJECTFIELD_RETURN(ret);
   return ret;
 JNI_END
 
 
-#ifndef USDT2
-#define DEFINE_GETFIELD(Return,Fieldname,Result) \
-\
-  DT_RETURN_MARK_DECL_FOR(Result, Get##Result##Field, Return);\
-\
-JNI_QUICK_ENTRY(Return, jni_Get##Result##Field(JNIEnv *env, jobject obj, jfieldID fieldID)) \
-  JNIWrapper("Get" XSTR(Result) "Field"); \
-\
-  DTRACE_PROBE3(hotspot_jni, Get##Result##Field__entry, env, obj, fieldID);\
-  Return ret = 0;\
-  DT_RETURN_MARK_FOR(Result, Get##Result##Field, Return, (const Return&)ret);\
-\
-  oop o = JNIHandles::resolve_non_null(obj); \
-  Klass* k = o->klass(); \
-  int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);  \
-  /* Keep JVMTI addition small and only check enabled flag here.       */ \
-  /* jni_GetField_probe_nh() assumes that is not okay to create handles */ \
-  /* and creates a ResetNoHandleMark.                                   */ \
-  if (JvmtiExport::should_post_field_access()) { \
-    o = JvmtiExport::jni_GetField_probe_nh(thread, obj, o, k, fieldID, false); \
-  } \
-  ret = o->Fieldname##_field(offset); \
-  return ret; \
-JNI_END
-
-DEFINE_GETFIELD(jboolean, bool,   Boolean)
-DEFINE_GETFIELD(jbyte,    byte,   Byte)
-DEFINE_GETFIELD(jchar,    char,   Char)
-DEFINE_GETFIELD(jshort,   short,  Short)
-DEFINE_GETFIELD(jint,     int,    Int)
-DEFINE_GETFIELD(jlong,    long,   Long)
-DEFINE_GETFIELD(jfloat,   float,  Float)
-DEFINE_GETFIELD(jdouble,  double, Double)
-
-#else /* USDT2 */
 
 #define DEFINE_GETFIELD(Return,Fieldname,Result \
   , EntryProbe, ReturnProbe) \
@@ -2729,7 +2154,6 @@
 DEFINE_GETFIELD(jdouble,  double, Double
                 , HOTSPOT_JNI_GETDOUBLEFIELD_ENTRY(env, obj, (uintptr_t)fieldID),
                 HOTSPOT_JNI_GETDOUBLEFIELD_RETURN())
-#endif /* USDT2 */
 
 address jni_GetBooleanField_addr() {
   return (address)jni_GetBooleanField;
@@ -2758,12 +2182,7 @@
 
 JNI_QUICK_ENTRY(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID, jobject value))
   JNIWrapper("SetObjectField");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, SetObjectField__entry, env, obj, fieldID, value);
-#else /* USDT2 */
-  HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(
-                                   env, obj, (uintptr_t) fieldID, value);
-#endif /* USDT2 */
+  HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value);
   oop o = JNIHandles::resolve_non_null(obj);
   Klass* k = o->klass();
   int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);
@@ -2776,49 +2195,9 @@
     o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, 'L', (jvalue *)&field_value);
   }
   o->obj_field_put(offset, JNIHandles::resolve(value));
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, SetObjectField__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_SETOBJECTFIELD_RETURN(
-);
-#endif /* USDT2 */
+  HOTSPOT_JNI_SETOBJECTFIELD_RETURN();
 JNI_END
 
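The matching write path; as the check above shows, a JVMTI field-modification event may be posted before the store. A minimal sketch, reusing a previously resolved (hypothetical) field ID.

#include <jni.h>

// Null out an object field; obj_field_put() above performs the store
// together with whatever GC barrier the collector requires.
static void clear_name_field(JNIEnv* env, jobject person, jfieldID name_fid) {
  env->SetObjectField(person, name_fid, NULL);
}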
-#ifndef USDT2
-#define DEFINE_SETFIELD(Argument,Fieldname,Result,SigType,unionType) \
-\
-JNI_QUICK_ENTRY(void, jni_Set##Result##Field(JNIEnv *env, jobject obj, jfieldID fieldID, Argument value)) \
-  JNIWrapper("Set" XSTR(Result) "Field"); \
-\
-  FP_SELECT_##Result( \
-    DTRACE_PROBE4(hotspot_jni, Set##Result##Field__entry, env, obj, fieldID, value), \
-    DTRACE_PROBE3(hotspot_jni, Set##Result##Field__entry, env, obj, fieldID)); \
-\
-  oop o = JNIHandles::resolve_non_null(obj); \
-  Klass* k = o->klass(); \
-  int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID);  \
-  /* Keep JVMTI addition small and only check enabled flag here.       */ \
-  /* jni_SetField_probe_nh() assumes that is not okay to create handles */ \
-  /* and creates a ResetNoHandleMark.                                   */ \
-  if (JvmtiExport::should_post_field_modification()) { \
-    jvalue field_value; \
-    field_value.unionType = value; \
-    o = JvmtiExport::jni_SetField_probe_nh(thread, obj, o, k, fieldID, false, SigType, (jvalue *)&field_value); \
-  } \
-  o->Fieldname##_field_put(offset, value); \
-  DTRACE_PROBE(hotspot_jni, Set##Result##Field__return);\
-JNI_END
-
-DEFINE_SETFIELD(jboolean, bool,   Boolean, 'Z', z)
-DEFINE_SETFIELD(jbyte,    byte,   Byte,    'B', b)
-DEFINE_SETFIELD(jchar,    char,   Char,    'C', c)
-DEFINE_SETFIELD(jshort,   short,  Short,   'S', s)
-DEFINE_SETFIELD(jint,     int,    Int,     'I', i)
-DEFINE_SETFIELD(jlong,    long,   Long,    'J', j)
-DEFINE_SETFIELD(jfloat,   float,  Float,   'F', f)
-DEFINE_SETFIELD(jdouble,  double, Double,  'D', d)
-
-#else /* USDT2 */
 
 #define DEFINE_SETFIELD(Argument,Fieldname,Result,SigType,unionType \
                         , EntryProbe, ReturnProbe) \
@@ -2868,24 +2247,13 @@
 DEFINE_SETFIELD(jdouble,  double, Double,  'D', d
                 , HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY(env, obj, (uintptr_t)fieldID),
                 HOTSPOT_JNI_SETDOUBLEFIELD_RETURN())
-#endif /* USDT2 */
-
-#ifndef USDT2
-DT_RETURN_MARK_DECL(ToReflectedField, jobject);
-#else /* USDT2 */
+
 DT_RETURN_MARK_DECL(ToReflectedField, jobject
                     , HOTSPOT_JNI_TOREFLECTEDFIELD_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_ToReflectedField(JNIEnv *env, jclass cls, jfieldID fieldID, jboolean isStatic))
   JNIWrapper("ToReflectedField");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, ToReflectedField__entry,
-                env, cls, fieldID, isStatic);
-#else /* USDT2 */
-  HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(
-                env, cls, (uintptr_t) fieldID, isStatic);
-#endif /* USDT2 */
+  HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(env, cls, (uintptr_t) fieldID, isStatic);
   jobject ret = NULL;
   DT_RETURN_MARK(ToReflectedField, jobject, (const jobject&)ret);
 
@@ -2915,22 +2283,13 @@
 //
 // Accessing Static Fields
 //
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetStaticFieldID, jfieldID);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetStaticFieldID, jfieldID
                     , HOTSPOT_JNI_GETSTATICFIELDID_RETURN((uintptr_t)_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jfieldID, jni_GetStaticFieldID(JNIEnv *env, jclass clazz,
           const char *name, const char *sig))
   JNIWrapper("GetStaticFieldID");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, GetStaticFieldID__entry, env, clazz, name, sig);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(
-                                     env, clazz, (char *) name, (char *) sig);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(env, clazz, (char *) name, (char *) sig);
   jfieldID ret = NULL;
   DT_RETURN_MARK(GetStaticFieldID, jfieldID, (const jfieldID&)ret);
 
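Static field IDs use a different representation (a JNIid rather than an instance offset), but client code is symmetric to the instance case. Sketch with illustrative names.

#include <jni.h>

static jobject read_default_config(JNIEnv* env) {
  jclass cls = env->FindClass("com/example/Config");  // hypothetical class
  if (cls == NULL) return NULL;
  jfieldID fid = env->GetStaticFieldID(cls, "DEFAULT", "Ljava/lang/String;");
  return (fid != NULL) ? env->GetStaticObjectField(cls, fid) : NULL;
}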
@@ -2966,12 +2325,7 @@
 
 JNI_ENTRY(jobject, jni_GetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fieldID))
   JNIWrapper("GetStaticObjectField");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetStaticObjectField__entry, env, clazz, fieldID);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(
-                                         env, clazz, (uintptr_t) fieldID);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID);
 #if INCLUDE_JNI_CHECK
   DEBUG_ONLY(Klass* param_k = jniCheck::validate_class(thread, clazz);)
 #endif // INCLUDE_JNI_CHECK
@@ -2983,47 +2337,10 @@
     JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true);
   }
   jobject ret = JNIHandles::make_local(id->holder()->java_mirror()->obj_field(id->offset()));
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStaticObjectField__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(
-                                          ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(ret);
   return ret;
 JNI_END
 
-#ifndef USDT2
-#define DEFINE_GETSTATICFIELD(Return,Fieldname,Result) \
-\
-  DT_RETURN_MARK_DECL_FOR(Result, GetStatic##Result##Field, Return);\
-\
-JNI_ENTRY(Return, jni_GetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID fieldID)) \
-  JNIWrapper("GetStatic" XSTR(Result) "Field"); \
-  DTRACE_PROBE3(hotspot_jni, GetStatic##Result##Field__entry, env, clazz, fieldID);\
-  Return ret = 0;\
-  DT_RETURN_MARK_FOR(Result, GetStatic##Result##Field, Return, \
-                     (const Return&)ret);\
-  JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); \
-  assert(id->is_static_field_id(), "invalid static field id"); \
-  /* Keep JVMTI addition small and only check enabled flag here. */ \
-  /* jni_GetField_probe() assumes that is okay to create handles. */ \
-  if (JvmtiExport::should_post_field_access()) { \
-    JvmtiExport::jni_GetField_probe(thread, NULL, NULL, id->holder(), fieldID, true); \
-  } \
-  ret = id->holder()->java_mirror()-> Fieldname##_field (id->offset()); \
-  return ret;\
-JNI_END
-
-DEFINE_GETSTATICFIELD(jboolean, bool,   Boolean)
-DEFINE_GETSTATICFIELD(jbyte,    byte,   Byte)
-DEFINE_GETSTATICFIELD(jchar,    char,   Char)
-DEFINE_GETSTATICFIELD(jshort,   short,  Short)
-DEFINE_GETSTATICFIELD(jint,     int,    Int)
-DEFINE_GETSTATICFIELD(jlong,    long,   Long)
-DEFINE_GETSTATICFIELD(jfloat,   float,  Float)
-DEFINE_GETSTATICFIELD(jdouble,  double, Double)
-
-#else /* USDT2 */
 
 #define DEFINE_GETSTATICFIELD(Return,Fieldname,Result \
                               , EntryProbe, ReturnProbe) \
@@ -3065,16 +2382,10 @@
                       , HOTSPOT_JNI_GETSTATICFLOATFIELD_ENTRY(env, clazz, (uintptr_t) fieldID),   HOTSPOT_JNI_GETSTATICFLOATFIELD_RETURN()          )
 DEFINE_GETSTATICFIELD(jdouble,  double, Double
                       , HOTSPOT_JNI_GETSTATICDOUBLEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID),  HOTSPOT_JNI_GETSTATICDOUBLEFIELD_RETURN()         )
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_SetStaticObjectField(JNIEnv *env, jclass clazz, jfieldID fieldID, jobject value))
   JNIWrapper("SetStaticObjectField");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, SetStaticObjectField__entry, env, clazz, fieldID, value);
-#else /* USDT2 */
- HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(
-                                        env, clazz, (uintptr_t) fieldID, value);
-#endif /* USDT2 */
+  HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value);
   JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID);
   assert(id->is_static_field_id(), "invalid static field id");
   // Keep JVMTI addition small and only check enabled flag here.
@@ -3085,47 +2396,10 @@
     JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, 'L', (jvalue *)&field_value);
   }
   id->holder()->java_mirror()->obj_field_put(id->offset(), JNIHandles::resolve(value));
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, SetStaticObjectField__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN(
-                                          );
-#endif /* USDT2 */
+  HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN();
 JNI_END
 
 
-#ifndef USDT2
-#define DEFINE_SETSTATICFIELD(Argument,Fieldname,Result,SigType,unionType) \
-\
-JNI_ENTRY(void, jni_SetStatic##Result##Field(JNIEnv *env, jclass clazz, jfieldID fieldID, Argument value)) \
-  JNIWrapper("SetStatic" XSTR(Result) "Field"); \
-  FP_SELECT_##Result( \
-     DTRACE_PROBE4(hotspot_jni, SetStatic##Result##Field__entry, env, clazz, fieldID, value), \
-     DTRACE_PROBE3(hotspot_jni, SetStatic##Result##Field__entry, env, clazz, fieldID)); \
-\
-  JNIid* id = jfieldIDWorkaround::from_static_jfieldID(fieldID); \
-  assert(id->is_static_field_id(), "invalid static field id"); \
-  /* Keep JVMTI addition small and only check enabled flag here. */ \
-  /* jni_SetField_probe() assumes that is okay to create handles. */ \
-  if (JvmtiExport::should_post_field_modification()) { \
-    jvalue field_value; \
-    field_value.unionType = value; \
-    JvmtiExport::jni_SetField_probe(thread, NULL, NULL, id->holder(), fieldID, true, SigType, (jvalue *)&field_value); \
-  } \
-  id->holder()->java_mirror()-> Fieldname##_field_put (id->offset(), value); \
-  DTRACE_PROBE(hotspot_jni, SetStatic##Result##Field__return);\
-JNI_END
-
-DEFINE_SETSTATICFIELD(jboolean, bool,   Boolean, 'Z', z)
-DEFINE_SETSTATICFIELD(jbyte,    byte,   Byte,    'B', b)
-DEFINE_SETSTATICFIELD(jchar,    char,   Char,    'C', c)
-DEFINE_SETSTATICFIELD(jshort,   short,  Short,   'S', s)
-DEFINE_SETSTATICFIELD(jint,     int,    Int,     'I', i)
-DEFINE_SETSTATICFIELD(jlong,    long,   Long,    'J', j)
-DEFINE_SETSTATICFIELD(jfloat,   float,  Float,   'F', f)
-DEFINE_SETSTATICFIELD(jdouble,  double, Double,  'D', d)
-
-#else /* USDT2 */
 
 #define DEFINE_SETSTATICFIELD(Argument,Fieldname,Result,SigType,unionType \
                               , EntryProbe, ReturnProbe) \
@@ -3148,7 +2422,7 @@
 JNI_END
 
 DEFINE_SETSTATICFIELD(jboolean, bool,   Boolean, 'Z', z
-                      , HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
+                      , HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(env, clazz, (uintptr_t)fieldID, value),
-                      HOTSPOT_JNI_SETBOOLEANFIELD_RETURN())
+                      HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN())
 DEFINE_SETSTATICFIELD(jbyte,    byte,   Byte,    'B', b
                       , HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID, value),
@@ -3172,7 +2446,6 @@
 DEFINE_SETSTATICFIELD(jdouble,  double, Double,  'D', d
                       , HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY(env, clazz, (uintptr_t) fieldID),
                       HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN())
-#endif /* USDT2 */
 
 //
 // String Operations
@@ -3180,21 +2453,12 @@
 
 // Unicode Interface
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewString, jstring);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(NewString, jstring
                     , HOTSPOT_JNI_NEWSTRING_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jstring, jni_NewString(JNIEnv *env, const jchar *unicodeChars, jsize len))
   JNIWrapper("NewString");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, NewString__entry, env, unicodeChars, len);
-#else /* USDT2 */
- HOTSPOT_JNI_NEWSTRING_ENTRY(
-                             env, (uint16_t *) unicodeChars, len);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWSTRING_ENTRY(env, (uint16_t *) unicodeChars, len);
   jstring ret = NULL;
   DT_RETURN_MARK(NewString, jstring, (const jstring&)ret);
   oop string=java_lang_String::create_oop_from_unicode((jchar*) unicodeChars, len, CHECK_NULL);
@@ -3205,23 +2469,13 @@
 
 JNI_QUICK_ENTRY(jsize, jni_GetStringLength(JNIEnv *env, jstring string))
   JNIWrapper("GetStringLength");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetStringLength__entry, env, string);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(
-                                    env, string);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(env, string);
   jsize ret = 0;
   oop s = JNIHandles::resolve_non_null(string);
   if (java_lang_String::value(s) != NULL) {
     ret = java_lang_String::length(s);
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(
-                                    ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(ret);
   return ret;
 JNI_END
 
@@ -3229,12 +2483,7 @@
 JNI_QUICK_ENTRY(const jchar*, jni_GetStringChars(
   JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringChars");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetStringChars__entry, env, string, isCopy);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
-                                  env, string, (uintptr_t *) isCopy);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(env, string, (uintptr_t *) isCopy);
   jchar* buf = NULL;
   oop s = JNIHandles::resolve_non_null(string);
   typeArrayOop s_value = java_lang_String::value(s);
@@ -3254,56 +2503,32 @@
       }
     }
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStringChars__return, buf);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGCHARS_RETURN(
-                                    buf);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGCHARS_RETURN(buf);
   return buf;
 JNI_END
 
 
 JNI_QUICK_ENTRY(void, jni_ReleaseStringChars(JNIEnv *env, jstring str, const jchar *chars))
   JNIWrapper("ReleaseStringChars");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, ReleaseStringChars__entry, env, str, chars);
-#else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(
-                                       env, str, (uint16_t *) chars);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(env, str, (uint16_t *) chars);
   //%note jni_6
   if (chars != NULL) {
     // Since String objects are supposed to be immutable, don't copy any
     // new data back.  A bad user will have to go after the char array.
     FreeHeap((void*) chars);
   }
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ReleaseStringChars__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN(
-);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN();
 JNI_END
 
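As the implementation above shows, GetStringChars hands out a C-heap copy (isCopy is set to JNI_TRUE) that ReleaseStringChars later frees, so the calls must be strictly paired. A minimal sketch:

#include <jni.h>

static jchar first_char(JNIEnv* env, jstring s) {
  jboolean is_copy;
  const jchar* chars = env->GetStringChars(s, &is_copy);  // heap copy; may be NULL
  if (chars == NULL) return 0;
  jchar c = (env->GetStringLength(s) > 0) ? chars[0] : 0;
  env->ReleaseStringChars(s, chars);  // FreeHeap() above reclaims the copy
  return c;
}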
 
 // UTF Interface
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewStringUTF, jstring);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(NewStringUTF, jstring
                     , HOTSPOT_JNI_NEWSTRINGUTF_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jstring, jni_NewStringUTF(JNIEnv *env, const char *bytes))
   JNIWrapper("NewStringUTF");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, NewStringUTF__entry, env, bytes);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(
-                                 env, (char *) bytes);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(env, (char *) bytes);
-  jstring ret;
+  jstring ret = NULL;
   DT_RETURN_MARK(NewStringUTF, jstring, (const jstring&)ret);
 
@@ -3315,35 +2540,20 @@
 
 JNI_ENTRY(jsize, jni_GetStringUTFLength(JNIEnv *env, jstring string))
   JNIWrapper("GetStringUTFLength");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetStringUTFLength__entry, env, string);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(
-                                      env, string);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(env, string);
   jsize ret = 0;
   oop java_string = JNIHandles::resolve_non_null(string);
   if (java_lang_String::value(java_string) != NULL) {
     ret = java_lang_String::utf8_length(java_string);
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(
-                                        ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY(const char*, jni_GetStringUTFChars(JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringUTFChars");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetStringUTFChars__entry, env, string, isCopy);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(
-                                     env, string, (uintptr_t *) isCopy);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(env, string, (uintptr_t *) isCopy);
   char* result = NULL;
   oop java_string = JNIHandles::resolve_non_null(string);
   if (java_lang_String::value(java_string) != NULL) {
@@ -3357,53 +2567,28 @@
       }
     }
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStringUTFChars__return, result);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(
-                                      result);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(result);
   return result;
 JNI_END
 
 
 JNI_LEAF(void, jni_ReleaseStringUTFChars(JNIEnv *env, jstring str, const char *chars))
   JNIWrapper("ReleaseStringUTFChars");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, ReleaseStringUTFChars__entry, env, str, chars);
-#else /* USDT2 */
- HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(
-                                         env, str, (char *) chars);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(env, str, (char *) chars);
   if (chars != NULL) {
     FreeHeap((char*) chars);
   }
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ReleaseStringUTFChars__return);
-#else /* USDT2 */
-HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN(
-);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN();
 JNI_END
 
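The UTF interface follows the same pairing discipline; the copy handed out is NUL-terminated modified UTF-8. Sketch:

#include <jni.h>
#include <stdio.h>

static void print_utf(JNIEnv* env, jstring s) {
  const char* utf = env->GetStringUTFChars(s, NULL);  // NULL isCopy is legal
  if (utf == NULL) return;  // allocation failed; an exception is pending
  printf("%s\n", utf);
  env->ReleaseStringUTFChars(s, utf);  // FreeHeap() above reclaims the copy
}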
 
 JNI_QUICK_ENTRY(jsize, jni_GetArrayLength(JNIEnv *env, jarray array))
   JNIWrapper("GetArrayLength");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetArrayLength__entry, env, array);
-#else /* USDT2 */
- HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(
-                                  env, array);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(env, array);
   arrayOop a = arrayOop(JNIHandles::resolve_non_null(array));
   assert(a->is_array(), "must be array");
   jsize ret = a->length();
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetArrayLength__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_GETARRAYLENGTH_RETURN(
-                                   ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETARRAYLENGTH_RETURN(ret);
   return ret;
 JNI_END
 
@@ -3412,21 +2597,12 @@
 // Object Array Operations
 //
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(NewObjectArray, jobjectArray);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(NewObjectArray, jobjectArray
                     , HOTSPOT_JNI_NEWOBJECTARRAY_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobjectArray, jni_NewObjectArray(JNIEnv *env, jsize length, jclass elementClass, jobject initialElement))
   JNIWrapper("NewObjectArray");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, NewObjectArray__entry, env, length, elementClass, initialElement);
-#else /* USDT2 */
- HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(
-                                  env, length, elementClass, initialElement);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(env, length, elementClass, initialElement);
   jobjectArray ret = NULL;
   DT_RETURN_MARK(NewObjectArray, jobjectArray, (const jobjectArray&)ret);
   KlassHandle ek(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(elementClass)));
@@ -3444,21 +2620,12 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetObjectArrayElement, jobject);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetObjectArrayElement, jobject
                     , HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jobject, jni_GetObjectArrayElement(JNIEnv *env, jobjectArray array, jsize index))
   JNIWrapper("GetObjectArrayElement");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetObjectArrayElement__entry, env, array, index);
-#else /* USDT2 */
- HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(
-                                         env, array, index);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(env, array, index);
   jobject ret = NULL;
   DT_RETURN_MARK(GetObjectArrayElement, jobject, (const jobject&)ret);
   objArrayOop a = objArrayOop(JNIHandles::resolve_non_null(array));
@@ -3472,21 +2639,12 @@
   }
 JNI_END
 
-#ifndef USDT2
-DT_VOID_RETURN_MARK_DECL(SetObjectArrayElement);
-#else /* USDT2 */
 DT_VOID_RETURN_MARK_DECL(SetObjectArrayElement
                          , HOTSPOT_JNI_SETOBJECTARRAYELEMENT_RETURN());
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_SetObjectArrayElement(JNIEnv *env, jobjectArray array, jsize index, jobject value))
   JNIWrapper("SetObjectArrayElement");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, SetObjectArrayElement__entry, env, array, index, value);
-#else /* USDT2 */
- HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(
-                                         env, array, index, value);
-#endif /* USDT2 */
+  HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(env, array, index, value);
   DT_VOID_RETURN_MARK(SetObjectArrayElement);
 
   objArrayOop a = objArrayOop(JNIHandles::resolve_non_null(array));
@@ -3505,33 +2663,6 @@
 JNI_END
 
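Object arrays are accessed element-at-a-time through the bounds- and store-checked entry points above. A minimal sketch:

#include <jni.h>

static jobjectArray make_pair(JNIEnv* env, jobject a, jobject b) {
  jclass object_cls = env->FindClass("java/lang/Object");
  jobjectArray arr = env->NewObjectArray(2, object_cls, NULL);
  if (arr == NULL) return NULL;           // OutOfMemoryError pending
  env->SetObjectArrayElement(arr, 0, a);  // ArrayStoreException-checked store
  env->SetObjectArrayElement(arr, 1, b);
  return arr;
}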
 
-#ifndef USDT2
-#define DEFINE_NEWSCALARARRAY(Return,Allocator,Result) \
-\
-  DT_RETURN_MARK_DECL(New##Result##Array, Return); \
-\
-JNI_ENTRY(Return, \
-          jni_New##Result##Array(JNIEnv *env, jsize len)) \
-  JNIWrapper("New" XSTR(Result) "Array"); \
-  DTRACE_PROBE2(hotspot_jni, New##Result##Array__entry, env, len);\
-  Return ret = NULL;\
-  DT_RETURN_MARK(New##Result##Array, Return, (const Return&)ret);\
-\
-  oop obj= oopFactory::Allocator(len, CHECK_0); \
-  ret = (Return) JNIHandles::make_local(env, obj); \
-  return ret;\
-JNI_END
-
-DEFINE_NEWSCALARARRAY(jbooleanArray, new_boolArray,   Boolean)
-DEFINE_NEWSCALARARRAY(jbyteArray,    new_byteArray,   Byte)
-DEFINE_NEWSCALARARRAY(jshortArray,   new_shortArray,  Short)
-DEFINE_NEWSCALARARRAY(jcharArray,    new_charArray,   Char)
-DEFINE_NEWSCALARARRAY(jintArray,     new_intArray,    Int)
-DEFINE_NEWSCALARARRAY(jlongArray,    new_longArray,   Long)
-DEFINE_NEWSCALARARRAY(jfloatArray,   new_singleArray, Float)
-DEFINE_NEWSCALARARRAY(jdoubleArray,  new_doubleArray, Double)
-
-#else /* USDT2 */
 
 #define DEFINE_NEWSCALARARRAY(Return,Allocator,Result \
                               ,EntryProbe,ReturnProbe)  \
@@ -3575,7 +2706,6 @@
 DEFINE_NEWSCALARARRAY(jdoubleArray,  new_doubleArray, Double,
                       HOTSPOT_JNI_NEWDOUBLEARRAY_ENTRY(env, len),
                       HOTSPOT_JNI_NEWDOUBLEARRAY_RETURN(_ret_ref))
-#endif /* USDT2 */
 
 // Return an address which will fault if the caller writes to it.
 
@@ -3593,47 +2723,6 @@
 }
 
 
-#ifndef USDT2
-#define DEFINE_GETSCALARARRAYELEMENTS(ElementTag,ElementType,Result, Tag) \
-\
-JNI_QUICK_ENTRY(ElementType*, \
-          jni_Get##Result##ArrayElements(JNIEnv *env, ElementType##Array array, jboolean *isCopy)) \
-  JNIWrapper("Get" XSTR(Result) "ArrayElements"); \
-  DTRACE_PROBE3(hotspot_jni, Get##Result##ArrayElements__entry, env, array, isCopy);\
-  /* allocate an chunk of memory in c land */ \
-  typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  ElementType* result; \
-  int len = a->length(); \
-  if (len == 0) { \
-    /* Empty array: legal but useless, can't return NULL. \
-     * Return a pointer to something useless. \
-     * Avoid asserts in typeArrayOop. */ \
-    result = (ElementType*)get_bad_address(); \
-  } else { \
-    /* JNI Specification states return NULL on OOM */                    \
-    result = NEW_C_HEAP_ARRAY_RETURN_NULL(ElementType, len, mtInternal); \
-    if (result != NULL) {                                                \
-      /* copy the array to the c chunk */                                \
-      memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len);      \
-      if (isCopy) {                                                      \
-        *isCopy = JNI_TRUE;                                              \
-      }                                                                  \
-    }                                                                    \
-  } \
-  DTRACE_PROBE1(hotspot_jni, Get##Result##ArrayElements__return, result);\
-  return result; \
-JNI_END
-
-DEFINE_GETSCALARARRAYELEMENTS(T_BOOLEAN, jboolean, Boolean, bool)
-DEFINE_GETSCALARARRAYELEMENTS(T_BYTE,    jbyte,    Byte,    byte)
-DEFINE_GETSCALARARRAYELEMENTS(T_SHORT,   jshort,   Short,   short)
-DEFINE_GETSCALARARRAYELEMENTS(T_CHAR,    jchar,    Char,    char)
-DEFINE_GETSCALARARRAYELEMENTS(T_INT,     jint,     Int,     int)
-DEFINE_GETSCALARARRAYELEMENTS(T_LONG,    jlong,    Long,    long)
-DEFINE_GETSCALARARRAYELEMENTS(T_FLOAT,   jfloat,   Float,   float)
-DEFINE_GETSCALARARRAYELEMENTS(T_DOUBLE,  jdouble,  Double,  double)
-
-#else /* USDT2 */
 
 #define DEFINE_GETSCALARARRAYELEMENTS(ElementTag,ElementType,Result, Tag \
                                       , EntryProbe, ReturnProbe) \
@@ -3691,39 +2780,7 @@
 DEFINE_GETSCALARARRAYELEMENTS(T_DOUBLE,  jdouble,  Double,  double
                               , HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_ENTRY(env, array, (uintptr_t *) isCopy),
                               HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_RETURN(result))
-#endif /* USDT2 */
-
-#ifndef USDT2
-#define DEFINE_RELEASESCALARARRAYELEMENTS(ElementTag,ElementType,Result,Tag) \
-\
-JNI_QUICK_ENTRY(void, \
-          jni_Release##Result##ArrayElements(JNIEnv *env, ElementType##Array array, \
-                                             ElementType *buf, jint mode)) \
-  JNIWrapper("Release" XSTR(Result) "ArrayElements"); \
-  DTRACE_PROBE4(hotspot_jni, Release##Result##ArrayElements__entry, env, array, buf, mode);\
-  typeArrayOop a = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  int len = a->length(); \
-  if (len != 0) {   /* Empty array:  nothing to free or copy. */  \
-    if ((mode == 0) || (mode == JNI_COMMIT)) { \
-      memcpy(a->Tag##_at_addr(0), buf, sizeof(ElementType)*len); \
-    } \
-    if ((mode == 0) || (mode == JNI_ABORT)) { \
-      FreeHeap(buf); \
-    } \
-  } \
-  DTRACE_PROBE(hotspot_jni, Release##Result##ArrayElements__return);\
-JNI_END
-
-DEFINE_RELEASESCALARARRAYELEMENTS(T_BOOLEAN, jboolean, Boolean, bool)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_BYTE,    jbyte,    Byte,    byte)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_SHORT,   jshort,   Short,   short)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_CHAR,    jchar,    Char,    char)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_INT,     jint,     Int,     int)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_LONG,    jlong,    Long,    long)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_FLOAT,   jfloat,   Float,   float)
-DEFINE_RELEASESCALARARRAYELEMENTS(T_DOUBLE,  jdouble,  Double,  double)
-
-#else /* USDT2 */
 
 #define DEFINE_RELEASESCALARARRAYELEMENTS(ElementTag,ElementType,Result,Tag \
                                           , EntryProbe, ReturnProbe);\
@@ -3770,41 +2827,7 @@
 DEFINE_RELEASESCALARARRAYELEMENTS(T_DOUBLE,  jdouble,  Double,  double
                                   , HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_ENTRY(env, array, (double *) buf, mode),
                                   HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_RETURN())
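
The mode argument decides what the release entry points above do with the C-heap copy: 0 copies back and frees, JNI_COMMIT copies back but keeps the buffer, JNI_ABORT frees without copying. Caller-side sketch:

#include <jni.h>

static void double_all(JNIEnv* env, jintArray arr) {
  jint* buf = env->GetIntArrayElements(arr, NULL);  // NULL on OOM per the spec
  if (buf == NULL) return;
  jsize n = env->GetArrayLength(arr);
  for (jsize i = 0; i < n; i++) buf[i] *= 2;
  env->ReleaseIntArrayElements(arr, buf, 0);  // mode 0: write back, then free
}
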
-#endif /* USDT2 */
-
-#ifndef USDT2
-#define DEFINE_GETSCALARARRAYREGION(ElementTag,ElementType,Result, Tag) \
-  DT_VOID_RETURN_MARK_DECL(Get##Result##ArrayRegion);\
-\
-JNI_ENTRY(void, \
-jni_Get##Result##ArrayRegion(JNIEnv *env, ElementType##Array array, jsize start, \
-             jsize len, ElementType *buf)) \
-  JNIWrapper("Get" XSTR(Result) "ArrayRegion"); \
-  DTRACE_PROBE5(hotspot_jni, Get##Result##ArrayRegion__entry, env, array, start, len, buf);\
-  DT_VOID_RETURN_MARK(Get##Result##ArrayRegion); \
-  typeArrayOop src = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  if (start < 0 || len < 0 || ((unsigned int)start + (unsigned int)len > (unsigned int)src->length())) { \
-    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); \
-  } else { \
-    if (len > 0) { \
-      int sc = TypeArrayKlass::cast(src->klass())->log2_element_size(); \
-      memcpy((u_char*) buf, \
-             (u_char*) src->Tag##_at_addr(start), \
-             len << sc);                          \
-    } \
-  } \
-JNI_END
-
-DEFINE_GETSCALARARRAYREGION(T_BOOLEAN, jboolean,Boolean, bool)
-DEFINE_GETSCALARARRAYREGION(T_BYTE,    jbyte,   Byte,    byte)
-DEFINE_GETSCALARARRAYREGION(T_SHORT,   jshort,  Short,   short)
-DEFINE_GETSCALARARRAYREGION(T_CHAR,    jchar,   Char,    char)
-DEFINE_GETSCALARARRAYREGION(T_INT,     jint,    Int,     int)
-DEFINE_GETSCALARARRAYREGION(T_LONG,    jlong,   Long,    long)
-DEFINE_GETSCALARARRAYREGION(T_FLOAT,   jfloat,  Float,   float)
-DEFINE_GETSCALARARRAYREGION(T_DOUBLE,  jdouble, Double,  double)
-
-#else /* USDT2 */
 
 #define DEFINE_GETSCALARARRAYREGION(ElementTag,ElementType,Result, Tag \
                                     , EntryProbe, ReturnProbe); \
@@ -3854,41 +2877,7 @@
 DEFINE_GETSCALARARRAYREGION(T_DOUBLE,  jdouble, Double,  double
                             , HOTSPOT_JNI_GETDOUBLEARRAYREGION_ENTRY(env, array, start, len, (double *) buf),
                             HOTSPOT_JNI_GETDOUBLEARRAYREGION_RETURN());
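
The region entry points copy a bounds-checked slice straight into a caller-supplied buffer; out-of-range indices raise ArrayIndexOutOfBoundsException, as the macro above shows. Sketch:

#include <jni.h>

static jint sum_prefix(JNIEnv* env, jintArray arr, jsize n) {
  jint buf[16];
  if (n > 16) n = 16;
  env->GetIntArrayRegion(arr, 0, n, buf);  // throws if [0, n) is out of range
  if (env->ExceptionCheck()) return 0;     // range was invalid; buf is untouched
  jint sum = 0;
  for (jsize i = 0; i < n; i++) sum += buf[i];
  return sum;
}
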
-#endif /* USDT2 */
-
-#ifndef USDT2
-#define DEFINE_SETSCALARARRAYREGION(ElementTag,ElementType,Result, Tag) \
-  DT_VOID_RETURN_MARK_DECL(Set##Result##ArrayRegion);\
-\
-JNI_ENTRY(void, \
-jni_Set##Result##ArrayRegion(JNIEnv *env, ElementType##Array array, jsize start, \
-             jsize len, const ElementType *buf)) \
-  JNIWrapper("Set" XSTR(Result) "ArrayRegion"); \
-  DTRACE_PROBE5(hotspot_jni, Set##Result##ArrayRegion__entry, env, array, start, len, buf);\
-  DT_VOID_RETURN_MARK(Set##Result##ArrayRegion); \
-  typeArrayOop dst = typeArrayOop(JNIHandles::resolve_non_null(array)); \
-  if (start < 0 || len < 0 || ((unsigned int)start + (unsigned int)len > (unsigned int)dst->length())) { \
-    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException()); \
-  } else { \
-    if (len > 0) { \
-      int sc = TypeArrayKlass::cast(dst->klass())->log2_element_size(); \
-      memcpy((u_char*) dst->Tag##_at_addr(start), \
-             (u_char*) buf, \
-             len << sc);    \
-    } \
-  } \
-JNI_END
-
-DEFINE_SETSCALARARRAYREGION(T_BOOLEAN, jboolean, Boolean, bool)
-DEFINE_SETSCALARARRAYREGION(T_BYTE,    jbyte,    Byte,    byte)
-DEFINE_SETSCALARARRAYREGION(T_SHORT,   jshort,   Short,   short)
-DEFINE_SETSCALARARRAYREGION(T_CHAR,    jchar,    Char,    char)
-DEFINE_SETSCALARARRAYREGION(T_INT,     jint,     Int,     int)
-DEFINE_SETSCALARARRAYREGION(T_LONG,    jlong,    Long,    long)
-DEFINE_SETSCALARARRAYREGION(T_FLOAT,   jfloat,   Float,   float)
-DEFINE_SETSCALARARRAYREGION(T_DOUBLE,  jdouble,  Double,  double)
-
-#else /* USDT2 */
 
 #define DEFINE_SETSCALARARRAYREGION(ElementTag,ElementType,Result, Tag \
                                     , EntryProbe, ReturnProbe); \
@@ -3938,7 +2927,6 @@
 DEFINE_SETSCALARARRAYREGION(T_DOUBLE,  jdouble,  Double,  double
                             , HOTSPOT_JNI_SETDOUBLEARRAYREGION_ENTRY(env, array, start, len, (double *) buf),
                             HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN())
-#endif /* USDT2 */
 
 
 //
@@ -4023,23 +3011,14 @@
   return true;
 }
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(RegisterNatives, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(RegisterNatives, jint
                     , HOTSPOT_JNI_REGISTERNATIVES_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jint, jni_RegisterNatives(JNIEnv *env, jclass clazz,
                                     const JNINativeMethod *methods,
                                     jint nMethods))
   JNIWrapper("RegisterNatives");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, RegisterNatives__entry, env, clazz, methods, nMethods);
-#else /* USDT2 */
-  HOTSPOT_JNI_REGISTERNATIVES_ENTRY(
-                                    env, clazz, (void *) methods, nMethods);
-#endif /* USDT2 */
+  HOTSPOT_JNI_REGISTERNATIVES_ENTRY(env, clazz, (void *) methods, nMethods);
   jint ret = 0;
   DT_RETURN_MARK(RegisterNatives, jint, (const jint&)ret);
 
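The table RegisterNatives consumes is the standard JNINativeMethod triple of name, descriptor, and function pointer. A sketch; the method below is hypothetical.

#include <jni.h>

static jint JNICALL native_add(JNIEnv*, jclass, jint a, jint b) { return a + b; }

static jint register_math(JNIEnv* env, jclass cls) {
  static const JNINativeMethod methods[] = {
    { (char*)"add", (char*)"(II)I", (void*)native_add },
  };
  // Returns JNI_OK (0) on success, matching the ret convention above.
  return env->RegisterNatives(cls, methods, 1);
}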
@@ -4077,12 +3056,7 @@
 
 JNI_ENTRY(jint, jni_UnregisterNatives(JNIEnv *env, jclass clazz))
   JNIWrapper("UnregisterNatives");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, UnregisterNatives__entry, env, clazz);
-#else /* USDT2 */
- HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(
-                                     env, clazz);
-#endif /* USDT2 */
+  HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(env, clazz);
   Klass* k   = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
   //%note jni_2
   if (k->oop_is_instance()) {
@@ -4094,12 +3068,7 @@
       }
     }
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, UnregisterNatives__return, 0);
-#else /* USDT2 */
- HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(
-                                      0);
-#endif /* USDT2 */
+  HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(0);
   return 0;
 JNI_END
 
@@ -4107,20 +3076,11 @@
 // Monitor functions
 //
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(MonitorEnter, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(MonitorEnter, jint
                     , HOTSPOT_JNI_MONITORENTER_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jint, jni_MonitorEnter(JNIEnv *env, jobject jobj))
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, MonitorEnter__entry, env, jobj);
-#else /* USDT2 */
- HOTSPOT_JNI_MONITORENTER_ENTRY(
-                                env, jobj);
-#endif /* USDT2 */
+  HOTSPOT_JNI_MONITORENTER_ENTRY(env, jobj);
   jint ret = JNI_ERR;
   DT_RETURN_MARK(MonitorEnter, jint, (const jint&)ret);
 
@@ -4135,20 +3095,11 @@
   return ret;
 JNI_END
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(MonitorExit, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(MonitorExit, jint
                     , HOTSPOT_JNI_MONITOREXIT_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 JNI_ENTRY(jint, jni_MonitorExit(JNIEnv *env, jobject jobj))
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, MonitorExit__entry, env, jobj);
-#else /* USDT2 */
- HOTSPOT_JNI_MONITOREXIT_ENTRY(
-                               env, jobj);
-#endif /* USDT2 */
+  HOTSPOT_JNI_MONITOREXIT_ENTRY(env, jobj);
   jint ret = JNI_ERR;
   DT_RETURN_MARK(MonitorExit, jint, (const jint&)ret);
 
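MonitorEnter and MonitorExit must be strictly paired by the native caller; a monitor acquired through these entry points is not released by returning to Java. Sketch:

#include <jni.h>

static void locked_update(JNIEnv* env, jobject lock) {
  if (env->MonitorEnter(lock) != JNI_OK) return;  // JNI_ERR on failure, as above
  // ... critical section: this thread owns lock's monitor here ...
  env->MonitorExit(lock);  // must balance the MonitorEnter on every path
}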
@@ -4168,21 +3119,12 @@
 // Extensions
 //
 
-#ifndef USDT2
-DT_VOID_RETURN_MARK_DECL(GetStringRegion);
-#else /* USDT2 */
 DT_VOID_RETURN_MARK_DECL(GetStringRegion
                          , HOTSPOT_JNI_GETSTRINGREGION_RETURN());
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_GetStringRegion(JNIEnv *env, jstring string, jsize start, jsize len, jchar *buf))
   JNIWrapper("GetStringRegion");
-#ifndef USDT2
-  DTRACE_PROBE5(hotspot_jni, GetStringRegion__entry, env, string, start, len, buf);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGREGION_ENTRY(
-                                   env, string, start, len, buf);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGREGION_ENTRY(env, string, start, len, buf);
   DT_VOID_RETURN_MARK(GetStringRegion);
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
@@ -4197,21 +3139,12 @@
   }
 JNI_END
 
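GetStringRegion (and GetStringUTFRegion below) copies a range-checked slice into a caller buffer, the string analogue of the array region calls; no release call is needed. Sketch:

#include <jni.h>

static void copy_prefix(JNIEnv* env, jstring s, jchar* out, jsize cap) {
  jsize len = env->GetStringLength(s);
  if (len > cap) len = cap;
  env->GetStringRegion(s, 0, len, out);  // throws StringIndexOutOfBoundsException on a bad range
}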
-#ifndef USDT2
-DT_VOID_RETURN_MARK_DECL(GetStringUTFRegion);
-#else /* USDT2 */
 DT_VOID_RETURN_MARK_DECL(GetStringUTFRegion
                          , HOTSPOT_JNI_GETSTRINGUTFREGION_RETURN());
-#endif /* USDT2 */
 
 JNI_ENTRY(void, jni_GetStringUTFRegion(JNIEnv *env, jstring string, jsize start, jsize len, char *buf))
   JNIWrapper("GetStringUTFRegion");
-#ifndef USDT2
-  DTRACE_PROBE5(hotspot_jni, GetStringUTFRegion__entry, env, string, start, len, buf);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(
-                                      env, string, start, len, buf);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(env, string, start, len, buf);
   DT_VOID_RETURN_MARK(GetStringUTFRegion);
   oop s = JNIHandles::resolve_non_null(string);
   int s_len = java_lang_String::length(s);
@@ -4237,12 +3170,7 @@
 
 JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
   JNIWrapper("GetPrimitiveArrayCritical");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetPrimitiveArrayCritical__entry, env, array, isCopy);
-#else /* USDT2 */
- HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(
-                                             env, array, (uintptr_t *) isCopy);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy);
   GC_locker::lock_critical(thread);
   if (isCopy != NULL) {
     *isCopy = JNI_FALSE;
@@ -4256,43 +3184,23 @@
     type = TypeArrayKlass::cast(a->klass())->element_type();
   }
   void* ret = arrayOop(a)->base(type);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetPrimitiveArrayCritical__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(
-                                              ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, void *carray, jint mode))
   JNIWrapper("ReleasePrimitiveArrayCritical");
-#ifndef USDT2
-  DTRACE_PROBE4(hotspot_jni, ReleasePrimitiveArrayCritical__entry, env, array, carray, mode);
-#else /* USDT2 */
-  HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(
-                                                  env, array, carray, mode);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
   // The array, carray and mode arguments are ignored
   GC_locker::unlock_critical(thread);
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ReleasePrimitiveArrayCritical__return);
-#else /* USDT2 */
-HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN(
-);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
 JNI_END
 
 
 JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringCritical");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetStringCritical__entry, env, string, isCopy);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(
-                                      env, string, (uintptr_t *) isCopy);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
   GC_locker::lock_critical(thread);
   if (isCopy != NULL) {
     *isCopy = JNI_FALSE;
@@ -4307,89 +3215,44 @@
   } else {
     ret = (jchar*) s_value->base(T_CHAR);
   }
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetStringCritical__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN(
-                                      (uint16_t *) ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN((uint16_t *) ret);
   return ret;
 JNI_END
 
 
 JNI_ENTRY(void, jni_ReleaseStringCritical(JNIEnv *env, jstring str, const jchar *chars))
   JNIWrapper("ReleaseStringCritical");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, ReleaseStringCritical__entry, env, str, chars);
-#else /* USDT2 */
-  HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(
-                                          env, str, (uint16_t *) chars);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(env, str, (uint16_t *) chars);
   // The str and chars arguments are ignored
   GC_locker::unlock_critical(thread);
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, ReleaseStringCritical__return);
-#else /* USDT2 */
-HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN(
-);
-#endif /* USDT2 */
+  HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
 JNI_END
 
 
 JNI_ENTRY(jweak, jni_NewWeakGlobalRef(JNIEnv *env, jobject ref))
   JNIWrapper("jni_NewWeakGlobalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, NewWeakGlobalRef__entry, env, ref);
-#else /* USDT2 */
- HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(
-                                    env, ref);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(env, ref);
   Handle ref_handle(thread, JNIHandles::resolve(ref));
   jweak ret = JNIHandles::make_weak_global(ref_handle);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, NewWeakGlobalRef__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(
-                                     ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(ret);
   return ret;
 JNI_END
 
 // Must be JNI_ENTRY (with HandleMark)
 JNI_ENTRY(void, jni_DeleteWeakGlobalRef(JNIEnv *env, jweak ref))
   JNIWrapper("jni_DeleteWeakGlobalRef");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, DeleteWeakGlobalRef__entry, env, ref);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(
-                                        env, ref);
-#endif /* USDT2 */
+  HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(env, ref);
   JNIHandles::destroy_weak_global(ref);
-#ifndef USDT2
-  DTRACE_PROBE(hotspot_jni, DeleteWeakGlobalRef__return);
-#else /* USDT2 */
-  HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN(
-                                         );
-#endif /* USDT2 */
+  HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN();
 JNI_END
 
 
 JNI_QUICK_ENTRY(jboolean, jni_ExceptionCheck(JNIEnv *env))
   JNIWrapper("jni_ExceptionCheck");
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionCheck__entry, env);
-#else /* USDT2 */
- HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(
-                                  env);
-#endif /* USDT2 */
+  HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(env);
   jni_check_async_exceptions(thread);
   jboolean ret = (thread->has_pending_exception()) ? JNI_TRUE : JNI_FALSE;
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, ExceptionCheck__return, ret);
-#else /* USDT2 */
- HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(
-                                   ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(ret);
   return ret;
 JNI_END
 
@@ -4449,8 +3312,23 @@
 
     // Get needed field and method IDs
     directByteBufferConstructor = env->GetMethodID(directByteBufferClass, "<init>", "(JI)V");
+    if (env->ExceptionCheck()) {
+      env->ExceptionClear();
+      directBufferSupportInitializeFailed = 1;
+      return false;
+    }
     directBufferAddressField    = env->GetFieldID(bufferClass, "address", "J");
+    if (env->ExceptionCheck()) {
+      env->ExceptionClear();
+      directBufferSupportInitializeFailed = 1;
+      return false;
+    }
     bufferCapacityField         = env->GetFieldID(bufferClass, "capacity", "I");
+    if (env->ExceptionCheck()) {
+      env->ExceptionClear();
+      directBufferSupportInitializeFailed = 1;
+      return false;
+    }
 
     if ((directByteBufferConstructor == NULL) ||
         (directBufferAddressField    == NULL) ||
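The guards added in the hunk above follow the standard JNI pattern for initialization code: after each GetMethodID/GetFieldID, test for a pending exception, clear it, and fail fast rather than continue with a NULL id. A minimal sketch of the same pattern from an embedder's point of view (the helper name is illustrative, not part of HotSpot):

#include <jni.h>

// Hypothetical helper: look up a field id, converting a pending JNI
// exception (e.g. NoSuchFieldError) into a NULL result for the caller.
static jfieldID lookup_field_or_fail(JNIEnv* env, jclass cls,
                                     const char* name, const char* sig) {
  jfieldID fid = env->GetFieldID(cls, name, sig);
  if (env->ExceptionCheck()) {
    env->ExceptionClear();   // swallow it; caller treats NULL as failure
    return NULL;
  }
  return fid;
}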
@@ -4481,21 +3359,11 @@
   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
 
   JNIWrapper("jni_NewDirectByteBuffer");
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, NewDirectByteBuffer__entry, env, address, capacity);
-#else /* USDT2 */
- HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(
-                                       env, address, capacity);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(env, address, capacity);
 
   if (!directBufferSupportInitializeEnded) {
     if (!initializeDirectBufferSupport(env, thread)) {
-#ifndef USDT2
-      DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, NULL);
-#else /* USDT2 */
-      HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(
-                                             NULL);
-#endif /* USDT2 */
+      HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(NULL);
       return NULL;
     }
   }
@@ -4506,21 +3374,12 @@
   // takes int capacity
   jint  cap  = (jint)  capacity;
   jobject ret = env->NewObject(directByteBufferClass, directByteBufferConstructor, addr, cap);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(
-                                         ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(ret);
   return ret;
 }
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetDirectBufferAddress, void*);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetDirectBufferAddress, void*
                     , HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_RETURN((void*) _ret_ref));
-#endif /* USDT2 */
 
 extern "C" void* JNICALL jni_GetDirectBufferAddress(JNIEnv *env, jobject buf)
 {
@@ -4528,12 +3387,7 @@
   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
 
   JNIWrapper("jni_GetDirectBufferAddress");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetDirectBufferAddress__entry, env, buf);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(
-                                           env, buf);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(env, buf);
   void* ret = NULL;
   DT_RETURN_MARK(GetDirectBufferAddress, void*, (const void*&)ret);
 
@@ -4551,12 +3405,8 @@
   return ret;
 }
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetDirectBufferCapacity, jlong);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetDirectBufferCapacity, jlong
                     , HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 extern "C" jlong JNICALL jni_GetDirectBufferCapacity(JNIEnv *env, jobject buf)
 {
@@ -4564,12 +3414,7 @@
   JavaThread* thread = JavaThread::thread_from_jni_environment(env);
 
   JNIWrapper("jni_GetDirectBufferCapacity");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetDirectBufferCapacity__entry, env, buf);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(
-                                            env, buf);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(env, buf);
   jlong ret = -1;
   DT_RETURN_MARK(GetDirectBufferCapacity, jlong, (const jlong&)ret);
 
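The DT_RETURN_MARK_DECL/DT_RETURN_MARK pair used here and throughout this file declares a small RAII class whose destructor fires the matching *_RETURN probe as the scope unwinds, reporting whatever the result slot holds at that point. A stripped-down sketch of the mechanism, with printf standing in for the probe (names are illustrative):

#include <cstdio>

template <typename T>
class ReturnMark {                 // cf. DT_RETURN_MARK
  const T& _ret_ref;               // bound to the function's result slot
 public:
  explicit ReturnMark(const T& ret) : _ret_ref(ret) {}
  ~ReturnMark() {
    // stands in for a HOTSPOT_JNI_*_RETURN(_ret_ref) probe
    std::printf("return probe fired: %lld\n", (long long)_ret_ref);
  }
};

static long long traced() {
  long long ret = -1;              // cf. "jlong ret = -1;" above
  ReturnMark<long long> mark(ret);
  ret = 42;
  return ret;                      // destructor reports 42 during unwind
}

int main() { return traced() == 42 ? 0 : 1; }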
@@ -4596,18 +3441,8 @@
 
 JNI_LEAF(jint, jni_GetVersion(JNIEnv *env))
   JNIWrapper("GetVersion");
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetVersion__entry, env);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETVERSION_ENTRY(
-                               env);
-#endif /* USDT2 */
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetVersion__return, CurrentVersion);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETVERSION_RETURN(
-                                CurrentVersion);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETVERSION_ENTRY(env);
+  HOTSPOT_JNI_GETVERSION_RETURN(CurrentVersion);
   return CurrentVersion;
 JNI_END
 
@@ -4615,19 +3450,9 @@
 
 JNI_LEAF(jint, jni_GetJavaVM(JNIEnv *env, JavaVM **vm))
   JNIWrapper("jni_GetJavaVM");
-#ifndef USDT2
-  DTRACE_PROBE2(hotspot_jni, GetJavaVM__entry, env, vm);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETJAVAVM_ENTRY(
-                              env, (void **) vm);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETJAVAVM_ENTRY(env, (void **) vm);
   *vm  = (JavaVM *)(&main_vm);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, GetJavaVM__return, JNI_OK);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETJAVAVM_RETURN(
-                               JNI_OK);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETJAVAVM_RETURN(JNI_OK);
   return JNI_OK;
 JNI_END
 
@@ -5005,21 +3830,11 @@
 #define JAVASTACKSIZE (400 * 1024)    /* Default size of a thread java stack */
 enum { VERIFY_NONE, VERIFY_REMOTE, VERIFY_ALL };
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL1(hotspot_jni, GetDefaultJavaVMInitArgs__entry, void*);
-DT_RETURN_MARK_DECL(GetDefaultJavaVMInitArgs, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetDefaultJavaVMInitArgs, jint
                     , HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot_jni, GetDefaultJavaVMInitArgs__entry, args_);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(
-                                             args_);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(args_);
   JDK1_1InitArgs *args = (JDK1_1InitArgs *)args_;
   jint ret = JNI_ERR;
   DT_RETURN_MARK(GetDefaultJavaVMInitArgs, jint, (const jint&)ret);
@@ -5064,8 +3879,12 @@
 void TestMetaspaceAux_test();
 void TestMetachunk_test();
 void TestVirtualSpaceNode_test();
+void TestNewSize_test();
+void TestKlass_test();
 #if INCLUDE_ALL_GCS
+void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
+void TestBufferingOopClosure_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -5084,12 +3903,16 @@
     run_unit_test(QuickSort::test_quick_sort());
     run_unit_test(AltHashing::test_alt_hash());
     run_unit_test(test_loggc_filename());
+    run_unit_test(TestNewSize_test());
+    run_unit_test(TestKlass_test());
 #if INCLUDE_VM_STRUCTS
     run_unit_test(VMStructs::test());
 #endif
 #if INCLUDE_ALL_GCS
+    run_unit_test(TestOldFreeSpaceCalculation_test());
     run_unit_test(TestG1BiasedArray_test());
     run_unit_test(HeapRegionRemSet::test_prt());
+    run_unit_test(TestBufferingOopClosure_test());
 #endif
     tty->print_cr("All internal VM tests passed");
   }
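Each line above expands through a run_unit_test wrapper. A plausible shape for such a wrapper is sketched below (HotSpot's actual macro may differ); a failing assertion inside any test aborts the run before the final message prints:

#include <cstdio>

// Illustrative stand-in: announce the test by name, then execute it.
#define run_unit_test(call)                    \
  do {                                         \
    std::printf("Running test: %s\n", #call);  \
    call;                                      \
  } while (0)

static void TestExample_test() { /* assertions would go here */ }

int main() {
  run_unit_test(TestExample_test());
  std::printf("All internal VM tests passed\n");
}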
@@ -5099,21 +3922,11 @@
 
 #endif
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
-DT_RETURN_MARK_DECL(CreateJavaVM, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(CreateJavaVM, jint
                     , HOTSPOT_JNI_CREATEJAVAVM_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM **vm, void **penv, void *args) {
-#ifndef USDT2
-  HS_DTRACE_PROBE3(hotspot_jni, CreateJavaVM__entry, vm, penv, args);
-#else /* USDT2 */
-  HOTSPOT_JNI_CREATEJAVAVM_ENTRY(
-                                 (void **) vm, penv, args);
-#endif /* USDT2 */
+  HOTSPOT_JNI_CREATEJAVAVM_ENTRY((void **) vm, penv, args);
 
   jint result = JNI_ERR;
   DT_RETURN_MARK(CreateJavaVM, jint, (const jint&)result);
@@ -5169,6 +3982,7 @@
   result = Threads::create_vm((JavaVMInitArgs*) args, &can_try_again);
   if (result == JNI_OK) {
     JavaThread *thread = JavaThread::current();
+    assert(!thread->has_pending_exception(), "should have returned not OK");
     /* thread is thread_in_vm here */
     *vm = (JavaVM *)(&main_vm);
     *(JNIEnv**)penv = thread->jni_environment();
@@ -5215,6 +4029,19 @@
     // Since this is not a JVM_ENTRY we have to set the thread state manually before leaving.
     ThreadStateTransition::transition_and_fence(thread, _thread_in_vm, _thread_in_native);
   } else {
+    // If create_vm exits because of a pending exception, exit with that
+    // exception.  In the future when we figure out how to reclaim memory,
+    // we may be able to exit with JNI_ERR and allow the calling application
+    // to continue.
+    if (Universe::is_fully_initialized()) {
+      // otherwise no pending exception possible - VM will already have aborted
+      JavaThread* THREAD = JavaThread::current();
+      if (HAS_PENDING_EXCEPTION) {
+        HandleMark hm;
+        vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+      }
+    }
+
     if (can_try_again) {
       // reset safe_to_recreate_vm to 1 so that a retry is possible
       safe_to_recreate_vm = 1;
@@ -5231,53 +4058,30 @@
   return result;
 }
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL3(hotspot_jni, GetCreatedJavaVMs__entry, \
-  JavaVM**, jsize, jsize*);
-HS_DTRACE_PROBE_DECL1(hotspot_jni, GetCreatedJavaVMs__return, jint);
-#endif /* !USDT2 */
 
 _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetCreatedJavaVMs(JavaVM **vm_buf, jsize bufLen, jsize *numVMs) {
   // See bug 4367188: the wrapper can sometimes cause VM crashes
   // JNIWrapper("GetCreatedJavaVMs");
-#ifndef USDT2
-  HS_DTRACE_PROBE3(hotspot_jni, GetCreatedJavaVMs__entry, \
-    vm_buf, bufLen, numVMs);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY(
-                                      (void **) vm_buf, bufLen, (uintptr_t *) numVMs);
-#endif /* USDT2 */
+
+  HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY((void **) vm_buf, bufLen, (uintptr_t *) numVMs);
+
   if (vm_created) {
     if (numVMs != NULL) *numVMs = 1;
     if (bufLen > 0)     *vm_buf = (JavaVM *)(&main_vm);
   } else {
     if (numVMs != NULL) *numVMs = 0;
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot_jni, GetCreatedJavaVMs__return, JNI_OK);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(
-                                    JNI_OK);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(JNI_OK);
   return JNI_OK;
 }
 
 extern "C" {
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(DestroyJavaVM, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(DestroyJavaVM, jint
                     , HOTSPOT_JNI_DESTROYJAVAVM_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 jint JNICALL jni_DestroyJavaVM(JavaVM *vm) {
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, DestroyJavaVM__entry, vm);
-#else /* USDT2 */
-  HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(
-                                  vm);
-#endif /* USDT2 */
+  HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(vm);
   jint res = JNI_ERR;
   DT_RETURN_MARK(DestroyJavaVM, jint, (const jint&)res);
 
@@ -5429,64 +4233,34 @@
 
 
 jint JNICALL jni_AttachCurrentThread(JavaVM *vm, void **penv, void *_args) {
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, AttachCurrentThread__entry, vm, penv, _args);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(
-                                        vm, penv, _args);
-#endif /* USDT2 */
+  HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(vm, penv, _args);
   if (!vm_created) {
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, JNI_ERR);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(
-                                         (uint32_t) JNI_ERR);
-#endif /* USDT2 */
+    HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
     return JNI_ERR;
   }
 
   JNIWrapper("AttachCurrentThread");
   jint ret = attach_current_thread(vm, penv, _args, false);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, AttachCurrentThread__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(
-                                         ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(ret);
   return ret;
 }
 
 
 jint JNICALL jni_DetachCurrentThread(JavaVM *vm)  {
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__entry, vm);
-#else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(
-                                        vm);
-#endif /* USDT2 */
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(vm);
   VM_Exit::block_if_vm_exited();
 
   JNIWrapper("DetachCurrentThread");
 
   // If the thread has been detached, the operation is a no-op
   if (ThreadLocalStorage::thread() == NULL) {
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK);
-#else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         JNI_OK);
-#endif /* USDT2 */
+    HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
     return JNI_OK;
   }
 
   JavaThread* thread = JavaThread::current();
   if (thread->has_last_Java_frame()) {
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_ERR);
-#else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         (uint32_t) JNI_ERR);
-#endif /* USDT2 */
+    HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN((uint32_t) JNI_ERR);
     // Can't detach a thread that's running Java; that can't work.
     return JNI_ERR;
   }
@@ -5507,29 +4281,15 @@
   thread->exit(false, JavaThread::jni_detach);
   delete thread;
 
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, DetachCurrentThread__return, JNI_OK);
-#else /* USDT2 */
-  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(
-                                         JNI_OK);
-#endif /* USDT2 */
+  HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(JNI_OK);
   return JNI_OK;
 }
 
-#ifndef USDT2
-DT_RETURN_MARK_DECL(GetEnv, jint);
-#else /* USDT2 */
 DT_RETURN_MARK_DECL(GetEnv, jint
                     , HOTSPOT_JNI_GETENV_RETURN(_ret_ref));
-#endif /* USDT2 */
 
 jint JNICALL jni_GetEnv(JavaVM *vm, void **penv, jint version) {
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, GetEnv__entry, vm, penv, version);
-#else /* USDT2 */
-  HOTSPOT_JNI_GETENV_ENTRY(
-                           vm, penv, version);
-#endif /* USDT2 */
+  HOTSPOT_JNI_GETENV_ENTRY(vm, penv, version);
   jint ret = JNI_ERR;
   DT_RETURN_MARK(GetEnv, jint, (const jint&)ret);
 
@@ -5583,30 +4343,15 @@
 
 
 jint JNICALL jni_AttachCurrentThreadAsDaemon(JavaVM *vm, void **penv, void *_args) {
-#ifndef USDT2
-  DTRACE_PROBE3(hotspot_jni, AttachCurrentThreadAsDaemon__entry, vm, penv, _args);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(
-                                                vm, penv, _args);
-#endif /* USDT2 */
+  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(vm, penv, _args);
   if (!vm_created) {
-#ifndef USDT2
-    DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, JNI_ERR);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(
-                                                 (uint32_t) JNI_ERR);
-#endif /* USDT2 */
+    HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN((uint32_t) JNI_ERR);
     return JNI_ERR;
   }
 
   JNIWrapper("AttachCurrentThreadAsDaemon");
   jint ret = attach_current_thread(vm, penv, _args, true);
-#ifndef USDT2
-  DTRACE_PROBE1(hotspot_jni, AttachCurrentThreadAsDaemon__return, ret);
-#else /* USDT2 */
-  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(
-                                                 ret);
-#endif /* USDT2 */
+  HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(ret);
   return ret;
 }
 
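All of the invocation-API entry points instrumented above (JNI_CreateJavaVM, JNI_GetCreatedJavaVMs, jni_DestroyJavaVM, and the attach/detach pair) are exercised by a minimal embedder like the following sketch (error handling trimmed; link against libjvm):

#include <jni.h>

int main() {
  JavaVM* vm;
  JNIEnv* env;
  JavaVMInitArgs args;
  args.version = JNI_VERSION_1_6;
  args.nOptions = 0;
  args.options = NULL;
  args.ignoreUnrecognizedOptions = JNI_FALSE;

  // Fires HOTSPOT_JNI_CREATEJAVAVM_ENTRY/_RETURN.
  if (JNI_CreateJavaVM(&vm, (void**)&env, &args) != JNI_OK) return 1;

  // Fires HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY/_RETURN; reports one VM.
  JavaVM* buf;
  jsize n = 0;
  JNI_GetCreatedJavaVMs(&buf, 1, &n);

  // Fires HOTSPOT_JNI_DESTROYJAVAVM_ENTRY/_RETURN.
  vm->DestroyJavaVM();
  return n == 1 ? 0 : 1;
}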
--- a/src/share/vm/prims/jniCheck.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jniCheck.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvm.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvm.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,18 +76,15 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "jvm_windows.h"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "jvm_aix.h"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "jvm_bsd.h"
 #endif
 
 #include <errno.h>
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL1(hotspot, thread__sleep__begin, long long);
-HS_DTRACE_PROBE_DECL1(hotspot, thread__sleep__end, int);
-HS_DTRACE_PROBE_DECL0(hotspot, thread__yield);
-#endif /* !USDT2 */
-
 /*
   NOTE about use of any ctor or function call that can trigger a safepoint/GC:
   such ctors and calls MUST NOT come between an oop declaration/init and its
@@ -520,6 +517,12 @@
   JavaThreadInObjectWaitState jtiows(thread, ms != 0);
   if (JvmtiExport::should_post_monitor_wait()) {
     JvmtiExport::post_monitor_wait((JavaThread *)THREAD, (oop)obj(), ms);
+
+    // The current thread already owns the monitor and it has not yet
+    // been added to the wait queue so the current thread cannot be
+    // made the successor. This means that the JVMTI_EVENT_MONITOR_WAIT
+    // event handler cannot accidentally consume an unpark() meant for
+    // the ParkEvent associated with this ObjectMonitor.
   }
   ObjectSynchronizer::wait(obj, ms, CHECK);
 JVM_END
@@ -1251,7 +1254,11 @@
   if (HAS_PENDING_EXCEPTION) {
     pending_exception = Handle(THREAD, PENDING_EXCEPTION);
     CLEAR_PENDING_EXCEPTION;
-
+    // JVMTI has already reported the pending exception
+    // JVMTI internal flag reset is needed in order to report PrivilegedActionException
+    if (THREAD->is_Java_thread()) {
+      JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+    }
     if ( pending_exception->is_a(SystemDictionary::Exception_klass()) &&
         !pending_exception->is_a(SystemDictionary::RuntimeException_klass())) {
       // Throw a java.security.PrivilegedActionException(Exception e) exception
@@ -2888,10 +2895,10 @@
     if (JvmtiExport::should_post_resource_exhausted()) {
       JvmtiExport::post_resource_exhausted(
         JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_THREADS,
-        "unable to create new native thread");
+        os::native_thread_creation_failed_msg());
     }
     THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
-              "unable to create new native thread");
+              os::native_thread_creation_failed_msg());
   }
 
   Thread::start(native_thread);
@@ -3015,11 +3022,8 @@
 JVM_ENTRY(void, JVM_Yield(JNIEnv *env, jclass threadClass))
   JVMWrapper("JVM_Yield");
   if (os::dont_yield()) return;
-#ifndef USDT2
-  HS_DTRACE_PROBE0(hotspot, thread__yield);
-#else /* USDT2 */
   HOTSPOT_THREAD_YIELD();
-#endif /* USDT2 */
+
   // When ConvertYieldToSleep is off (default), this matches the classic VM use of yield.
   // Critical for similar threading behaviour
   if (ConvertYieldToSleep) {
@@ -3045,12 +3049,7 @@
   // And set new thread state to SLEEPING.
   JavaThreadSleepState jtss(thread);
 
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot, thread__sleep__begin, millis);
-#else /* USDT2 */
-  HOTSPOT_THREAD_SLEEP_BEGIN(
-                             millis);
-#endif /* USDT2 */
+  HOTSPOT_THREAD_SLEEP_BEGIN(millis);
 
   EventThreadSleep event;
 
@@ -3078,12 +3077,8 @@
           event.set_time(millis);
           event.commit();
         }
-#ifndef USDT2
-        HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1);
-#else /* USDT2 */
-        HOTSPOT_THREAD_SLEEP_END(
-                                 1);
-#endif /* USDT2 */
+        HOTSPOT_THREAD_SLEEP_END(1);
+
         // TODO-FIXME: THROW_MSG returns, which means we will not call set_state()
         // to properly restore the thread state.  That's likely wrong.
         THROW_MSG(vmSymbols::java_lang_InterruptedException(), "sleep interrupted");
@@ -3095,12 +3090,7 @@
     event.set_time(millis);
     event.commit();
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0);
-#else /* USDT2 */
-  HOTSPOT_THREAD_SLEEP_END(
-                           0);
-#endif /* USDT2 */
+  HOTSPOT_THREAD_SLEEP_END(0);
 JVM_END
 
 JVM_ENTRY(jobject, JVM_CurrentThread(JNIEnv* env, jclass threadClass))
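The JVM_Sleep path above fires HOTSPOT_THREAD_SLEEP_BEGIN once and must fire HOTSPOT_THREAD_SLEEP_END on both exits: 1 for the interrupted path, 0 for normal completion. A toy version of that pairing, with the probes stubbed out as printfs (illustrative only):

#include <cstdio>

// Stand-ins for the USDT2 probe macros; a real build emits DTrace probes.
#define HOTSPOT_THREAD_SLEEP_BEGIN(ms)  std::printf("sleep begin: %lld ms\n", (long long)(ms))
#define HOTSPOT_THREAD_SLEEP_END(intr)  std::printf("sleep end: interrupted=%d\n", (intr))

static bool do_sleep(long long millis, bool interrupt_midway) {
  HOTSPOT_THREAD_SLEEP_BEGIN(millis);
  if (interrupt_midway) {
    HOTSPOT_THREAD_SLEEP_END(1);   // must fire before the throw-and-return path
    return false;                  // cf. THROW_MSG(... "sleep interrupted")
  }
  HOTSPOT_THREAD_SLEEP_END(0);
  return true;
}

int main() { do_sleep(100, false); do_sleep(100, true); }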
@@ -3984,40 +3974,6 @@
 }
 
 
-// Internal SQE debugging support ///////////////////////////////////////////////////////////
-
-#ifndef PRODUCT
-
-extern "C" {
-  JNIEXPORT jboolean JNICALL JVM_AccessVMBooleanFlag(const char* name, jboolean* value, jboolean is_get);
-  JNIEXPORT jboolean JNICALL JVM_AccessVMIntFlag(const char* name, jint* value, jboolean is_get);
-  JNIEXPORT void JNICALL JVM_VMBreakPoint(JNIEnv *env, jobject obj);
-}
-
-JVM_LEAF(jboolean, JVM_AccessVMBooleanFlag(const char* name, jboolean* value, jboolean is_get))
-  JVMWrapper("JVM_AccessBoolVMFlag");
-  return is_get ? CommandLineFlags::boolAt((char*) name, (bool*) value) : CommandLineFlags::boolAtPut((char*) name, (bool*) value, Flag::INTERNAL);
-JVM_END
-
-JVM_LEAF(jboolean, JVM_AccessVMIntFlag(const char* name, jint* value, jboolean is_get))
-  JVMWrapper("JVM_AccessVMIntFlag");
-  intx v;
-  jboolean result = is_get ? CommandLineFlags::intxAt((char*) name, &v) : CommandLineFlags::intxAtPut((char*) name, &v, Flag::INTERNAL);
-  *value = (jint)v;
-  return result;
-JVM_END
-
-
-JVM_ENTRY(void, JVM_VMBreakPoint(JNIEnv *env, jobject obj))
-  JVMWrapper("JVM_VMBreakPoint");
-  oop the_obj = JNIHandles::resolve(obj);
-  BREAKPOINT;
-JVM_END
-
-
-#endif
-
-
 // Method ///////////////////////////////////////////////////////////////////////////////////////////
 
 JVM_ENTRY(jobject, JVM_InvokeMethod(JNIEnv *env, jobject method, jobject obj, jobjectArray args0))
--- a/src/share/vm/prims/jvm.h	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvm.h	Wed Mar 12 13:30:08 2014 +0100
@@ -35,6 +35,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "jvm_windows.h"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "jvm_aix.h"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "jvm_bsd.h"
 #endif
--- a/src/share/vm/prims/jvm_misc.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvm_misc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -26,6 +26,7 @@
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
 #include "code/scopeDesc.hpp"
+#include "code/vtableStubs.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiCodeBlobEvents.hpp"
@@ -63,6 +64,7 @@
   // used during a collection
   static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
   static void do_blob(CodeBlob* cb);
+  static void do_vtable_stub(VtableStub* vs);
  public:
   CodeBlobCollector() {
     _code_blobs = NULL;
@@ -119,6 +121,10 @@
   if (cb->is_nmethod()) {
     return;
   }
+  // exclude VtableStubs, which are processed separately
+  if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
+    return;
+  }
 
   // check if this starting address has been seen already - the
   // assumption is that stubs are inserted into the list before the
@@ -136,6 +142,13 @@
   _global_code_blobs->append(scb);
 }
 
+// called for each VtableStub in VtableStubs
+
+void CodeBlobCollector::do_vtable_stub(VtableStub* vs) {
+  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(vs->is_vtable_stub() ? "vtable stub" : "itable stub",
+                                                 vs->code_begin(), vs->code_end());
+  _global_code_blobs->append(scb);
+}
 
 // collects a list of CodeBlobs in the CodeCache.
 //
@@ -166,6 +179,10 @@
     _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
   }
 
+  // Vtable stubs are not described with StubCodeDesc,
+  // process them separately
+  VtableStubs::vtable_stub_do(do_vtable_stub);
+
   // next iterate over all the non-nmethod code blobs and add them to
   // the list - as noted above this will filter out duplicates and
   // enclosing blobs.
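VtableStubs::vtable_stub_do hands every generated stub to the supplied function, which is how do_vtable_stub above runs once per stub. The enumeration shape, reduced to stand-in types (illustrative only):

#include <cstdio>
#include <vector>

// Stand-ins for VtableStub and the VtableStubs table.
struct Stub { bool is_vtable; const void* begin; const void* end; };
static std::vector<Stub> stubs;

// cf. VtableStubs::vtable_stub_do(do_vtable_stub)
static void vtable_stub_do(void (*f)(const Stub&)) {
  for (const Stub& s : stubs) f(s);
}

static void do_vtable_stub(const Stub& s) {
  std::printf("%s stub: %p..%p\n",
              s.is_vtable ? "vtable" : "itable", s.begin, s.end);
}

int main() { vtable_stub_do(do_vtable_stub); }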
--- a/src/share/vm/prims/jvmtiEnter.xsl	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnter.xsl	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 <?xml version="1.0"?> 
 <!--
- Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiEnv.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -999,8 +999,9 @@
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
       new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
 
-  uint32_t debug_bits = 0;
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == calling_thread) {
     err = get_owned_monitors(calling_thread, java_thread, owned_monitors_list);
   } else {
     // JVMTI get monitors info at safepoint. Do not require target thread to
@@ -1044,8 +1045,9 @@
   GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list =
          new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true);
 
-  uint32_t debug_bits = 0;
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == calling_thread) {
     err = get_owned_monitors(calling_thread, java_thread, owned_monitors_list);
   } else {
     // JVMTI get owned monitors info at safepoint. Do not require target thread to
@@ -1086,9 +1088,11 @@
 jvmtiError
 JvmtiEnv::GetCurrentContendedMonitor(JavaThread* java_thread, jobject* monitor_ptr) {
   jvmtiError err = JVMTI_ERROR_NONE;
-  uint32_t debug_bits = 0;
   JavaThread* calling_thread  = JavaThread::current();
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == calling_thread) {
     err = get_current_contended_monitor(calling_thread, java_thread, monitor_ptr);
   } else {
     // get contended monitor information at safepoint.
@@ -1297,8 +1301,10 @@
 jvmtiError
 JvmtiEnv::GetStackTrace(JavaThread* java_thread, jint start_depth, jint max_frame_count, jvmtiFrameInfo* frame_buffer, jint* count_ptr) {
   jvmtiError err = JVMTI_ERROR_NONE;
-  uint32_t debug_bits = 0;
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == JavaThread::current()) {
     err = get_stack_trace(java_thread, start_depth, max_frame_count, frame_buffer, count_ptr);
   } else {
     // JVMTI get stack trace at safepoint. Do not require target thread to
@@ -1360,8 +1366,10 @@
   if (state == NULL) {
     return JVMTI_ERROR_THREAD_NOT_ALIVE;
   }
-  uint32_t debug_bits = 0;
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == JavaThread::current()) {
     err = get_frame_count(state, count_ptr);
   } else {
     // get java stack frame count at safepoint.
@@ -1476,9 +1484,10 @@
 jvmtiError
 JvmtiEnv::GetFrameLocation(JavaThread* java_thread, jint depth, jmethodID* method_ptr, jlocation* location_ptr) {
   jvmtiError err = JVMTI_ERROR_NONE;
-  uint32_t debug_bits = 0;
-
-  if (is_thread_fully_suspended(java_thread, true, &debug_bits)) {
+
+  // It is only safe to perform the direct operation on the current
+  // thread. All other usage needs to use a vm-safepoint-op for safety.
+  if (java_thread == JavaThread::current()) {
     err = get_frame_location(java_thread, depth, method_ptr, location_ptr);
   } else {
     // JVMTI get java stack frame location at safepoint.
--- a/src/share/vm/prims/jvmtiEnvBase.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -356,8 +356,12 @@
   }
   VMOp_Type type() const { return VMOp_GetOwnedMonitorInfo; }
   void doit() {
-    ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread, _java_thread,
-                                                         _owned_monitors_list);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting()
+                                        && _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase *)_env)->get_owned_monitors(_calling_thread, _java_thread,
+                                                            _owned_monitors_list);
+    }
   }
   jvmtiError result() { return _result; }
 };
@@ -439,9 +443,13 @@
   jvmtiError result() { return _result; }
   VMOp_Type type() const { return VMOp_GetStackTrace; }
   void doit() {
-    _result = ((JvmtiEnvBase *)_env)->get_stack_trace(_java_thread,
-                                                      _start_depth, _max_count,
-                                                      _frame_buffer, _count_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting()
+                                        && _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase *)_env)->get_stack_trace(_java_thread,
+                                                        _start_depth, _max_count,
+                                                        _frame_buffer, _count_ptr);
+    }
   }
 };
 
@@ -533,7 +541,11 @@
   VMOp_Type type() const { return VMOp_GetFrameCount; }
   jvmtiError result()    { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    JavaThread* jt = _state->get_thread();
+    if (Threads::includes(jt) && !jt->is_exiting() && jt->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase*)_env)->get_frame_count(_state, _count_ptr);
+    }
   }
 };
 
@@ -559,8 +571,12 @@
   VMOp_Type type() const { return VMOp_GetFrameLocation; }
   jvmtiError result()    { return _result; }
   void doit() {
-    _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
-                                                        _method_ptr, _location_ptr);
+    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+    if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+        _java_thread->threadObj() != NULL) {
+      _result = ((JvmtiEnvBase*)_env)->get_frame_location(_java_thread, _depth,
+                                                          _method_ptr, _location_ptr);
+    }
   }
 };
 
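Every doit() above now applies the same three-part liveness guard before dereferencing the target thread, defaulting the result to JVMTI_ERROR_THREAD_NOT_ALIVE. Factored into a helper with stand-in types (HotSpot keeps the check inline in each VM op):

#include <cstddef>

// Stand-in for JavaThread; the three fields mirror the real checks.
struct JavaThread {
  bool in_threads_list;   // cf. Threads::includes(jt)
  bool exiting;           // cf. jt->is_exiting()
  void* thread_obj;       // cf. jt->threadObj(), NULL once unlinked
};

static bool is_alive_for_jvmti(const JavaThread* jt) {
  return jt->in_threads_list && !jt->exiting && jt->thread_obj != NULL;
}

int main() {
  JavaThread t = { true, false, NULL };
  return is_alive_for_jvmti(&t) ? 0 : 1;   // not alive: mirror is NULL
}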
--- a/src/share/vm/prims/jvmtiEnvThreadState.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiEnvThreadState.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -272,7 +272,7 @@
     // There can be a race condition between a VM_Operation reaching a safepoint
     // and the target thread exiting from Java execution.
     // We must recheck the last Java frame still exists.
-    if (_thread->has_last_Java_frame()) {
+    if (!_thread->is_exiting() && _thread->has_last_Java_frame()) {
       javaVFrame* vf = _thread->last_java_vframe(&rm);
       assert(vf != NULL, "must have last java frame");
       Method* method = vf->method();
--- a/src/share/vm/prims/jvmtiEventController.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiEventController.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiExport.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiExport.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2161,6 +2161,15 @@
   }
 }
 
+void JvmtiExport::clear_detected_exception(JavaThread* thread) {
+  assert(JavaThread::current() == thread, "thread is not current");
+
+  JvmtiThreadState* state = thread->jvmti_thread_state();
+  if (state != NULL) {
+    state->clear_exception_detected();
+  }
+}
+
 void JvmtiExport::oops_do(OopClosure* f) {
   JvmtiCurrentBreakpoints::oops_do(f);
   JvmtiVMObjectAllocEventCollector::oops_do_for_all_threads(f);
--- a/src/share/vm/prims/jvmtiExport.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiExport.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -363,6 +363,7 @@
   }
 
   static void cleanup_thread             (JavaThread* thread) NOT_JVMTI_RETURN;
+  static void clear_detected_exception   (JavaThread* thread) NOT_JVMTI_RETURN;
 
   static void oops_do(OopClosure* f) NOT_JVMTI_RETURN;
   static void weak_oops_do(BoolObjectClosure* b, OopClosure* f) NOT_JVMTI_RETURN;
--- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/jvmtiManageCapabilities.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiManageCapabilities.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -117,10 +117,10 @@
   jvmtiCapabilities jc;
 
   memset(&jc, 0, sizeof(jc));
-#ifndef CC_INTERP
+#ifndef ZERO
   jc.can_pop_frame = 1;
   jc.can_force_early_return = 1;
-#endif // !CC_INTERP
+#endif // !ZERO
   jc.can_get_source_debug_extension = 1;
   jc.can_access_local_variables = 1;
   jc.can_maintain_original_method_order = 1;
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -2790,6 +2790,7 @@
   return true;
 }
 
+#ifdef ASSERT
 // verify that a static oop field is in range
 static inline bool verify_static_oop(InstanceKlass* ik,
                                      oop mirror, int offset) {
@@ -2804,6 +2805,7 @@
     return false;
   }
 }
+#endif // #ifdef ASSERT
 
 // a class references its super class, interfaces, class loader, ...
 // and finally its static fields
@@ -3079,6 +3081,23 @@
               }
             }
           }
+
+          StackValueCollection* exprs = jvf->expressions();
+          for (int index=0; index < exprs->size(); index++) {
+            if (exprs->at(index)->type() == T_OBJECT) {
+              oop o = exprs->obj_at(index)();
+              if (o == NULL) {
+                continue;
+              }
+
+              // stack reference
+              if (!CallbackInvoker::report_stack_ref_root(thread_tag, tid, depth, method,
+                                                   bci, locals->size() + index, o)) {
+                return false;
+              }
+            }
+          }
+
         } else {
           blk->set_context(thread_tag, tid, depth, method);
           if (is_top_frame) {
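The block added above extends the heap-walk stack-root scan from locals to the operand stack, numbering operand-stack slots after the locals (slot = locals->size() + index) so JVMTI consumers see one flat slot space per frame. The numbering scheme in miniature, with stand-in types:

#include <cstdio>
#include <vector>

struct Slot { bool is_object; const void* value; };

static void report_stack_refs(const std::vector<Slot>& locals,
                              const std::vector<Slot>& exprs) {
  for (size_t i = 0; i < exprs.size(); i++) {
    if (exprs[i].is_object && exprs[i].value != NULL) {
      // cf. report_stack_ref_root(..., locals->size() + index, o)
      std::printf("operand-stack ref at slot %zu\n", locals.size() + i);
    }
  }
}

int main() {
  std::vector<Slot> locals(3), exprs(2);
  exprs[1].is_object = true;
  exprs[1].value = &locals;           // any non-NULL pointer will do
  report_stack_refs(locals, exprs);   // prints slot 4 (3 locals + index 1)
}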
--- a/src/share/vm/prims/jvmtiTrace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/jvmtiTrace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/methodHandles.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/methodHandles.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -175,8 +175,8 @@
 
 oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
   assert(info.resolved_appendix().is_null(), "only normal methods here");
-  KlassHandle receiver_limit = info.resolved_klass();
   methodHandle m = info.resolved_method();
+  KlassHandle m_klass = m->method_holder();
   int flags = (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
   int vmindex = Method::invalid_vtable_index;
 
@@ -184,14 +184,13 @@
   case CallInfo::itable_call:
     vmindex = info.itable_index();
     // More importantly, the itable index only works with the method holder.
-    receiver_limit = m->method_holder();
-    assert(receiver_limit->verify_itable_index(vmindex), "");
+    assert(m_klass->verify_itable_index(vmindex), "");
     flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
     if (TraceInvokeDynamic) {
       ResourceMark rm;
-      tty->print_cr("memberName: invokeinterface method_holder::method: %s, receiver: %s, itableindex: %d, access_flags:",
-            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
-            receiver_limit()->internal_name(), vmindex);
+      tty->print_cr("memberName: invokeinterface method_holder::method: %s, itableindex: %d, access_flags:",
+            Method::name_and_sig_as_C_string(m->method_holder(), m->name(), m->signature()),
+            vmindex);
        m->access_flags().print_on(tty);
        if (!m->is_abstract()) {
          tty->print("default");
@@ -203,12 +202,35 @@
   case CallInfo::vtable_call:
     vmindex = info.vtable_index();
     flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
-    assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
+    assert(info.resolved_klass()->is_subtype_of(m_klass()), "virtual call must be type-safe");
+    if (m_klass->is_interface()) {
+      // This is a vtable call to an interface method (abstract "miranda method" or default method).
+      // The vtable index is meaningless without a class (not interface) receiver type, so get one.
+      // (LinkResolver should help us figure this out.)
+      KlassHandle m_klass_non_interface = info.resolved_klass();
+      if (m_klass_non_interface->is_interface()) {
+        m_klass_non_interface = SystemDictionary::Object_klass();
+#ifdef ASSERT
+        { ResourceMark rm;
+          Method* m2 = m_klass_non_interface->vtable()->method_at(vmindex);
+          assert(m->name() == m2->name() && m->signature() == m2->signature(),
+                 err_msg("at %d, %s != %s", vmindex,
+                         m->name_and_sig_as_C_string(), m2->name_and_sig_as_C_string()));
+        }
+#endif //ASSERT
+      }
+      if (!m->is_public()) {
+        assert(m->is_public(), "virtual call must be to public interface method");
+        return NULL;  // elicit an error later in product build
+      }
+      assert(info.resolved_klass()->is_subtype_of(m_klass_non_interface()), "virtual call must be type-safe");
+      m_klass = m_klass_non_interface;
+    }
     if (TraceInvokeDynamic) {
       ResourceMark rm;
       tty->print_cr("memberName: invokevirtual method_holder::method: %s, receiver: %s, vtableindex: %d, access_flags:",
-            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
-            receiver_limit()->internal_name(), vmindex);
+            Method::name_and_sig_as_C_string(m->method_holder(), m->name(), m->signature()),
+            m_klass->internal_name(), vmindex);
        m->access_flags().print_on(tty);
        if (m->is_default_method()) {
          tty->print("default");
@@ -223,10 +245,8 @@
       flags |= IS_METHOD      | (JVM_REF_invokeStatic  << REFERENCE_KIND_SHIFT);
     } else if (m->is_initializer()) {
       flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-      assert(receiver_limit == m->method_holder(), "constructor call must be exactly typed");
     } else {
       flags |= IS_METHOD      | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
-      assert(receiver_limit->is_subtype_of(m->method_holder()), "special call must be type-safe");
     }
     break;
 
@@ -242,7 +262,7 @@
   java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
   java_lang_invoke_MemberName::set_vmtarget(mname_oop, m());
   java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex);   // vtable/itable index
-  java_lang_invoke_MemberName::set_clazz(   mname_oop, receiver_limit->java_mirror());
+  java_lang_invoke_MemberName::set_clazz(   mname_oop, m_klass->java_mirror());
   // Note:  name and type can be lazily computed by resolve_MemberName,
   // if Java code needs them as resolved String and MethodType objects.
   // The clazz must be eagerly stored, because it provides a GC
@@ -569,7 +589,7 @@
 // An unresolved member name is a mere symbolic reference.
 // Resolving it plants a vmtarget/vmindex in it,
 // which refers directly to JVM internals.
-Handle MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
+Handle MethodHandles::resolve_MemberName(Handle mname, KlassHandle caller, TRAPS) {
   Handle empty;
   assert(java_lang_invoke_MemberName::is_instance(mname()), "");
 
@@ -646,20 +666,20 @@
         assert(!HAS_PENDING_EXCEPTION, "");
         if (ref_kind == JVM_REF_invokeStatic) {
           LinkResolver::resolve_static_call(result,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else if (ref_kind == JVM_REF_invokeInterface) {
           LinkResolver::resolve_interface_call(result, Handle(), defc,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else if (mh_invoke_id != vmIntrinsics::_none) {
           assert(!is_signature_polymorphic_static(mh_invoke_id), "");
           LinkResolver::resolve_handle_call(result,
-                        defc, name, type, KlassHandle(), THREAD);
+                        defc, name, type, caller, THREAD);
         } else if (ref_kind == JVM_REF_invokeSpecial) {
           LinkResolver::resolve_special_call(result,
-                        defc, name, type, KlassHandle(), false, THREAD);
+                        defc, name, type, caller, caller.not_null(), THREAD);
         } else if (ref_kind == JVM_REF_invokeVirtual) {
           LinkResolver::resolve_virtual_call(result, Handle(), defc,
-                        defc, name, type, KlassHandle(), false, false, THREAD);
+                        defc, name, type, caller, caller.not_null(), false, THREAD);
         } else {
           assert(false, err_msg("ref_kind=%d", ref_kind));
         }
@@ -683,7 +703,7 @@
         assert(!HAS_PENDING_EXCEPTION, "");
         if (name == vmSymbols::object_initializer_name()) {
           LinkResolver::resolve_special_call(result,
-                        defc, name, type, KlassHandle(), false, THREAD);
+                        defc, name, type, caller, caller.not_null(), THREAD);
         } else {
           break;                // will throw after end of switch
         }
@@ -700,7 +720,7 @@
       fieldDescriptor result; // find_field initializes fd if found
       {
         assert(!HAS_PENDING_EXCEPTION, "");
-        LinkResolver::resolve_field(result, defc, name, type, KlassHandle(), Bytecodes::_nop, false, false, THREAD);
+        LinkResolver::resolve_field(result, defc, name, type, caller, Bytecodes::_nop, false, false, THREAD);
         if (HAS_PENDING_EXCEPTION) {
           return empty;
         }
@@ -1121,7 +1141,11 @@
     }
   }
 
-  Handle resolved = MethodHandles::resolve_MemberName(mname, CHECK_NULL);
+  KlassHandle caller(THREAD,
+                     caller_jh == NULL ? (Klass*) NULL :
+                     java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh)));
+  Handle resolved = MethodHandles::resolve_MemberName(mname, caller, CHECK_NULL);
+
   if (resolved.is_null()) {
     int flags = java_lang_invoke_MemberName::flags(mname());
     int ref_kind = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
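The resolve_MemberName signature change above threads the caller's class into every LinkResolver call, so access checking runs against the real caller instead of a null KlassHandle. The effect in miniature, with stand-in types and deliberately simplified access rules (the real rules also cover packages and subclassing):

#include <cstdio>

struct Klass { const char* name; };

// Stand-in for a caller-sensitive resolution check.
static bool check_access(const Klass* caller, const Klass* holder,
                         bool target_is_public) {
  if (target_is_public) return true;
  // Non-public member: resolvable only when a caller is supplied
  // and it is the holder itself (simplification).
  return caller != NULL && caller == holder;
}

int main() {
  Klass holder = { "p.Foo" };
  std::printf("%d\n", check_access(NULL, &holder, false));     // 0: no caller
  std::printf("%d\n", check_access(&holder, &holder, false));  // 1: same class
}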
--- a/src/share/vm/prims/methodHandles.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/methodHandles.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -55,7 +55,7 @@
 
  public:
   // working with member names
-  static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
+  static Handle resolve_MemberName(Handle mname, KlassHandle caller, TRAPS); // compute vmtarget/vmindex from name/type
   static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
   static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
   static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
--- a/src/share/vm/prims/nativeLookup.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/nativeLookup.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/prims/perf.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/perf.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/unsafe.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/unsafe.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -44,11 +44,6 @@
  *      Implementation of class sun.misc.Unsafe
  */
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL3(hotspot, thread__park__begin, uintptr_t, int, long long);
-HS_DTRACE_PROBE_DECL1(hotspot, thread__park__end, uintptr_t);
-HS_DTRACE_PROBE_DECL1(hotspot, thread__unpark, uintptr_t);
-#endif /* !USDT2 */
 
 #define MAX_OBJECT_SIZE \
   ( arrayOopDesc::header_size(T_DOUBLE) * HeapWordSize \
@@ -162,6 +157,9 @@
 
 #define GET_FIELD_VOLATILE(obj, offset, type_name, v) \
   oop p = JNIHandles::resolve(obj); \
+  if (support_IRIW_for_not_multiple_copy_atomic_cpu) { \
+    OrderAccess::fence(); \
+  } \
   volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));
 
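The fence added to GET_FIELD_VOLATILE above guards IRIW (independent reads of independent writes) on CPUs that are not multiple-copy atomic, such as PPC. Roughly the same idea sketched with C++11 atomics instead of HotSpot's OrderAccess (illustrative only):

#include <atomic>

template <typename T>
T volatile_field_load(const std::atomic<T>* field, bool needs_iriw_fence) {
  if (needs_iriw_fence) {
    // cf. support_IRIW_for_not_multiple_copy_atomic_cpu: a full fence
    // before the load keeps independent readers mutually consistent.
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
  return field->load(std::memory_order_acquire);  // cf. OrderAccess::load_acquire
}

int main() {
  std::atomic<long> field(42);
  return volatile_field_load(&field, true) == 42 ? 0 : 1;
}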
 #define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
@@ -858,6 +856,11 @@
   strcpy(buf, "java/lang/");
   strcat(buf, ename);
   jclass cls = env->FindClass(buf);
+  if (env->ExceptionCheck()) {
+    env->ExceptionClear();
+    tty->print_cr("Unsafe: cannot throw %s because FindClass has failed", buf);
+    return;
+  }
   char* msg = NULL;
   env->ThrowNew(cls, msg);
 }
@@ -1206,20 +1209,12 @@
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
   UnsafeWrapper("Unsafe_Park");
   EventThreadPark event;
-#ifndef USDT2
-  HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time);
-#else /* USDT2 */
-   HOTSPOT_THREAD_PARK_BEGIN(
-                             (uintptr_t) thread->parker(), (int) isAbsolute, time);
-#endif /* USDT2 */
+  HOTSPOT_THREAD_PARK_BEGIN((uintptr_t) thread->parker(), (int) isAbsolute, time);
+
   JavaThreadParkedState jtps(thread, time != 0);
   thread->parker()->park(isAbsolute != 0, time);
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot, thread__park__end, thread->parker());
-#else /* USDT2 */
-  HOTSPOT_THREAD_PARK_END(
-                          (uintptr_t) thread->parker());
-#endif /* USDT2 */
+
+  HOTSPOT_THREAD_PARK_END((uintptr_t) thread->parker());
   if (event.should_commit()) {
     oop obj = thread->current_park_blocker();
     event.set_klass((obj != NULL) ? obj->klass() : NULL);
@@ -1258,12 +1253,7 @@
     }
   }
   if (p != NULL) {
-#ifndef USDT2
-    HS_DTRACE_PROBE1(hotspot, thread__unpark, p);
-#else /* USDT2 */
-    HOTSPOT_THREAD_UNPARK(
-                          (uintptr_t) p);
-#endif /* USDT2 */
+    HOTSPOT_THREAD_UNPARK((uintptr_t) p);
     p->unpark();
   }
 UNSAFE_END
--- a/src/share/vm/prims/wbtestmethods/parserTests.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/wbtestmethods/parserTests.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/prims/whitebox.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/whitebox.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -105,7 +105,7 @@
 WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   CollectorPolicy * p = Universe::heap()->collector_policy();
   gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
-    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Min alignment "SIZE_FORMAT" Max alignment "SIZE_FORMAT,
+    SIZE_FORMAT" Maximum heap "SIZE_FORMAT" Space alignment "SIZE_FORMAT" Heap alignment "SIZE_FORMAT,
     p->min_heap_byte_size(), p->initial_heap_byte_size(), p->max_heap_byte_size(),
     p->space_alignment(), p->heap_alignment());
 }
@@ -316,9 +316,10 @@
 
 WB_ENTRY(jint, WB_DeoptimizeMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  int result = 0;
+  CHECK_JNI_EXCEPTION_(env, result);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
-  int result = 0;
   nmethod* code;
   if (is_osr) {
     int bci = InvocationEntryBci;
@@ -344,6 +345,7 @@
 
 WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
@@ -355,6 +357,7 @@
 
 WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   if (is_osr) {
@@ -366,6 +369,7 @@
 
 WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobject method))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   MutexLockerEx mu(Compile_lock);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   return mh->queued_for_compilation();
@@ -373,6 +377,7 @@
 
 WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, CompLevel_none);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
   return (code != NULL ? code->comp_level() : CompLevel_none);
@@ -380,6 +385,7 @@
 
 WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION(env);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   if (is_osr) {
     mh->set_not_osr_compilable(comp_level, true /* report */, "WhiteBox");
@@ -390,6 +396,7 @@
 
 WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, InvocationEntryBci);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
   return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
@@ -397,6 +404,7 @@
 
 WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   bool result = mh->dont_inline();
   mh->set_dont_inline(value == JNI_TRUE);
@@ -414,6 +422,7 @@
 
 WB_ENTRY(jboolean, WB_TestSetForceInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   bool result = mh->force_inline();
   mh->set_force_inline(value == JNI_TRUE);
@@ -422,6 +431,7 @@
 
 WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level, jint bci))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   nmethod* nm = CompileBroker::compile_method(mh, bci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
   MutexLockerEx mu(Compile_lock);
@@ -430,6 +440,7 @@
 
 WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
   jmethodID jmid = reflected_method_to_jmid(thread, env, method);
+  CHECK_JNI_EXCEPTION(env);
   methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
   MutexLockerEx mu(Compile_lock);
   MethodData* mdo = mh->method_data();
@@ -616,14 +627,18 @@
         bool result = true;
         //  one by one registration natives for exception catching
         jclass exceptionKlass = env->FindClass(vmSymbols::java_lang_NoSuchMethodError()->as_C_string());
+        CHECK_JNI_EXCEPTION(env);
         for (int i = 0, n = sizeof(methods) / sizeof(methods[0]); i < n; ++i) {
           if (env->RegisterNatives(wbclass, methods + i, 1) != 0) {
             result = false;
-            if (env->ExceptionCheck() && env->IsInstanceOf(env->ExceptionOccurred(), exceptionKlass)) {
-              // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
-              // ignoring the exception
-              tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
+            jthrowable throwable_obj = env->ExceptionOccurred();
+            if (throwable_obj != NULL) {
               env->ExceptionClear();
+              if (env->IsInstanceOf(throwable_obj, exceptionKlass)) {
+                // j.l.NoSuchMethodError is thrown when a method can't be found or a method is not native
+                // ignoring the exception
+                tty->print_cr("Warning: 'NoSuchMethodError' on register of sun.hotspot.WhiteBox::%s%s", methods[i].name, methods[i].signature);
+              }
             } else {
               // register is failed w/o exception or w/ unexpected exception
               tty->print_cr("Warning: unexpected error on register of sun.hotspot.WhiteBox::%s%s. All methods will be unregistered", methods[i].name, methods[i].signature);
--- a/src/share/vm/prims/whitebox.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/prims/whitebox.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,24 @@
 #define WB_END JNI_END
 #define WB_METHOD_DECLARE(result_type) extern "C" result_type JNICALL
 
+#define CHECK_JNI_EXCEPTION_(env, value)                               \
+  do {                                                                 \
+    JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
+    if (HAS_PENDING_EXCEPTION) {                                       \
+      CLEAR_PENDING_EXCEPTION;                                         \
+      return(value);                                                   \
+    }                                                                  \
+  } while (0)
+
+#define CHECK_JNI_EXCEPTION(env)                                       \
+  do {                                                                 \
+    JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
+    if (HAS_PENDING_EXCEPTION) {                                       \
+      CLEAR_PENDING_EXCEPTION;                                         \
+      return;                                                          \
+    }                                                                  \
+  } while (0)
+
 class WhiteBox : public AllStatic {
  private:
   static bool _used;
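
Both macros above work by declaring a local JavaThread* named THREAD, the exact name the HAS_PENDING_EXCEPTION and CLEAR_PENDING_EXCEPTION macros expand against, so they can be dropped into any entry point that only has a JNIEnv. A usage sketch mirroring the whitebox.cpp call sites patched above; WB_Example is hypothetical, everything else comes from the surrounding code:

    // Sketch of a WhiteBox entry guarded by the new macro.
    WB_ENTRY(jboolean, WB_Example(JNIEnv* env, jobject o, jobject method))
      jmethodID jmid = reflected_method_to_jmid(thread, env, method);
      CHECK_JNI_EXCEPTION_(env, JNI_FALSE);  // early return if the lookup threw
      methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
      return mh->queued_for_compilation();
    WB_END
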
--- a/src/share/vm/runtime/advancedThresholdPolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -306,7 +306,7 @@
  *    profiling can start at level 0 and finish at level 3.
  *
  * b. 0 -> 2 -> 3 -> 4.
- *    This case occures when the load on C2 is deemed too high. So, instead of transitioning
+ *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
  *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
  *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
  *
--- a/src/share/vm/runtime/advancedThresholdPolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/advancedThresholdPolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/arguments.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -106,7 +109,7 @@
 const char*  Arguments::_java_vendor_url_bug    = DEFAULT_VENDOR_URL_BUG;
 const char*  Arguments::_sun_java_launcher      = DEFAULT_JAVA_LAUNCHER;
 int    Arguments::_sun_java_launcher_pid        = -1;
-bool   Arguments::_created_by_gamma_launcher    = false;
+bool   Arguments::_sun_java_launcher_is_altjvm  = false;
 
 // These parameters are reset in method parse_vm_init_args(JavaVMInitArgs*)
 bool   Arguments::_AlwaysCompileLoopMethods     = AlwaysCompileLoopMethods;
@@ -156,7 +159,8 @@
 
 // Process java launcher properties.
 void Arguments::process_sun_java_launcher_properties(JavaVMInitArgs* args) {
-  // See if sun.java.launcher or sun.java.launcher.pid is defined.
+  // See if sun.java.launcher, sun.java.launcher.is_altjvm or
+  // sun.java.launcher.pid is defined.
   // Must do this before setting up other system properties,
   // as some of them may depend on launcher type.
   for (int index = 0; index < args->nOptions; index++) {
@@ -167,6 +171,12 @@
       process_java_launcher_argument(tail, option->extraInfo);
       continue;
     }
+    if (match_option(option, "-Dsun.java.launcher.is_altjvm=", &tail)) {
+      if (strcmp(tail, "true") == 0) {
+        _sun_java_launcher_is_altjvm = true;
+      }
+      continue;
+    }
     if (match_option(option, "-Dsun.java.launcher.pid=", &tail)) {
       _sun_java_launcher_pid = atoi(tail);
       continue;
@@ -183,7 +193,7 @@
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.name", VM_Version::vm_name(),  false));
   PropertyList_add(&_system_properties, new SystemProperty("java.vm.info", VM_Version::vm_info_string(),  true));
 
-  // following are JVMTI agent writeable properties.
+  // Following are JVMTI agent writable properties.
   // Properties values are set to NULL and they are
   // os specific they are initialized in os::init_system_properties_values().
   _java_ext_dirs = new SystemProperty("java.ext.dirs", NULL,  true);
@@ -295,6 +305,7 @@
   { "UsePermISM",                    JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
   { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
+  { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
 #ifdef PRODUCT
   { "DesiredMethodLimit",
                            JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -888,7 +899,7 @@
     arg_len = equal_sign - argname;
   }
 
-  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
+  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
   if (found_flag != NULL) {
     char locked_message_buf[BUFLEN];
     found_flag->get_locked_message(locked_message_buf, BUFLEN);
@@ -1023,9 +1034,10 @@
     _java_command = value;
 
     // Record value in Arguments, but let it get passed to Java.
-  } else if (strcmp(key, "sun.java.launcher.pid") == 0) {
-    // launcher.pid property is private and is processed
-    // in process_sun_java_launcher_properties();
+  } else if (strcmp(key, "sun.java.launcher.is_altjvm") == 0 ||
+             strcmp(key, "sun.java.launcher.pid") == 0) {
+    // sun.java.launcher.is_altjvm and sun.java.launcher.pid properties are
+    // private and are processed in process_sun_java_launcher_properties();
     // the sun.java.launcher property is passed on to the java application
     FreeHeap(key);
     if (eq != NULL) {
@@ -1316,7 +1328,7 @@
   if (!FLAG_IS_DEFAULT(OldPLABSize)) {
     if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
       // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
-      // is.  In this situtation let CMSParPromoteBlocksToClaim follow
+      // is.  In this situation let CMSParPromoteBlocksToClaim follow
       // the value (either from the command line or ergonomics) of
       // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
       FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
@@ -1579,6 +1591,16 @@
     vm_exit(1);
   }
 
+  if (UseAdaptiveSizePolicy) {
+    // We don't want to limit adaptive heap sizing's freedom to adjust the heap
+    // unless the user actually sets these flags.
+    if (FLAG_IS_DEFAULT(MinHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MinHeapFreeRatio, 0);
+    }
+    if (FLAG_IS_DEFAULT(MaxHeapFreeRatio)) {
+      FLAG_SET_DEFAULT(MaxHeapFreeRatio, 100);
+    }
+  }
 
   // If InitialSurvivorRatio or MinSurvivorRatio were not specified, but the
   // SurvivorRatio has been set, reset their default values to SurvivorRatio +
@@ -1649,6 +1671,9 @@
   return result;
 }
 
+// Use static initialization to get the default before parsing
+static const uintx DefaultHeapBaseMinAddress = HeapBaseMinAddress;
+
 void Arguments::set_heap_size() {
   if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
     // Deprecated flag
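
The static above relies on namespace-scope initializers running at VM load time, before parse_vm_init_args() can overwrite HeapBaseMinAddress, so the built-in default is still available for the clamp added in the next hunk. A minimal illustration of the pattern with a hypothetical flag:

    // Hypothetical illustration: snapshot a flag's built-in default before
    // command-line parsing can modify the flag itself.
    uintx SomeAddressFlag = 2 * G;                               // built-in default
    static const uintx DefaultSomeAddressFlag = SomeAddressFlag; // captured pre-parse

    void clamp_after_parsing() {
      if (SomeAddressFlag < DefaultSomeAddressFlag) {
        SomeAddressFlag = DefaultSomeAddressFlag;                // never below the default
      }
    }
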
@@ -1680,6 +1705,23 @@
     if (UseCompressedOops) {
       // Limit the heap size to the maximum possible when using compressed oops
       julong max_coop_heap = (julong)max_heap_for_compressed_oops();
+
+      // HeapBaseMinAddress can be greater than default but not less than.
+      if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
+        if (HeapBaseMinAddress < DefaultHeapBaseMinAddress) {
+          if (PrintMiscellaneous && Verbose) {  // matches compressed oops printing flags
+            jio_fprintf(defaultStream::error_stream(),
+                        "HeapBaseMinAddress must be at least " UINTX_FORMAT
+                        " (" UINTX_FORMAT "G) which is greater than value given "
+                        UINTX_FORMAT "\n",
+                        DefaultHeapBaseMinAddress,
+                        DefaultHeapBaseMinAddress/G,
+                        HeapBaseMinAddress);
+          }
+          FLAG_SET_ERGO(uintx, HeapBaseMinAddress, DefaultHeapBaseMinAddress);
+        }
+      }
+
       if (HeapBaseMinAddress + MaxHeapSize < max_coop_heap) {
         // Heap should be above HeapBaseMinAddress to get zero based compressed oops
         // but it should be not less than default MaxHeapSize.
@@ -1810,9 +1852,6 @@
 
 void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) {
   _sun_java_launcher = strdup(launcher);
-  if (strcmp("gamma", _sun_java_launcher) == 0) {
-    _created_by_gamma_launcher = true;
-  }
 }
 
 bool Arguments::created_by_java_launcher() {
@@ -1820,8 +1859,8 @@
   return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0;
 }
 
-bool Arguments::created_by_gamma_launcher() {
-  return _created_by_gamma_launcher;
+bool Arguments::sun_java_launcher_is_altjvm() {
+  return _sun_java_launcher_is_altjvm;
 }
 
 //===========================================================================================================
@@ -1854,7 +1893,7 @@
 }
 
 bool Arguments::verify_percentage(uintx value, const char* name) {
-  if (value <= 100) {
+  if (is_percentage(value)) {
     return true;
   }
   jio_fprintf(defaultStream::error_stream(),
@@ -1942,6 +1981,34 @@
   return count_p < 2 && count_t < 2;
 }
 
+bool Arguments::verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio) {
+  if (!is_percentage(min_heap_free_ratio)) {
+    err_msg.print("MinHeapFreeRatio must have a value between 0 and 100");
+    return false;
+  }
+  if (min_heap_free_ratio > MaxHeapFreeRatio) {
+    err_msg.print("MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
+                  "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")", min_heap_free_ratio,
+                  MaxHeapFreeRatio);
+    return false;
+  }
+  return true;
+}
+
+bool Arguments::verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio) {
+  if (!is_percentage(max_heap_free_ratio)) {
+    err_msg.print("MaxHeapFreeRatio must have a value between 0 and 100");
+    return false;
+  }
+  if (max_heap_free_ratio < MinHeapFreeRatio) {
+    err_msg.print("MaxHeapFreeRatio (" UINTX_FORMAT ") must be greater than or "
+                  "equal to MinHeapFreeRatio (" UINTX_FORMAT ")", max_heap_free_ratio,
+                  MinHeapFreeRatio);
+    return false;
+  }
+  return true;
+}
+
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency() {
   check_gclog_consistency();
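
Returning the message through a FormatBuffer<80> rather than printing directly lets the same verifiers serve both command-line checking (the next hunk) and runtime flag updates, where the caller may want to surface the text somewhere other than stderr. A hedged sketch of such a non-parsing caller; the function and the FLAG_SET_MGMT setter are assumptions, the verifier is the one added above:

    // Hypothetical management-path caller reusing the verifier and its message.
    bool try_set_min_heap_free_ratio(uintx new_value, outputStream* out) {
      FormatBuffer<80> err_msg("");
      if (!Arguments::verify_MinHeapFreeRatio(err_msg, new_value)) {
        out->print_cr("%s", err_msg.buffer());  // report, do not commit the value
        return false;
      }
      FLAG_SET_MGMT(uintx, MinHeapFreeRatio, new_value);  // assumed setter macro
      return true;
    }
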
@@ -2047,8 +2114,6 @@
   status = status && verify_interval(AdaptiveSizePolicyWeight, 0, 100,
                               "AdaptiveSizePolicyWeight");
   status = status && verify_percentage(ThresholdTolerance, "ThresholdTolerance");
-  status = status && verify_percentage(MinHeapFreeRatio, "MinHeapFreeRatio");
-  status = status && verify_percentage(MaxHeapFreeRatio, "MaxHeapFreeRatio");
 
   // Divide by bucket size to prevent a large size from causing rollover when
   // calculating amount of memory needed to be allocated for the String table.
@@ -2058,15 +2123,19 @@
   status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize,
     (max_uintx / SymbolTable::bucket_size()), "SymbolTable size");
 
-  if (MinHeapFreeRatio > MaxHeapFreeRatio) {
-    jio_fprintf(defaultStream::error_stream(),
-                "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
-                "equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
-                MinHeapFreeRatio, MaxHeapFreeRatio);
-    status = false;
+  {
+    // Using "else if" below to avoid printing two error messages if min > max.
+    // This will also prevent us from reporting both min>100 and max>100 at the
+    // same time, but that is less annoying than printing two identical errors IMHO.
+    FormatBuffer<80> err_msg("");
+    if (!verify_MinHeapFreeRatio(err_msg, MinHeapFreeRatio)) {
+      jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer());
+      status = false;
+    } else if (!verify_MaxHeapFreeRatio(err_msg, MaxHeapFreeRatio)) {
+      jio_fprintf(defaultStream::error_stream(), "%s\n", err_msg.buffer());
+      status = false;
+    }
   }
-  // Keeping the heap 100% free is hard ;-) so limit it to 99%.
-  MinHeapFreeRatio = MIN2(MinHeapFreeRatio, (uintx) 99);
 
   // Min/MaxMetaspaceFreeRatio
   status = status && verify_percentage(MinMetaspaceFreeRatio, "MinMetaspaceFreeRatio");
@@ -2370,6 +2439,10 @@
   status &= verify_interval(NmethodSweepFraction, 1, ReservedCodeCacheSize/K, "NmethodSweepFraction");
   status &= verify_interval(NmethodSweepActivity, 0, 2000, "NmethodSweepActivity");
 
+  // TieredCompilation needs at least 2 compiler threads.
+  const int num_min_compiler_threads = (TieredCompilation) ? NOT_GRAAL(2) GRAAL_ONLY(1) : 1;
+  status &=verify_min_value(CICompilerCount, num_min_compiler_threads, "CICompilerCount");
+
   return status;
 }
 
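
The minimum compiler-thread count above is computed with the NOT_GRAAL/GRAAL_ONLY pair, assumed here to follow the usual HotSpot *_ONLY convention (compare PPC32_ONLY elsewhere in this change): each macro expands to its argument in one configuration and to nothing in the other, so exactly one literal survives preprocessing.

    // Sketch of the assumed macro pair and its effect on the expression above.
    #ifdef GRAAL
    #define GRAAL_ONLY(code) code
    #define NOT_GRAAL(code)
    #else
    #define GRAAL_ONLY(code)
    #define NOT_GRAAL(code) code
    #endif
    // (TieredCompilation) ? NOT_GRAAL(2) GRAAL_ONLY(1) : 1
    // reduces to "? 2 : 1" without Graal and to "? 1 : 1" with it.
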
@@ -2731,7 +2804,7 @@
     } else if (match_option(option, "-Xmaxf", &tail)) {
       char* err;
       int maxf = (int)(strtod(tail, &err) * 100);
-      if (*err != '\0' || maxf < 0 || maxf > 100) {
+      if (*err != '\0' || *tail == '\0' || maxf < 0 || maxf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad max heap free percentage size: %s\n",
                     option->optionString);
@@ -2743,7 +2816,7 @@
     } else if (match_option(option, "-Xminf", &tail)) {
       char* err;
       int minf = (int)(strtod(tail, &err) * 100);
-      if (*err != '\0' || minf < 0 || minf > 100) {
+      if (*err != '\0' || *tail == '\0' || minf < 0 || minf > 100) {
         jio_fprintf(defaultStream::error_stream(),
                     "Bad min heap free percentage size: %s\n",
                     option->optionString);
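
The extra *tail == '\0' test in the two hunks above matters because strtod performs no conversion on an empty string: it returns 0.0 and stores the start of the string back into the end pointer, so *err is already '\0' and the old check silently accepted a bare -Xmaxf or -Xminf as 0%. A small standalone demonstration of that failure mode:

    // Demonstrates why the empty-string case needs its own test (standard C behavior).
    #include <cstdio>
    #include <cstdlib>

    int main() {
      const char* tail = "";          // what follows "-Xmaxf" when no value is given
      char* err;
      double v = strtod(tail, &err);  // no conversion: v == 0.0 and err == tail
      // Old check: (*err != '\0' || maxf < 0 || maxf > 100) with maxf == 0
      // is false, so the empty value slipped through as 0%.
      // The added (*tail == '\0') test rejects the option instead.
      printf("v=%f err==tail=%d *err==NUL=%d\n", v, err == tail, *err == '\0');
      return 0;
    }
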
@@ -3699,9 +3772,9 @@
   // Set per-collector flags
   if (UseParallelGC || UseParallelOldGC) {
     set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) { // should be done before ParNew check below
+  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
     set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {  // skipped if CMS is set above
+  } else if (UseParNewGC) {  // Skipped if CMS is set above
     set_parnew_gc_flags();
   } else if (UseG1GC) {
     set_g1_gc_flags();
@@ -3715,22 +3788,26 @@
               " using -XX:ParallelGCThreads=N");
     }
   }
+  if (MinHeapFreeRatio == 100) {
+    // Keeping the heap 100% free is hard ;-) so limit it to 99%.
+    FLAG_SET_ERGO(uintx, MinHeapFreeRatio, 99);
+  }
 #else // INCLUDE_ALL_GCS
   assert(verify_serial_gc_flags(), "SerialGC unset");
 #endif // INCLUDE_ALL_GCS
 
-  // Initialize Metaspace flags and alignments.
+  // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
 
   // Set bytecode rewriting flags
   set_bytecode_flags();
 
-  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled.
+  // Set flags if Aggressive optimization flags (-XX:+AggressiveOpts) enabled
   set_aggressive_opts_flags();
 
   // Turn off biased locking for locking debug mode flags,
-  // which are subtlely different from each other but neither works with
-  // biased locking.
+  // which are subtly different from each other but neither works with
+  // biased locking
   if (UseHeavyMonitors
 #ifdef COMPILER1
       || !UseFastLocking
@@ -3748,8 +3825,8 @@
     UseBiasedLocking = false;
   }
 
-#ifdef CC_INTERP
-  // Clear flags not supported by the C++ interpreter
+#ifdef ZERO
+  // Clear flags not supported on Zero.
   FLAG_SET_DEFAULT(ProfileInterpreter, false);
   FLAG_SET_DEFAULT(UseBiasedLocking, false);
   LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
@@ -3783,10 +3860,6 @@
     // Doing the replace in parent maps helps speculation
     FLAG_SET_DEFAULT(ReplaceInParentMaps, true);
   }
-#ifndef X86
-  // Only on x86 for now
-  FLAG_SET_DEFAULT(TypeProfileLevel, 0);
-#endif
 #endif
 
   if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
@@ -3825,32 +3898,28 @@
     }
   }
 
-  // set PauseAtExit if the gamma launcher was used and a debugger is attached
-  // but only if not already set on the commandline
-  if (Arguments::created_by_gamma_launcher() && os::is_debugger_attached()) {
-    bool set = false;
-    CommandLineFlags::wasSetOnCmdline("PauseAtExit", &set);
-    if (!set) {
-      FLAG_SET_DEFAULT(PauseAtExit, true);
-    }
-  }
-
   return JNI_OK;
 }
 
 jint Arguments::adjust_after_os() {
-#if INCLUDE_ALL_GCS
-  if (UseParallelGC || UseParallelOldGC) {
-    if (UseNUMA) {
+  if (UseNUMA) {
+    if (UseParallelGC || UseParallelOldGC) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
-        FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
+         FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
-      // For those collectors or operating systems (eg, Windows) that do
-      // not support full UseNUMA, we will map to UseNUMAInterleaving for now
-      UseNUMAInterleaving = true;
+    }
+    // UseNUMAInterleaving is set to ON for all collectors and
+    // platforms when UseNUMA is set to ON. NUMA-aware collectors
+    // such as the parallel collector for Linux and Solaris will
+    // interleave old gen and survivor spaces on top of NUMA
+    // allocation policy for the eden space.
+    // Non NUMA-aware collectors such as CMS, G1 and Serial-GC on
+    // all platforms and ParallelGC on Windows will interleave all
+    // of the heap spaces across NUMA nodes.
+    if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
+      FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);
     }
   }
-#endif // INCLUDE_ALL_GCS
   return JNI_OK;
 }
 
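
The hunk above uses two different setters: FLAG_SET_DEFAULT for MinHeapDeltaBytes and FLAG_SET_ERGO for UseNUMAInterleaving. Assuming the usual semantics of these macros (an assumption, not verified against this tree's globals_extension.hpp), the first rewrites the value while leaving the flag's origin as DEFAULT, whereas the second also records that ergonomics changed it, which later FLAG_IS_DEFAULT checks and -XX:+PrintFlagsFinal output can observe:

    // Sketch of the contrast; semantics assumed as described above.
    if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
      FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);       // value changes, origin stays DEFAULT
    }
    if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
      FLAG_SET_ERGO(bool, UseNUMAInterleaving, true);  // value changes, origin becomes ERGONOMIC
    }
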
--- a/src/share/vm/runtime/arguments.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/arguments.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -27,6 +27,7 @@
 
 #include "runtime/java.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/debug.hpp"
 #include "utilities/top.hpp"
 
 // Arguments parses the command line and recognizes options
@@ -276,14 +277,14 @@
   static const char* _java_vendor_url_bug;
 
   // sun.java.launcher, private property to provide information about
-  // java/gamma launcher
+  // java launcher
   static const char* _sun_java_launcher;
 
   // sun.java.launcher.pid, private property
   static int    _sun_java_launcher_pid;
 
-  // was this VM created by the gamma launcher
-  static bool   _created_by_gamma_launcher;
+  // was this VM created via the -XXaltjvm=<path> option
+  static bool   _sun_java_launcher_is_altjvm;
 
   // Option flags
   static bool   _has_profile;
@@ -378,11 +379,16 @@
   static jint parse_vm_init_args(const JavaVMInitArgs* args);
   static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, SysClassPath* scp_p, bool* scp_assembly_required_p, Flag::Flags origin);
   static jint finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required);
-  static bool is_bad_option(const JavaVMOption* option, jboolean ignore,
-    const char* option_type);
+  static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type);
+
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
     return is_bad_option(option, ignore, NULL);
   }
+
+  static bool is_percentage(uintx val) {
+    return val <= 100;
+  }
+
   static bool verify_interval(uintx val, uintx min,
                               uintx max, const char* name);
   static bool verify_min_value(intx val, intx min, const char* name);
@@ -451,11 +457,20 @@
   static jint apply_ergo();
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
+
+  // Verifies that the given value will fit as a MinHeapFreeRatio. If not, an error
+  // message is returned in the provided buffer.
+  static bool verify_MinHeapFreeRatio(FormatBuffer<80>& err_msg, uintx min_heap_free_ratio);
+
+  // Verifies that the given value will fit as a MaxHeapFreeRatio. If not, an error
+  // message is returned in the provided buffer.
+  static bool verify_MaxHeapFreeRatio(FormatBuffer<80>& err_msg, uintx max_heap_free_ratio);
+
   // Check for consistency in the selection of the garbage collector.
   static bool check_gc_consistency();
   static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
-  // Check consistecy or otherwise of VM argument settings
+  // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
   // Check stack pages settings
   static bool check_stack_pages();
@@ -498,8 +513,8 @@
   static const char* sun_java_launcher()    { return _sun_java_launcher; }
   // Was VM created by a Java launcher?
   static bool created_by_java_launcher();
-  // Was VM created by the gamma Java launcher?
-  static bool created_by_gamma_launcher();
+  // -Dsun.java.launcher.is_altjvm
+  static bool sun_java_launcher_is_altjvm();
   // -Dsun.java.launcher.pid
   static int sun_java_launcher_pid()        { return _sun_java_launcher_pid; }
 
@@ -509,7 +524,7 @@
   // -Xprof
   static bool has_profile()                 { return _has_profile; }
 
-  // -Xms, -Xmx
+  // -Xms
   static uintx min_heap_size()              { return _min_heap_size; }
   static void  set_min_heap_size(uintx v)   { _min_heap_size = v;  }
 
--- a/src/share/vm/runtime/atomic.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/atomic.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/runtime/atomic.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/atomic.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/atomic.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/atomic.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -57,6 +57,11 @@
 # include "atomic_windows_x86.inline.hpp"
 #endif
 
+// AIX
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "atomic_aix_ppc.inline.hpp"
+#endif
+
 // BSD
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "atomic_bsd_x86.inline.hpp"
--- a/src/share/vm/runtime/biasedLocking.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/biasedLocking.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -128,7 +128,7 @@
         // Walk monitors youngest to oldest
         for (int i = len - 1; i >= 0; i--) {
           MonitorInfo* mon_info = monitors->at(i);
-          if (mon_info->owner_is_scalar_replaced()) continue;
+          if (mon_info->eliminated()) continue;
           oop owner = mon_info->owner();
           if (owner != NULL) {
             info->append(mon_info);
@@ -233,8 +233,10 @@
     // Fix up highest lock to contain displaced header and point
     // object at it
     highest_lock->set_displaced_header(unbiased_prototype);
-    // Reset object header to point to displaced mark
-    obj->set_mark(markOopDesc::encode(highest_lock));
+    // Reset object header to point to displaced mark.
+    // Must release-store the lock address on platforms without TSO
+    // ordering (e.g. ppc).
+    obj->release_set_mark(markOopDesc::encode(highest_lock));
     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
     if (TraceBiasedLocking && (Verbose || !is_bulk)) {
       tty->print_cr("  Revoked bias of currently-locked object");
--- a/src/share/vm/runtime/compilationPolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -275,7 +275,7 @@
 }
 
 void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
-  // Delay next back-branch event but pump up invocation counter to triger
+  // Delay next back-branch event but pump up invocation counter to trigger
   // whole method compilation.
   MethodCounters* mcs = m->method_counters();
   assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
@@ -293,7 +293,7 @@
 //
 // CounterDecay
 //
-// Interates through invocation counters and decrements them. This
+// Iterates through invocation counters and decrements them. This
 // is done at each safepoint.
 //
 class CounterDecay : public AllStatic {
@@ -363,7 +363,7 @@
 }
 
 // This method can be called by any component of the runtime to notify the policy
-// that it's recommended to delay the complation of this method.
+// that it's recommended to delay the compilation of this method.
 void NonTieredCompPolicy::delay_compilation(Method* method) {
   MethodCounters* mcs = method->method_counters();
   if (mcs != NULL) {
--- a/src/share/vm/runtime/compilationPolicy.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/compilationPolicy.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
   // reprofile request
   virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
   // delay_compilation(method) can be called by any component of the runtime to notify the policy
-  // that it's recommended to delay the complation of this method.
+  // that it's recommended to delay the compilation of this method.
   virtual void delay_compilation(Method* method) = 0;
   // disable_compilation() is called whenever the runtime decides to disable compilation of the
   // specified method.
--- a/src/share/vm/runtime/deoptimization.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/deoptimization.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -82,10 +82,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/ad_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/ad_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/ad_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/ad_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 
 #ifdef GRAAL
 #include "graal/graalCompiler.hpp"
@@ -398,7 +401,7 @@
   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 
-  // It's possible that the number of paramters at the call site is
+  // It's possible that the number of parameters at the call site is
   // different than number of arguments in the callee when method
   // handles are used.  If the caller is interpreted get the real
   // value so that the proper amount of space can be added to its
@@ -558,7 +561,7 @@
     // popframe condition bit set, we should always clear it now
     thread->clear_popframe_condition();
 #else
-    // C++ interpeter will clear has_pending_popframe when it enters
+    // C++ interpreter will clear has_pending_popframe when it enters
     // with method_resume. For deopt_resume2 we clear it now.
     if (thread->popframe_forcing_deopt_reexecution())
         thread->clear_popframe_condition();
@@ -1291,9 +1294,19 @@
   load_class_by_index(constant_pool, index, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     // Exception happened during classloading. We ignore the exception here, since it
-    // is going to be rethrown since the current activation is going to be deoptimzied and
+    // is going to be rethrown since the current activation is going to be deoptimized and
     // the interpreter will re-execute the bytecode.
     CLEAR_PENDING_EXCEPTION;
+    // Class loading called java code which may have caused a stack
+    // overflow. If the exception was thrown right before the return
+    // to the runtime the stack is no longer guarded. Reguard the
+    // stack otherwise if we return to the uncommon trap blob and the
+    // stack bang causes a stack overflow we crash.
+    assert(THREAD->is_Java_thread(), "only a java thread can be here");
+    JavaThread* thread = (JavaThread*)THREAD;
+    bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+    if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+    assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
   }
 }
 
@@ -1639,6 +1652,7 @@
 #ifdef GRAAL
                                    nm->is_compiled_by_graal() && nm->is_osr_method(),
 #endif
+                                   nm->method(),
                                    //outputs:
                                    this_trap_count,
                                    maybe_prior_trap,
@@ -1684,7 +1698,7 @@
       }
 
       // Go back to the compiler if there are too many traps in this method.
-      if (this_trap_count >= (uint)PerMethodTrapLimit) {
+      if (this_trap_count >= per_method_trap_limit(reason)) {
         // If there are too many traps in this method, force a recompile.
         // This will allow the compiler to see the limit overflow, and
         // take corrective action, if possible.
@@ -1776,6 +1790,7 @@
 #ifdef GRAAL
                                          bool is_osr,
 #endif
+                                         Method* compiled_method,
                                          //outputs:
                                          uint& ret_this_trap_count,
                                          bool& ret_maybe_prior_trap,
@@ -1811,9 +1826,16 @@
     // Find the profile data for this BCI.  If there isn't one,
     // try to allocate one from the MDO's set of spares.
     // This will let us detect a repeated trap at this point.
-    pdata = trap_mdo->allocate_bci_to_data(trap_bci);
+    pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
 
     if (pdata != NULL) {
+      if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
+        if (LogCompilation && xtty != NULL) {
+          ttyLocker ttyl;
+          // no more room for speculative traps in this MDO
+          xtty->elem("speculative_traps_oom");
+        }
+      }
       // Query the trap state of this profile datum.
       int tstate0 = pdata->trap_state();
       if (!trap_state_has_reason(tstate0, per_bc_reason))
@@ -1853,12 +1875,14 @@
   bool ignore_maybe_prior_recompile;
   // Graal uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
   bool update_total_counts = GRAAL_ONLY(false) NOT_GRAAL(true);
+  assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
   query_update_method_data(trap_mdo, trap_bci,
                            (DeoptReason)reason,
                            update_total_counts,
 #ifdef GRAAL
                            false,
 #endif
+                           NULL,
                            ignore_this_trap_count,
                            ignore_maybe_prior_trap,
                            ignore_maybe_prior_recompile);
@@ -1989,6 +2013,7 @@
   "age" GRAAL_ONLY("|jsr_mismatch"),
   "predicate",
   "loop_limit_check",
+  "speculate_class_check",
   GRAAL_ONLY("aliasing")
 };
 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
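
The reguard logic added above closes a narrow window: class loading ran Java code, and a stack overflow thrown just before returning to the runtime leaves the yellow guard zone disabled, so the stack bang in the uncommon-trap blob would hit an unprotected page and crash instead of raising StackOverflowError. The re-arm idiom in isolation; the wrapper is hypothetical, the JavaThread calls are the ones used above:

    // Hypothetical wrapper around the re-arm idiom from the hunk above.
    static void reguard_if_needed(JavaThread* thread) {
      bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
      if (!guard_pages_enabled) {
        guard_pages_enabled = thread->reguard_stack();  // re-protect the yellow zone
      }
      assert(guard_pages_enabled, "stack banging may crash on an unguarded stack");
    }
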
--- a/src/share/vm/runtime/deoptimization.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/deoptimization.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -72,6 +72,7 @@
     Reason_age,                   // nmethod too old; tier threshold reached
     Reason_predicate,             // compiler generated predicate failed
     Reason_loop_limit_check,      // compiler generated loop limits check failed
+    Reason_speculate_class_check, // saw unexpected object class from type speculation
 #ifdef GRAAL
     Reason_aliasing,              // optimistic assumption about aliasing failed
 #endif
@@ -233,7 +234,7 @@
   // Called by assembly stub after execution has returned to
   // deoptimized frame and after the stack unrolling.
   // @argument thread.     Thread where stub_frame resides.
-  // @argument exec_mode.  Determines how execution should be continuted in top frame.
+  // @argument exec_mode.  Determines how execution should be continued in top frame.
   //                       0 means continue after current byte code
   //                       1 means exception has happened, handle exception
   //                       2 means reexecute current bytecode (for uncommon traps).
@@ -350,10 +351,23 @@
       return reason;
     else if (reason == Reason_div0_check) // null check due to divide-by-zero?
       return Reason_null_check;           // recorded per BCI as a null check
+    else if (reason == Reason_speculate_class_check)
+      return Reason_class_check;
     else
       return Reason_none;
   }
 
+  static bool reason_is_speculate(int reason) {
+    if (reason == Reason_speculate_class_check) {
+      return true;
+    }
+    return false;
+  }
+
+  static uint per_method_trap_limit(int reason) {
+    return reason_is_speculate(reason) ? (uint)PerMethodSpecTrapLimit : (uint)PerMethodTrapLimit;
+  }
+
   static const char* trap_reason_name(int reason);
   static const char* trap_action_name(int action);
   // Format like reason='foo' action='bar' index='123'.
@@ -380,6 +394,7 @@
 #ifdef GRAAL
                                                bool is_osr,
 #endif
+                                               Method* compiled_method,
                                                //outputs:
                                                uint& ret_this_trap_count,
                                                bool& ret_maybe_prior_trap,
--- a/src/share/vm/runtime/fprofiler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/fprofiler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/frame.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/frame.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -649,7 +649,7 @@
 #endif
 }
 
-// Return whether the frame is in the VM or os indicating a Hotspot problem.
+// Print whether the frame is in the VM or OS, indicating a HotSpot problem.
 // Otherwise, it's likely a bug in the native library that the Java code calls,
 // hopefully indicating where to submit bugs.
 void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
@@ -895,7 +895,7 @@
 }
 
 
-void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
+void frame::oops_interpreted_do(OopClosure* f, CLDClosure* cld_f,
     const RegisterMap* map, bool query_oop_map_cache) {
   assert(is_interpreted_frame(), "Not an interpreted frame");
   assert(map != NULL, "map must be set");
@@ -928,25 +928,14 @@
     // klass, and the klass needs to be kept alive while executing. The GCs
     // don't trace through method pointers, so typically in similar situations
     // the mirror or the class loader of the klass are installed as a GC root.
-    // To minimze the overhead of doing that here, we ask the GC to pass down a
+    // To minimize the overhead of doing that here, we ask the GC to pass down a
     // closure that knows how to keep klasses alive given a ClassLoaderData.
     cld_f->do_cld(m->method_holder()->class_loader_data());
   }
 
-#if !defined(PPC) || defined(ZERO)
-  if (m->is_native()) {
-#ifdef CC_INTERP
-    interpreterState istate = get_interpreterState();
-    f->do_oop((oop*)&istate->_oop_temp);
-#else
-    f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
-#endif /* CC_INTERP */
+  if (m->is_native() PPC32_ONLY(&& m->is_static())) {
+    f->do_oop(interpreter_frame_temp_oop_addr());
   }
-#else // PPC
-  if (m->is_native() && m->is_static()) {
-    f->do_oop(interpreter_frame_mirror_addr());
-  }
-#endif // PPC
 
   int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
 
@@ -1146,7 +1135,7 @@
 }
 
 
-void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
+void frame::oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
 #ifndef PRODUCT
   // simulate GC crash here to dump java thread in error report
   if (CrashGCForDumpingJavaThread) {
--- a/src/share/vm/runtime/frame.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/frame.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -46,10 +46,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 #ifdef ZERO
 #ifdef TARGET_ARCH_zero
 # include "stack_zero.hpp"
@@ -311,6 +314,9 @@
   void interpreter_frame_set_monitor_end(BasicObjectLock* value);
 #endif // CC_INTERP
 
+  // Address of the temp oop in the frame. Needed as GC root.
+  oop* interpreter_frame_temp_oop_addr() const;
+
   // BasicObjectLocks:
   //
   // interpreter_frame_monitor_begin is higher in memory than interpreter_frame_monitor_end
@@ -347,9 +353,6 @@
   void interpreter_frame_set_method(Method* method);
   Method** interpreter_frame_method_addr() const;
   ConstantPoolCache** interpreter_frame_cache_addr() const;
-#ifdef PPC
-  oop* interpreter_frame_mirror_addr() const;
-#endif
 
  public:
   // Entry frames
@@ -416,19 +419,19 @@
 
   // Oops-do's
   void oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f);
-  void oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
+  void oops_interpreted_do(OopClosure* f, CLDClosure* cld_f, const RegisterMap* map, bool query_oop_map_cache = true);
 
  private:
   void oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f);
 
   // Iteration of oops
-  void oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
+  void oops_do_internal(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
   void oops_entry_do(OopClosure* f, const RegisterMap* map);
   void oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* map);
   int adjust_offset(Method* method, int index); // helper for above fn
  public:
   // Memory management
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map) { oops_do_internal(f, cld_f, cf, map, true); }
   void nmethods_do(CodeBlobClosure* cf);
 
   // RedefineClasses support for finding live interpreted methods on the stack
--- a/src/share/vm/runtime/frame.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/frame.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -87,6 +87,13 @@
   return is_entry_frame() && entry_frame_is_first();
 }
 
+#ifdef CC_INTERP
+inline oop* frame::interpreter_frame_temp_oop_addr() const {
+  interpreterState istate = get_interpreterState();
+  return (oop *)&istate->_oop_temp;
+}
+#endif // CC_INTERP
+
 // here are the platform-dependent bodies:
 
 #ifdef TARGET_ARCH_x86
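
interpreter_frame_temp_oop_addr() gets a shared CC_INTERP body here; for the template interpreter the body is platform code, and judging from the expression the old frame.cpp block computed, it resolves the slot off the frame pointer. A hedged sketch of such a platform body (the real definitions live in the per-CPU frame_<arch>.inline.hpp files):

    // Hypothetical template-interpreter variant, reconstructed from the
    // expression removed from frame.cpp earlier in this change.
    inline oop* frame::interpreter_frame_temp_oop_addr() const {
      return (oop*)(fp() + interpreter_frame_oop_temp_offset);
    }
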
--- a/src/share/vm/runtime/globals.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/globals.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,6 +31,7 @@
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/top.hpp"
+#include "trace/tracing.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -65,6 +66,14 @@
 MATERIALIZE_FLAGS_EXT
 
 
+static bool is_product_build() {
+#ifdef PRODUCT
+  return true;
+#else
+  return false;
+#endif
+}
+
 void Flag::check_writable() {
   if (is_constant_in_binary()) {
     fatal(err_msg("flag is constant: %s", _name));
@@ -238,6 +247,27 @@
 // Get custom message for this locked flag, or return NULL if
 // none is available.
 void Flag::get_locked_message(char* buf, int buflen) const {
+  buf[0] = '\0';
+  if (is_diagnostic() && !is_unlocked()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
+                 _name);
+    return;
+  }
+  if (is_experimental() && !is_unlocked()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
+                 _name);
+    return;
+  }
+  if (is_develop() && is_product_build()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
+                 _name);
+    return;
+  }
+  if (is_notproduct() && is_product_build()) {
+    jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
+                 _name);
+    return;
+  }
   get_locked_message_ext(buf, buflen);
 }
 
@@ -298,7 +328,7 @@
     else st->print("%-16s", "");
   }
 
-  st->print("%-20");
+  st->print("%-20s", " ");
   print_kind(st);
 
   if (withComments) {
@@ -477,13 +507,13 @@
 }
 
 // Search the flag table for a named flag
-Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
+Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
   for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
     if (str_equal(current->_name, name, length)) {
       // Found a matching entry.
       // Don't report notproduct and develop flags in product builds.
       if (current->is_constant_in_binary()) {
-        return NULL;
+        return (return_flag == true ? current : NULL);
       }
       // Report locked flags only if allowed.
       if (!(current->is_unlocked() || current->is_unlocker())) {
@@ -577,6 +607,17 @@
   return true;
 }
 
+template<class E, class T>
+static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin)
+{
+  E e;
+  e.set_name(name);
+  e.set_old_value(old_value);
+  e.set_new_value(new_value);
+  e.set_origin(origin);
+  e.commit();
+}
+
 bool CommandLineFlags::boolAt(char* name, size_t len, bool* value) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
@@ -590,6 +631,7 @@
   if (result == NULL) return false;
   if (!result->is_bool()) return false;
   bool old_value = result->get_bool();
+  trace_flag_changed<EventBooleanFlagChanged, bool>(name, old_value, *value, origin);
   result->set_bool(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -599,6 +641,7 @@
 void CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
+  trace_flag_changed<EventBooleanFlagChanged, bool>(faddr->_name, faddr->get_bool(), value, origin);
   faddr->set_bool(value);
   faddr->set_origin(origin);
 }
@@ -616,6 +659,7 @@
   if (result == NULL) return false;
   if (!result->is_intx()) return false;
   intx old_value = result->get_intx();
+  trace_flag_changed<EventLongFlagChanged, s8>(name, old_value, *value, origin);
   result->set_intx(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -625,6 +669,7 @@
 void CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
+  trace_flag_changed<EventLongFlagChanged, s8>(faddr->_name, faddr->get_intx(), value, origin);
   faddr->set_intx(value);
   faddr->set_origin(origin);
 }
@@ -642,6 +687,7 @@
   if (result == NULL) return false;
   if (!result->is_uintx()) return false;
   uintx old_value = result->get_uintx();
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
   result->set_uintx(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -651,6 +697,7 @@
 void CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uintx(), value, origin);
   faddr->set_uintx(value);
   faddr->set_origin(origin);
 }
@@ -668,6 +715,7 @@
   if (result == NULL) return false;
   if (!result->is_uint64_t()) return false;
   uint64_t old_value = result->get_uint64_t();
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
   result->set_uint64_t(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -677,6 +725,7 @@
 void CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
+  trace_flag_changed<EventUnsignedLongFlagChanged, u8>(faddr->_name, faddr->get_uint64_t(), value, origin);
   faddr->set_uint64_t(value);
   faddr->set_origin(origin);
 }
@@ -694,6 +743,7 @@
   if (result == NULL) return false;
   if (!result->is_double()) return false;
   double old_value = result->get_double();
+  trace_flag_changed<EventDoubleFlagChanged, double>(name, old_value, *value, origin);
   result->set_double(*value);
   *value = old_value;
   result->set_origin(origin);
@@ -703,6 +753,7 @@
 void CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
+  trace_flag_changed<EventDoubleFlagChanged, double>(faddr->_name, faddr->get_double(), value, origin);
   faddr->set_double(value);
   faddr->set_origin(origin);
 }
@@ -715,13 +766,12 @@
   return true;
 }
 
-// Contract:  Flag will make private copy of the incoming value.
-// Outgoing value is always malloc-ed, and caller MUST call free.
 bool CommandLineFlags::ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin) {
   Flag* result = Flag::find_flag(name, len);
   if (result == NULL) return false;
   if (!result->is_ccstr()) return false;
   ccstr old_value = result->get_ccstr();
+  trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
   char* new_value = NULL;
   if (*value != NULL) {
     new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal);
@@ -739,11 +789,11 @@
   return true;
 }
 
-// Contract:  Flag will make private copy of the incoming value.
 void CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
   Flag* faddr = address_of_flag(flag);
   guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
   ccstr old_value = faddr->get_ccstr();
+  trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
   char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal);
   strcpy(new_value, value);
   faddr->set_ccstr(new_value);
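
Every typed setter in globals.cpp now emits a *FlagChanged trace event before installing the new value, and the trace_flag_changed template keeps a single body per (event class, value type) pair. What one instantiation amounts to, written out; the concrete values are illustrative and Flag::ERGONOMIC is assumed to be a valid origin constant in this tree:

    // Illustrative expansion of trace_flag_changed<EventBooleanFlagChanged, bool>.
    static void example_emit_flag_change() {
      EventBooleanFlagChanged e;
      e.set_name("UseNUMAInterleaving");
      e.set_old_value(false);
      e.set_new_value(true);
      e.set_origin(Flag::ERGONOMIC);  // assumed origin constant
      e.commit();                     // no-op unless this event type is enabled
    }
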
--- a/src/share/vm/runtime/globals.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/globals.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "globals_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "globals_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "globals_bsd.hpp"
 #endif
@@ -88,6 +91,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "globals_linux_ppc.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "globals_aix_ppc.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "globals_bsd_x86.hpp"
 #endif
@@ -116,6 +122,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "c1_globals_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "c1_globals_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "c1_globals_bsd.hpp"
 #endif
@@ -144,6 +153,9 @@
 #ifdef TARGET_ARCH_arm
 # include "c2_globals_arm.hpp"
 #endif
+#ifdef TARGET_ARCH_ppc
+# include "c2_globals_ppc.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "c2_globals_linux.hpp"
 #endif
@@ -153,6 +165,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "c2_globals_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "c2_globals_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "c2_globals_bsd.hpp"
 #endif
@@ -181,7 +196,6 @@
 define_pd_global(intx, OnStackReplacePercentage,     0);
 define_pd_global(bool, ResizeTLAB,                   false);
 define_pd_global(intx, FreqInlineSize,               0);
-define_pd_global(intx, InlineSmallCode,              0);
 define_pd_global(intx, NewSizeThreadIncrease,        4*K);
 define_pd_global(intx, InlineClassNatives,           true);
 define_pd_global(intx, InlineUnsafeOps,              true);
@@ -256,7 +270,7 @@
   // number of flags
   static size_t numFlags;
 
-  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
+  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
   static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
 
   void check_writable();
@@ -391,6 +405,8 @@
 
   static bool ccstrAt(char* name, size_t len, ccstr* value);
   static bool ccstrAt(char* name, ccstr* value)    { return ccstrAt(name, strlen(name), value); }
+  // Contract:  Flag will make private copy of the incoming value.
+  // Outgoing value is always malloc-ed, and caller MUST call free.
   static bool ccstrAtPut(char* name, size_t len, ccstr* value, Flag::Flags origin);
   static bool ccstrAtPut(char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
 
@@ -578,7 +594,7 @@
           "Force NUMA optimizations on single-node/UMA systems")            \
                                                                             \
   product(uintx, NUMAChunkResizeWeight, 20,                                 \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying average for "                   \
           "AdaptiveNUMAChunkSizing")                                        \
                                                                             \
@@ -1276,6 +1292,9 @@
   develop(bool, TraceJNICalls, false,                                       \
           "Trace JNI calls")                                                \
                                                                             \
+  develop(bool, StressRewriter, false,                                      \
+          "Stress linktime bytecode rewriting")                             \
+                                                                            \
   notproduct(bool, TraceJVMCalls, false,                                    \
           "Trace JVM calls")                                                \
                                                                             \
@@ -1518,7 +1537,7 @@
           "allocation")                                                     \
                                                                             \
   product(uintx, PLABWeight, 75,                                            \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying average for ResizePLAB")        \
                                                                             \
   product(bool, ResizePLAB, true,                                           \
@@ -1627,11 +1646,11 @@
           "is shifted to the right within the period between young GCs")    \
                                                                             \
   product(uintx, CMSExpAvgFactor, 50,                                       \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponential averages for CMS statistics")              \
                                                                             \
   product(uintx, CMS_FLSWeight, 75,                                         \
-          "Percentage (0-100) used to weigh the current sample when "       \
+          "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying averages for CMS FLS "          \
           "statistics")                                                     \
                                                                             \
@@ -1743,19 +1762,15 @@
           "to simulate overflow; a smaller number increases frequency")     \
                                                                             \
   product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
-          "(Temporary, subject to experimentation) "                        \
           "Maximum number of abortable preclean iterations, if > 0")        \
                                                                             \
   product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
-          "(Temporary, subject to experimentation) "                        \
           "Maximum time in abortable preclean (in milliseconds)")           \
                                                                             \
   product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
-          "(Temporary, subject to experimentation) "                        \
           "Nominal minimum work per abortable preclean iteration")          \
                                                                             \
   manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
-          "(Temporary, subject to experimentation) "                        \
           "Time that we sleep between iterations when not given "           \
           "enough work per iteration")                                      \
                                                                             \
@@ -1971,13 +1986,13 @@
           "(other young collectors)")                                       \
                                                                             \
   develop(uintx, PromotionFailureALotInterval, 5,                           \
-          "Total collections between promotion failures alot")              \
+          "Total collections between promotion failures a lot")             \
                                                                             \
   experimental(uintx, WorkStealingSleepMillis, 1,                           \
           "Sleep time when sleep is used for yields")                       \
                                                                             \
   experimental(uintx, WorkStealingYieldsBeforeSleep, 5000,                  \
-          "Number of yields before a sleep is done during workstealing")    \
+          "Number of yields before a sleep is done during work stealing")   \
                                                                             \
   experimental(uintx, WorkStealingHardSpins, 4096,                          \
           "Number of iterations in a spin loop between checks on "          \
@@ -2055,7 +2070,7 @@
           "size; deprecated: to be renamed to MaxRAMFraction")              \
                                                                             \
   product(uintx, MinRAMFraction, 2,                                         \
-          "Minimum fraction (1/n) of real memory used for maxmimum heap "   \
+          "Minimum fraction (1/n) of real memory used for maximum heap "    \
           "size on systems with small physical memory size")                \
                                                                             \
   product(uintx, InitialRAMFraction, 64,                                    \
@@ -2504,6 +2519,12 @@
   develop_pd(bool, ImplicitNullChecks,                                      \
           "Generate code for implicit null checks")                         \
                                                                             \
+  product_pd(bool, TrapBasedNullChecks,                                     \
+          "Generate code for null checks that uses a cmp and trap "         \
+          "instruction raising SIGTRAP.  This is only used if an access to" \
+          "null (+offset) will not raise a SIGSEGV, i.e.,"                  \
+          "ImplicitNullChecks don't work (PPC64).")                         \
+                                                                            \
   product(bool, PrintSafepointStatistics, false,                            \
           "Print statistics about safepoint synchronization")               \
                                                                             \
@@ -2549,6 +2570,9 @@
   develop(bool, PrintMethodFlushing, false,                                 \
           "Print the nmethods being flushed")                               \
                                                                             \
+  diagnostic(bool, PrintMethodFlushingStatistics, false,                    \
+          "print statistics about method flushing")                         \
+                                                                            \
   develop(bool, UseRelocIndex, false,                                       \
           "Use an index to speed random access to relocations")             \
                                                                             \
@@ -2811,6 +2835,11 @@
   product_pd(bool, ProfileInterpreter,                                      \
           "Profile at the bytecode level during interpretation")            \
                                                                             \
+  develop(bool, TraceProfileInterpreter, false,                             \
+          "Trace profiling at the bytecode level during interpretation. "   \
+          "This outputs the profiling information collected to improve "    \
+          "jit compilation.")                                               \
+                                                                            \
   develop_pd(bool, ProfileTraps,                                            \
           "Profile deoptimization traps at the bytecode level")             \
                                                                             \
@@ -3073,9 +3102,15 @@
   product(intx, PerMethodTrapLimit,  100,                                   \
           "Limit on traps (of one kind) in a method (includes inlines)")    \
                                                                             \
+  experimental(intx, PerMethodSpecTrapLimit,  5000,                         \
+          "Limit on speculative traps (of one kind) in a method "           \
+          "(includes inlines)")                                             \
+                                                                            \
   product(intx, PerBytecodeTrapLimit,  4,                                   \
           "Limit on traps (of one kind) at a particular BCI")               \
                                                                             \
+  experimental(intx, SpecTrapLimitExtraEntries,  3,                         \
+          "Extra method data trap entries for speculation")                 \
+                                                                            \
   develop(intx, InlineFrequencyRatio,    20,                                \
           "Ratio of call site execution to caller method invocation")       \
                                                                             \
@@ -3154,15 +3189,15 @@
           "Maximum size of class area in Metaspace when compressed "        \
           "class pointers are used")                                        \
                                                                             \
-  product(uintx, MinHeapFreeRatio,    40,                                   \
+  manageable(uintx, MinHeapFreeRatio, 40,                                   \
           "The minimum percentage of heap free after GC to avoid expansion."\
-          " For most GCs this applies to the old generation. In G1 it"      \
-          " applies to the whole heap. Not supported by ParallelGC.")       \
-                                                                            \
-  product(uintx, MaxHeapFreeRatio,    70,                                   \
+          " For most GCs this applies to the old generation. In G1 and"     \
+          " ParallelGC it applies to the whole heap.")                      \
+                                                                            \
+  manageable(uintx, MaxHeapFreeRatio, 70,                                   \
           "The maximum percentage of heap free after GC to avoid shrinking."\
-          " For most GCs this applies to the old generation. In G1 it"      \
-          " applies to the whole heap. Not supported by ParallelGC.")       \
+          " For most GCs this applies to the old generation. In G1 and"     \
+          " ParallelGC it applies to the whole heap.")                      \
                                                                             \
   product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
           "Number of milliseconds per MB of free space in the heap")        \
@@ -3281,7 +3316,8 @@
           "disable this feature")                                           \
                                                                             \
   /* code cache parameters */                                               \
-  develop(uintx, CodeCacheSegmentSize, 64,                                  \
+  /* ppc64 has large code-entry alignment. */                               \
+  develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64),                  \
           "Code cache segment size (in bytes) - smallest unit of "          \
           "allocation")                                                     \
                                                                             \
@@ -3330,21 +3366,21 @@
   develop(intx, CIStart, 0,                                                 \
           "The id of the first compilation to permit")                      \
                                                                             \
-  develop(intx, CIStop,    -1,                                              \
+  develop(intx, CIStop, max_jint,                                           \
           "The id of the last compilation to permit")                       \
                                                                             \
-  develop(intx, CIStartOSR,     0,                                          \
+  develop(intx, CIStartOSR, 0,                                              \
           "The id of the first osr compilation to permit "                  \
           "(CICountOSR must be on)")                                        \
                                                                             \
-  develop(intx, CIStopOSR,    -1,                                           \
+  develop(intx, CIStopOSR, max_jint,                                        \
           "The id of the last osr compilation to permit "                   \
           "(CICountOSR must be on)")                                        \
                                                                             \
-  develop(intx, CIBreakAtOSR,    -1,                                        \
+  develop(intx, CIBreakAtOSR, -1,                                           \
           "The id of osr compilation to break at")                          \
                                                                             \
-  develop(intx, CIBreakAt,    -1,                                           \
+  develop(intx, CIBreakAt, -1,                                              \
           "The id of compilation to break at")                              \
                                                                             \
   product(ccstrlist, CompileOnly, "",                                       \
@@ -3363,6 +3399,10 @@
           "File containing compilation replay information"                  \
           "[default: ./replay_pid%p.log] (%p replaced with pid)")           \
                                                                             \
+  product(ccstr, InlineDataFile, NULL,                                      \
+          "File containing inlining replay information "                    \
+          "[default: ./inline_pid%p.log] (%p replaced with pid)")           \
+                                                                            \
   develop(intx, ReplaySuppressInitializers, 2,                              \
           "Control handling of class initialization during replay: "        \
           "0 - don't do anything special; "                                 \
@@ -3654,7 +3694,7 @@
   product(uintx, MaxDirectMemorySize, 0,                                    \
           "Maximum total size of NIO direct-buffer allocations")            \
                                                                             \
-  /* temporary developer defined flags  */                                  \
+  /* Flags used for temporary code during development  */                   \
                                                                             \
   diagnostic(bool, UseNewCode, false,                                       \
           "Testing Only: Use the new version while testing")                \
--- a/src/share/vm/runtime/globals_extension.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/globals_extension.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
 
 // Construct enum of Flag_<cmdline-arg> constants.
 
-// Parens left off in the following for the enum decl below.
+// Parentheses left off in the following for the enum decl below.
 #define FLAG_MEMBER(flag) Flag_##flag
 
 #define RUNTIME_PRODUCT_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
@@ -220,6 +220,7 @@
   static void uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
   static void uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
   static void doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
+  // Contract:  Flag will make private copy of the incoming value.
   static void ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
 
   static bool is_default(CommandLineFlag flag);
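
The ccstr contract restated above (copy-in, malloc-ed copy-out) implies a specific caller pattern for the name-based variant. A hypothetical sketch -- the flag name, value, and origin constant are illustrative only:

    // Hypothetical caller honoring the ccstr contract: the Flag copies the
    // incoming string, and on success *value is replaced with the previous
    // string, which is malloc-ed and MUST be freed by the caller.
    char  name[] = "ReplayDataFile";     // any ccstr flag; illustrative
    ccstr val    = "./my_replay.log";    // copied internally by the Flag
    if (CommandLineFlags::ccstrAtPut(name, &val, Flag::MANAGEMENT)) {
      free((void*) val);                 // val is now the old value
    }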
--- a/src/share/vm/runtime/handles.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/handles.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -267,7 +267,7 @@
 // HandleMarks manually.
 //
 // A HandleMark constructor will record the current handle area top, and the
-// desctructor will reset the top, destroying all handles allocated in between.
+// destructor will reset the top, destroying all handles allocated in between.
 // The following code will therefore NOT work:
 //
 //   Handle h;
--- a/src/share/vm/runtime/handles.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/handles.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/interfaceSupport.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/interfaceSupport.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -107,6 +107,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "interfaceSupport_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "interfaceSupport_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "interfaceSupport_bsd.hpp"
 #endif
--- a/src/share/vm/runtime/java.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/java.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -55,6 +55,7 @@
 #include "runtime/memprofiler.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/statSampler.hpp"
+#include "runtime/sweeper.hpp"
 #include "runtime/task.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
@@ -99,9 +100,6 @@
 #include "opto/runtime.hpp"
 #endif
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL(hotspot, vm__shutdown);
-#endif /* !USDT2 */
 
 #ifndef PRODUCT
 
@@ -220,9 +218,7 @@
 
 
 // General statistics printing (profiling ...)
-
 void print_statistics() {
-
 #ifdef ASSERT
 
   if (CountRuntimeCalls) {
@@ -320,6 +316,10 @@
     CodeCache::print();
   }
 
+  if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print();
+  }
+
   if (PrintCodeCache2) {
     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     CodeCache::print_internals();
@@ -387,6 +387,10 @@
     CodeCache::print();
   }
 
+  if (PrintMethodFlushingStatistics) {
+    NMethodSweeper::print();
+  }
+
 #ifdef COMPILER2
   if (PrintPreciseBiasedLockingStatistics) {
     OptoRuntime::print_named_counters();
@@ -610,12 +614,8 @@
 
 void notify_vm_shutdown() {
   // For now, just a dtrace probe.
-#ifndef USDT2
-  HS_DTRACE_PROBE(hotspot, vm__shutdown);
+  HOTSPOT_VM_SHUTDOWN();
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#else /* USDT2 */
-  HOTSPOT_VM_SHUTDOWN();
-#endif /* USDT2 */
 }
 
 void vm_direct_exit(int code) {
--- a/src/share/vm/runtime/javaCalls.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/javaCalls.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -319,7 +319,7 @@
   // Check if we need to wrap a potential OS exception handler around thread
   // This is used for e.g. Win32 structured exception handlers
   assert(THREAD->is_Java_thread(), "only JavaThreads can make JavaCalls");
-  // Need to wrap each and everytime, since there might be native code down the
+  // Need to wrap each and every time, since there might be native code down the
   // stack that has installed its own exception handlers
   os::os_exception_wrapper(call_helper, result, &method, args, THREAD);
 }
@@ -356,7 +356,7 @@
     // A klass might not be initialized since JavaCall's might be used during the executing of
     // the <clinit>. For example, a Thread.start might start executing on an object that is
     // not fully initialized! (bad Java programming style)
-    assert(holder->is_linked(), "rewritting must have taken place");
+    assert(holder->is_linked(), "rewriting must have taken place");
   }
 #endif
 
--- a/src/share/vm/runtime/javaCalls.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/javaCalls.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/javaFrameAnchor.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/javaFrameAnchor.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -50,6 +50,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "orderAccess_linux_ppc.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "orderAccess_bsd_x86.inline.hpp"
 #endif
--- a/src/share/vm/runtime/jniHandles.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/jniHandles.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,8 +195,10 @@
   int _count;
 public:
   CountHandleClosure(): _count(0) {}
-  virtual void do_oop(oop* unused) {
-    _count++;
+  virtual void do_oop(oop* ooph) {
+    if (*ooph != JNIHandles::deleted_handle()) {
+      _count++;
+    }
   }
   virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
   int count() { return _count; }
@@ -461,7 +463,7 @@
     // Append new block
     Thread* thread = Thread::current();
     Handle obj_handle(thread, obj);
-    // This can block, so we need to preserve obj accross call.
+    // This can block, so we need to preserve obj across call.
     _last->_next = JNIHandleBlock::allocate_block(thread);
     _last = _last->_next;
     _allocate_before_rebuild--;
@@ -528,7 +530,7 @@
   return result;
 }
 
-// This method is not thread-safe, i.e., must be called whule holding a lock on the
+// This method is not thread-safe; it must be called while holding a lock on the
 // structure.
 long JNIHandleBlock::memory_usage() const {
   return length() * sizeof(JNIHandleBlock);
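
Context for the CountHandleClosure fix above: destroying a handle does not NULL the block slot, so a raw count over all slots overstates live handles. A sketch of the lifecycle being filtered (signatures assumed from this era of JNIHandles):

    // Sketch: a destroyed handle's slot holds the deleted_handle() sentinel.
    jobject g = JNIHandles::make_global(h);  // slot holds the object
    JNIHandles::destroy_global(g);           // slot now holds the sentinel
    // Without the sentinel check, a counting closure would still tally
    // this slot as a live handle.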
--- a/src/share/vm/runtime/jniHandles.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/jniHandles.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -106,7 +106,7 @@
   JNIHandleBlock* _next;                        // Link to next block
 
   // The following instance variables are only used by the first block in a chain.
-  // Having two types of blocks complicates the code and the space overhead in negligble.
+  // Having two types of blocks complicates the code and the space overhead is negligible.
   JNIHandleBlock* _last;                        // Last block in use
   JNIHandleBlock* _pop_frame_link;              // Block to restore on PopLocalFrame call
   oop*            _free_list;                   // Handle free list
--- a/src/share/vm/runtime/mutex.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/mutex.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,16 +280,6 @@
   return x & 0x7FFFFFFF ;
 }
 
-static inline jint MarsagliaXOR (jint * const a) {
-  jint x = *a ;
-  if (x == 0) x = UNS(a)|1 ;
-  x ^= x << 6;
-  x ^= ((unsigned)x) >> 21;
-  x ^= x << 7 ;
-  *a = x ;
-  return x & 0x7FFFFFFF ;
-}
-
 static int Stall (int its) {
   static volatile jint rv = 1 ;
   volatile int OnFrame = 0 ;
@@ -507,7 +497,7 @@
   _OnDeck = NULL ;
 
   // Note that we current drop the inner lock (clear OnDeck) in the slow-path
-  // epilog immediately after having acquired the outer lock.
+  // epilogue immediately after having acquired the outer lock.
   // But instead we could consider the following optimizations:
   // A. Shift or defer dropping the inner lock until the subsequent IUnlock() operation.
   //    This might avoid potential reacquisition of the inner lock in IUlock().
@@ -931,7 +921,7 @@
 
   check_block_state(Self);
   if (Self->is_Java_thread()) {
-    // Horribile dictu - we suffer through a state transition
+    // Horrible to say - we suffer through a state transition
     assert(rank() > Mutex::special, "Potential deadlock with special or lesser rank mutex");
     ThreadBlockInVM tbivm ((JavaThread *) Self) ;
     ILock (Self) ;
@@ -963,7 +953,7 @@
 }
 
 
-// Returns true if thread succeceed [sic] in grabbing the lock, otherwise false.
+// Returns true if thread succeeds in grabbing the lock, otherwise false.
 
 bool Monitor::try_lock() {
   Thread * const Self = Thread::current();
--- a/src/share/vm/runtime/mutex.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/mutex.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -90,7 +90,7 @@
   // A special lock: Is a lock where you are guaranteed not to block while you are
   // holding it, i.e., no vm operation can happen, taking other locks, etc.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
-  // (except for "event"?) for the deadlock dection to work correctly.
+  // (except for "event"?) for the deadlock detection to work correctly.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -241,7 +241,7 @@
 //
 // Currently, however, the base object is a monitor.  Monitor contains all the
 // logic for wait(), notify(), etc.   Mutex extends monitor and restricts the
-// visiblity of wait(), notify(), and notify_all().
+// visibility of wait(), notify(), and notify_all().
 //
 // Another viable alternative would have been to have Monitor extend Mutex and
 // implement all the normal mutex and wait()-notify() logic in Mutex base class.
--- a/src/share/vm/runtime/mutexLocker.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/mutexLocker.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,6 +36,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -43,7 +46,7 @@
 // Mutexes used in the VM.
 
 extern Mutex*   Patching_lock;                   // a lock used to guard code patching of compiled code
-extern Monitor* SystemDictionary_lock;           // a lock on the system dictonary
+extern Monitor* SystemDictionary_lock;           // a lock on the system dictionary
 extern Mutex*   PackageTable_lock;               // a lock on the class loader package table
 extern Mutex*   CompiledIC_lock;                 // a lock used to guard compiled IC patching and access
 extern Mutex*   InlineCacheBuffer_lock;          // a lock used to guard the InlineCacheBuffer
@@ -349,8 +352,8 @@
 //   - reentrant locking
 //   - locking out of order
 //
-// Only too be used for verify code, where we can relaxe out dead-lock
-// dection code a bit (unsafe, but probably ok). This code is NEVER to
+// Only to be used for verify code, where we can relax our dead-lock
+// detection code a bit (unsafe, but probably ok). This code is NEVER to
 // be included in a product version.
 //
 class VerifyMutexLocker: StackObj {
@@ -362,7 +365,7 @@
     _mutex     = mutex;
     _reentrant = mutex->owned_by_self();
     if (!_reentrant) {
-      // We temp. diable strict safepoint checking, while we require the lock
+      // We temp. disable strict safepoint checking, while we require the lock
       FlagSetting fs(StrictSafepointChecks, false);
       _mutex->lock();
     }
--- a/src/share/vm/runtime/objectMonitor.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/objectMonitor.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__) && !defined(IA64)
+#if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
@@ -78,39 +78,6 @@
     len = klassname->utf8_length();                                        \
   }
 
-#ifndef USDT2
-
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notify,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__notifyAll,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__enter,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__entered,
-  jlong, uintptr_t, char*, int);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__contended__exit,
-  jlong, uintptr_t, char*, int);
-
-#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)       \
-  {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
-      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                       \
-      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
-                       (monitor), bytes, len, (millis));                   \
-    }                                                                      \
-  }
-
-#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)             \
-  {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
-      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                       \
-      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
-                       (uintptr_t)(monitor), bytes, len);                  \
-    }                                                                      \
-  }
-
-#else /* USDT2 */
-
 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
   {                                                                        \
     if (DTraceMonitorProbes) {                                            \
@@ -135,7 +102,6 @@
     }                                                                      \
   }
 
-#endif /* USDT2 */
 #else //  ndef DTRACE_ENABLED
 
 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
@@ -234,7 +200,7 @@
 // * Taken together, the cxq and the EntryList constitute or form a
 //   single logical queue of threads stalled trying to acquire the lock.
 //   We use two distinct lists to improve the odds of a constant-time
-//   dequeue operation after acquisition (in the ::enter() epilog) and
+//   dequeue operation after acquisition (in the ::enter() epilogue) and
 //   to reduce heat on the list ends.  (c.f. Michael Scott's "2Q" algorithm).
 //   A key desideratum is to minimize queue & monitor metadata manipulation
 //   that occurs while holding the monitor lock -- that is, we want to
@@ -382,6 +348,12 @@
     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
     if (JvmtiExport::should_post_monitor_contended_enter()) {
       JvmtiExport::post_monitor_contended_enter(jt, this);
+
+      // The current thread does not yet own the monitor and does not
+      // yet appear on any queues that would get it made the successor.
+      // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
+      // handler cannot accidentally consume an unpark() meant for the
+      // ParkEvent associated with this ObjectMonitor.
     }
 
     OSThreadContendState osts(Self->osthread());
@@ -439,6 +411,12 @@
   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
   if (JvmtiExport::should_post_monitor_contended_entered()) {
     JvmtiExport::post_monitor_contended_entered(jt, this);
+
+    // The current thread already owns the monitor and is not going to
+    // call park() for the remainder of the monitor enter protocol. So
+    // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
+    // event handler consumed an unpark() issued by the thread that
+    // just exited the monitor.
   }
 
   if (event.should_commit()) {
@@ -677,7 +655,7 @@
         // non-null and elect a new "Responsible" timer thread.
         //
         // This thread executes:
-        //    ST Responsible=null; MEMBAR    (in enter epilog - here)
+        //    ST Responsible=null; MEMBAR    (in enter epilogue - here)
         //    LD cxq|EntryList               (in subsequent exit)
         //
         // Entering threads in the slow/contended path execute:
@@ -1456,6 +1434,14 @@
         // Note: 'false' parameter is passed here because the
         // wait was not timed out due to thread interrupt.
         JvmtiExport::post_monitor_waited(jt, this, false);
+
+        // In this short circuit of the monitor wait protocol, the
+        // current thread never drops ownership of the monitor and
+        // never gets added to the wait queue so the current thread
+        // cannot be made the successor. This means that the
+        // JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
+        // consume an unpark() meant for the ParkEvent associated with
+        // this ObjectMonitor.
      }
      if (event.should_commit()) {
        post_monitor_wait_event(&event, 0, millis, false);
@@ -1499,21 +1485,6 @@
    exit (true, Self) ;                    // exit the monitor
    guarantee (_owner != Self, "invariant") ;
 
-   // As soon as the ObjectMonitor's ownership is dropped in the exit()
-   // call above, another thread can enter() the ObjectMonitor, do the
-   // notify(), and exit() the ObjectMonitor. If the other thread's
-   // exit() call chooses this thread as the successor and the unpark()
-   // call happens to occur while this thread is posting a
-   // MONITOR_CONTENDED_EXIT event, then we run the risk of the event
-   // handler using RawMonitors and consuming the unpark().
-   //
-   // To avoid the problem, we re-post the event. This does no harm
-   // even if the original unpark() was not consumed because we are the
-   // chosen successor for this monitor.
-   if (node._notified != 0 && _succ == Self) {
-      node._event->unpark();
-   }
-
    // The thread is on the WaitSet list - now park() it.
    // On MP systems it's conceivable that a brief spin before we park
    // could be profitable.
@@ -1595,6 +1566,25 @@
      // post monitor waited event. Note that this is past-tense, we are done waiting.
      if (JvmtiExport::should_post_monitor_waited()) {
        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
+
+       if (node._notified != 0 && _succ == Self) {
+         // In this part of the monitor wait-notify-reenter protocol it
+         // is possible (and normal) for another thread to do a fastpath
+         // monitor enter-exit while this thread is still trying to get
+         // to the reenter portion of the protocol.
+         //
+         // The ObjectMonitor was notified and the current thread is
+         // the successor which also means that an unpark() has already
+         // been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
+         // consume the unpark() that was done when the successor was
+         // set because the same ParkEvent is shared between Java
+         // monitors and JVM/TI RawMonitors (for now).
+         //
+         // We redo the unpark() to ensure forward progress, i.e., we
+         // don't want all pending threads hanging (parked) with none
+         // entering the unlocked monitor.
+         node._event->unpark();
+       }
      }
 
      if (event.should_commit()) {
@@ -2031,7 +2021,7 @@
           TEVENT (Spin abort -- too many spinners) ;
           return 0 ;
        }
-       // Slighty racy, but benign ...
+       // Slightly racy, but benign ...
        Adjust (&_Spinner, 1) ;
     }
 
--- a/src/share/vm/runtime/objectMonitor.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/objectMonitor.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -101,7 +101,7 @@
   static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner);    }
 
  public:
-  // Eventaully we'll make provisions for multiple callbacks, but
+  // Eventually we'll make provisions for multiple callbacks, but
   // now one will suffice.
   static int (*SpinCallbackFunction)(intptr_t, int) ;
   static intptr_t SpinCallbackArgument ;
@@ -272,7 +272,7 @@
   // type int, or int32_t but not intptr_t.  There's no reason
   // to use 64-bit fields for these variables on a 64-bit JVM.
 
-  volatile intptr_t  _count;        // reference count to prevent reclaimation/deflation
+  volatile intptr_t  _count;        // reference count to prevent reclamation/deflation
                                     // at stop-the-world time.  See deflate_idle_monitors().
                                     // _count is approximately |_WaitSet| + |_EntryList|
  protected:
--- a/src/share/vm/runtime/orderAccess.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/orderAccess.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -61,13 +61,13 @@
 //
 // Ensures that Load1 completes before Store2 and any subsequent store
 // operations.  Loads before Load1 may *not* float below Store2 and any
-// subseqeuent store operations.
+// subsequent store operations.
 //
 // StoreLoad:  Store1(s); StoreLoad; Load2
 //
 // Ensures that Store1 completes before Load2 and any subsequent load
 // operations.  Stores before Store1 may *not* float below Load2 and any
-// subseqeuent load operations.
+// subsequent load operations.
 //
 //
 // We define two further operations, 'release' and 'acquire'.  They are
@@ -176,7 +176,7 @@
 // compilers that we currently use (SunStudio, gcc and VC++) respect the
 // semantics of volatile here. If you build HotSpot using other
 // compilers, you may need to verify that no compiler reordering occurs
-// across the sequence point respresented by the volatile access.
+// across the sequence point represented by the volatile access.
 //
 //
 //                os::is_MP Considered Redundant
@@ -311,7 +311,7 @@
  private:
   // This is a helper that invokes the StubRoutines::fence_entry()
   // routine if it exists, It should only be used by platforms that
-  // don't another way to do the inline eassembly.
+  // don't have another way to do the inline assembly.
   static void StubRoutines_fence();
 };
 
--- a/src/share/vm/runtime/os.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/os.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -236,7 +236,7 @@
   while (true) {
     int sig;
     {
-      // FIXME : Currently we have not decieded what should be the status
+      // FIXME : Currently we have not decided what should be the status
       //         for this java thread blocked here. Once we decide about
       //         that we should fix this.
       sig = os::signal_wait();
@@ -362,7 +362,7 @@
       // exceptions anyway, check and abort if this fails.
       if (signal_thread == NULL || signal_thread->osthread() == NULL) {
         vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                      "unable to create new native thread");
+                                      os::native_thread_creation_failed_msg());
       }
 
       java_lang_Thread::set_thread(thread_oop(), signal_thread);
@@ -583,7 +583,7 @@
   ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
   u_char* obj = start_of_prev_block + space_before;
   if (size <= 0 ) {
-    // start is bad; mayhave been confused by OS data inbetween objects
+    // start is bad; may have been confused by OS data in between objects
     // search one more backwards
     start_of_prev_block = find_cushion_backwards(start_of_prev_block);
     size = *size_addr_from_base(start_of_prev_block);
@@ -1011,7 +1011,7 @@
   if (Universe::heap()->is_in(addr)) {
     HeapWord* p = Universe::heap()->block_start(addr);
     bool print = false;
-    // If we couldn't find it it just may mean that heap wasn't parseable
+    // If we couldn't find it, it just may mean that heap wasn't parsable
     // See if we were just given an oop directly
     if (p != NULL && Universe::heap()->block_is_obj(p)) {
       print = true;
@@ -1081,7 +1081,6 @@
 
   }
 
-#ifndef PRODUCT
   // Check if in metaspace.
   if (ClassLoaderDataGraph::contains((address)addr)) {
     // Use addr->print() from the debugger instead (not here)
@@ -1089,7 +1088,6 @@
                  " is pointing into metadata", addr);
     return;
   }
-#endif
 
   // Try an OS specific find
   if (os::find(addr, st)) {
@@ -1103,7 +1101,7 @@
 // if C stack is walkable beyond current frame. The check for fp() is not
 // necessary on Sparc, but it's harmless.
 bool os::is_first_C_frame(frame* fr) {
-#if defined(IA64) && !defined(_WIN32)
+#if (defined(IA64) && !defined(AIX)) && !defined(_WIN32)
   // On IA64 we have to check if the callers bsp is still valid
   // (i.e. within the register stack bounds).
   // Notice: this only works for threads created by the VM and only if
@@ -1199,7 +1197,7 @@
                            char fileSep,
                            char pathSep) {
     assert((fileSep == '/' && pathSep == ':') ||
-           (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars");
+           (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
 
     // Scan the format string to determine the length of the actual
     // boot classpath, and handle platform dependencies as well.
@@ -1264,9 +1262,6 @@
         "%/lib/jce.jar:"
         "%/lib/charsets.jar:"
         "%/lib/jfr.jar:"
-#ifdef __APPLE__
-        "%/lib/JObjC.jar:"
-#endif
 #ifdef GRAAL
         "%/lib/graal.jar:"
 #endif
@@ -1451,7 +1446,7 @@
 // >= 2 physical CPU's and >=2GB of memory, with some fuzz
 // because the graphics memory (?) sometimes masks physical memory.
 // If you want to change the definition of a server class machine
-// on some OS or platform, e.g., >=4GB on Windohs platforms,
+// on some OS or platform, e.g., >=4GB on Windows platforms,
 // then you'll have to parameterize this method based on that state,
 // as was done for logical processors here, or replicate and
 // specialize this method for each platform.  (Or fix os to have
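
The thread-creation error path above now routes through os::native_thread_creation_failed_msg(), whose default text comes from the OS_NATIVE_THREAD_CREATION_FAILED_MSG fallback added in os.hpp below. A sketch of the intended override pattern -- the platform header and message text here are assumptions, not part of this change:

    // Sketch: a platform header included ahead of the #ifndef fallback
    // (e.g. an os_<platform>.hpp) could supply a platform-specific message:
    #define OS_NATIVE_THREAD_CREATION_FAILED_MSG \
      "unable to create native thread: check process limits and memory"
    // os::native_thread_creation_failed_msg() then returns that string.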
--- a/src/share/vm/runtime/os.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/os.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,10 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "jvm_windows.h"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "jvm_aix.h"
+# include <setjmp.h>
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "jvm_bsd.h"
 # include <setjmp.h>
@@ -161,6 +165,7 @@
   static jlong  javaTimeNanos();
   static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
   static void   run_periodic_checks();
+  static bool   supports_monotonic_clock();
 
 
   // Returns the elapsed time in seconds since the vm started.
@@ -395,7 +400,7 @@
     // was equal.  However, some platforms mask off faulting addresses
     // to the page size, so now we just check that the address is
     // within the page.  This makes the thread argument unnecessary,
-    // but we retain the NULL check to preserve existing behaviour.
+    // but we retain the NULL check to preserve existing behavior.
     if (thread == NULL) return false;
     address page = (address) _mem_serialize_page;
     return addr >= page && addr < (page + os::vm_page_size());
@@ -430,7 +435,10 @@
   static intx current_thread_id();
   static int current_process_id();
   static int sleep(Thread* thread, jlong ms, bool interruptable);
-  static int naked_sleep();
+  // Short standalone OS sleep suitable for slow path spin loop.
+  // Ignores Thread.interrupt() (so keep it short).
+  // ms = 0, will sleep for the least amount of time allowed by the OS.
+  static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
   static void yield();        // Yields to all threads with same priority
   enum YieldResult {
@@ -540,7 +548,7 @@
 
   // Loads .dll/.so and
   // in case of error it checks if .dll/.so was built for the
-  // same architecture as Hotspot is running on
+  // same architecture as HotSpot is running on
   static void* dll_load(const char *name, char *ebuf, int ebuflen);
 
   // lookup symbol in a shared library
@@ -769,6 +777,10 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.hpp"
+# include "os_posix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_posix.hpp"
 # include "os_bsd.hpp"
@@ -797,6 +809,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "os_linux_ppc.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "os_aix_ppc.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "os_bsd_x86.hpp"
 #endif
@@ -804,6 +819,10 @@
 # include "os_bsd_zero.hpp"
 #endif
 
+#ifndef OS_NATIVE_THREAD_CREATION_FAILED_MSG
+#define OS_NATIVE_THREAD_CREATION_FAILED_MSG "unable to create native thread: possibly out of memory or process/resource limits reached"
+#endif
+
  public:
 #ifndef PLATFORM_PRINT_NATIVE_STACK
   // No platform-specific code for printing the native stack.
@@ -826,6 +845,9 @@
   // Hint to the underlying OS that a task switch would not be good.
   // Void return because it's a hint and can fail.
   static void hint_no_preempt();
+  static const char* native_thread_creation_failed_msg() {
+    return OS_NATIVE_THREAD_CREATION_FAILED_MSG;
+  }
 
   // Used at creation if requested by the diagnostic flag PauseAtStartup.
   // Causes the VM to wait until an external stimulus has been applied
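
On the new os::naked_short_sleep(jlong ms) above: it is meant for slow-path spin loops and ignores Thread.interrupt(), per its comment. A hypothetical backoff loop using it -- try_claim_work() and the round counter are placeholders, not HotSpot names:

    // Hypothetical slow-path backoff (sketch). Per the declaration above,
    // ms = 0 sleeps for the least amount of time the OS allows.
    int idle_rounds = 0;
    while (!try_claim_work()) {     // placeholder predicate
      if (++idle_rounds < 100) {
        os::yield();                // first give up the CPU slice cheaply
      } else {
        os::naked_short_sleep(1);   // then back off ~1ms; keep it short
      }
    }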
--- a/src/share/vm/runtime/osThread.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/osThread.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -105,6 +105,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "osThread_windows.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "osThread_aix.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "osThread_bsd.hpp"
 #endif
--- a/src/share/vm/runtime/park.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/park.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -59,58 +59,22 @@
 
   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv =  (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system.  In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign.  Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible.  At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
@@ -124,13 +88,14 @@
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL      , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
   }
+  Thread::SpinRelease(&ListLock);
 }
 
 // Override operator new and delete so we can ensure that the
@@ -152,7 +117,7 @@
 
 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 // allocate() and release() code for use by Parkers.  The Parker:: forms
-// will eventually be removed as we consolide and shift over to ParkEvents
+// will eventually be removed as we consolidate and shift over to ParkEvents
 // for both builtin synchronization and JSR166 operations.
 
 volatile int Parker::ListLock = 0 ;
@@ -164,56 +129,21 @@
 
   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p  == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
     }
-
-    // We've detached the list.  The list in-hand is now
-    // local to this thread.   This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv =  (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived.  Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
   }
+  Thread::SpinRelease(&ListLock);
 
   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall.  Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor.  In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible.  At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
@@ -227,11 +157,12 @@
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL      , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
   }
+  Thread::SpinRelease(&ListLock);
 }
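
Both the ParkEvent and Parker paths now use the same lock-bracketed push/pop on a
singly-linked free list. A minimal standalone sketch of that pattern, using C++11
atomics in place of HotSpot's Thread::SpinAcquire/SpinRelease (Node, g_free_list,
and g_list_lock are illustrative names, not HotSpot's):

  #include <atomic>

  struct Node { Node* free_next = nullptr; };

  static std::atomic_flag g_list_lock = ATOMIC_FLAG_INIT;
  static Node*            g_free_list = nullptr;

  static void spin_acquire(std::atomic_flag& l) {
    while (l.test_and_set(std::memory_order_acquire)) { /* spin */ }
  }
  static void spin_release(std::atomic_flag& l) {
    l.clear(std::memory_order_release);
  }

  // Pop one node under the lock; returns nullptr when the list is empty.
  Node* allocate_node() {
    spin_acquire(g_list_lock);
    Node* n = g_free_list;
    if (n != nullptr) g_free_list = n->free_next;
    spin_release(g_list_lock);
    return n;
  }

  // Push a node back. The list is only touched while holding the lock, so
  // no CAS retry loop (and none of the detach/reattach races) is needed.
  void release_node(Node* n) {
    spin_acquire(g_list_lock);
    n->free_next = g_free_list;
    g_free_list = n;
    spin_release(g_list_lock);
  }
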
 
--- a/src/share/vm/runtime/perfData.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/perfData.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -39,7 +39,7 @@
 PerfDataList*   PerfDataManager::_constants = NULL;
 
 /*
- * The jvmstat global and subsysem jvmstat counter name spaces. The top
+ * The jvmstat global and subsystem jvmstat counter name spaces. The top
  * level name spaces imply the interface stability level of the counter,
  * which generally follows the Java package, class, and property naming
  * conventions. The CounterNS enumeration values should be used to index
--- a/src/share/vm/runtime/perfData.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/perfData.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,7 +116,7 @@
  *
  * A PerfData subtype is not required to provide an implementation for
  * each variability classification. For example, the String type provides
- * Variable and Constant variablility classifications in the PerfStringVariable
+ * Variable and Constant variability classifications in the PerfStringVariable
  * and PerfStringConstant classes, but does not provide a counter type.
  *
  * Performance data are also described by a unit of measure. Units allow
@@ -172,10 +172,10 @@
  *   foo_counter->inc();
  *
  * Creating a performance counter that holds a variably changing long
- * data value with untis specified in U_Bytes in the "com.sun.ci
+ * data value with units specified in U_Bytes in the "com.sun.ci
  * name space.
  *
- *   PerfLongVariable* bar_varible;
+ *   PerfLongVariable* bar_variable;
  *   bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
  *                                                        PerfData::U_Bytes,
  *                                                        optionalInitialValue,
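
For reference, the header's corrected usage example assembled into one snippet.
COM_CI and optionalInitialValue are the placeholders the comment itself uses; the
trailing CHECK argument is an assumption based on the TRAPS convention used
elsewhere in this file:

  PerfLongVariable* bar_variable;
  bar_variable = PerfDataManager::create_long_variable(COM_CI, "bar",
                                                       PerfData::U_Bytes,
                                                       optionalInitialValue,
                                                       CHECK);  // assumed
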
@@ -203,7 +203,7 @@
  *    In this example, the PerfData pointer can be ignored as the caller
  *    is relying on the StatSampler PeriodicTask to sample the given
  *    address at a regular interval. The interval is defined by the
- *    PerfDataSamplingInterval global variable, and is applyied on
+ *    PerfDataSamplingInterval global variable, and is applied on
  *    a system-wide basis, not on a per-counter basis.
  *
  * Creating a performance counter in an arbitrary name space that utilizes
@@ -234,7 +234,7 @@
  * the UsePerfData flag. Counters will be created on the c-heap
  * if UsePerfData is false.
  *
- * Until further noice, all PerfData objects should be created and
+ * Until further notice, all PerfData objects should be created and
  * manipulated within a guarded block. The guard variable is
  * UsePerfData, a product flag set to true by default. This flag may
  * be removed from the product in the future.
@@ -586,7 +586,7 @@
  *
  * The abstraction is not complete. A more general container class
  * would provide an Iterator abstraction that could be used to
- * traverse the lists. This implementation still relys upon integer
+ * traverse the lists. This implementation still relies upon integer
  * iterators and the at(int index) method. However, the GrowableArray
  * is not directly visible outside this class and can be replaced by
  * some other implementation, as long as that implementation provides
--- a/src/share/vm/runtime/perfMemory.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/perfMemory.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,7 +155,7 @@
 
 void PerfMemory::destroy() {
 
-  assert(_prologue != NULL, "prologue pointer must be initialized");
+  if (_prologue == NULL) return;
 
   if (_start != NULL && _prologue->overflow != 0) {
 
--- a/src/share/vm/runtime/perfMemory.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/perfMemory.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -55,7 +55,7 @@
  * of the fields must be changed along with their counterparts in the
  * PerfDataBuffer Java class. The first four bytes of this structure
  * should never change, or compatibility problems between the monitoring
- * applications and Hotspot VMs will result. The reserved fields are
+ * applications and HotSpot VMs will result. The reserved fields are
  * available for future enhancements.
  */
 typedef struct {
--- a/src/share/vm/runtime/reflection.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/reflection.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,6 +36,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "prims/jvm.h"
+#include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
@@ -482,7 +483,7 @@
     ik = InstanceKlass::cast(hc);
 
     // There's no way to make a host class loop short of patching memory.
-    // Therefore there cannot be a loop here unles there's another bug.
+    // Therefore there cannot be a loop here unless there's another bug.
     // Still, let's check for it.
     assert(--inf_loop_check > 0, "no host_klass loop");
   }
@@ -941,6 +942,11 @@
           // Method resolution threw an exception; wrap it in an InvocationTargetException
             oop resolution_exception = PENDING_EXCEPTION;
             CLEAR_PENDING_EXCEPTION;
+            // JVMTI has already reported the pending exception
+            // JVMTI internal flag reset is needed in order to report InvocationTargetException
+            if (THREAD->is_Java_thread()) {
+              JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+            }
             JavaCallArguments args(Handle(THREAD, resolution_exception));
             THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
                 vmSymbols::throwable_void_signature(),
@@ -1073,6 +1079,12 @@
     // Method threw an exception; wrap it in an InvocationTargetException
     oop target_exception = PENDING_EXCEPTION;
     CLEAR_PENDING_EXCEPTION;
+    // JVMTI has already reported the pending exception
+    // JVMTI internal flag reset is needed in order to report InvocationTargetException
+    if (THREAD->is_Java_thread()) {
+      JvmtiExport::clear_detected_exception((JavaThread*) THREAD);
+    }
+
     JavaCallArguments args(Handle(THREAD, target_exception));
     THROW_ARG_0(vmSymbols::java_lang_reflect_InvocationTargetException(),
                 vmSymbols::throwable_void_signature(),
--- a/src/share/vm/runtime/reflection.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/reflection.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -138,9 +138,9 @@
   static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror, TRAPS);
 
 public:
-  // Method invokation through java.lang.reflect.Method
+  // Method invocation through java.lang.reflect.Method
   static oop      invoke_method(oop method_mirror, Handle receiver, objArrayHandle args, TRAPS);
-  // Method invokation through java.lang.reflect.Constructor
+  // Method invocation through java.lang.reflect.Constructor
   static oop      invoke_constructor(oop method_mirror, objArrayHandle args, TRAPS);
 
 };
--- a/src/share/vm/runtime/registerMap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/registerMap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -70,7 +70,7 @@
 //   3) The RegisterMap keeps track of the values of callee-saved registers
 //      from frame to frame (hence, the name).  For some stack traversal the
 //      values of the callee-saved registers does not matter, e.g., if you
-//      only need the static properies such as frame type, pc, and such.
+//      only need the static properties such as frame type, pc, and such.
 //      Updating of the RegisterMap can be turned off by instantiating the
 //      register map as: RegisterMap map(thread, false);
 
--- a/src/share/vm/runtime/relocator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/relocator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -141,7 +141,7 @@
 }
 
 // size is the new size of the instruction at bci. Hence, if size is less than the current
-// instruction sice, we will shrink the code.
+// instruction size, we will shrink the code.
 methodHandle Relocator::insert_space_at(int bci, int size, u_char inst_buffer[], TRAPS) {
   _changes = new GrowableArray<ChangeItem*> (10);
   _changes->push(new ChangeWiden(bci, size, inst_buffer));
@@ -192,7 +192,7 @@
     // Execute operation
     if (!ci->handle_code_change(this)) return false;
 
-    // Shuffel items up
+    // Shuffle items up
     for (int index = 1; index < _changes->length(); index++) {
       _changes->at_put(index-1, _changes->at(index));
     }
@@ -214,7 +214,7 @@
 }
 
 // We need a special instruction size method, since lookupswitches and tableswitches might not be
-// properly alligned during relocation
+// properly aligned during relocation
 int Relocator::rc_instr_len(int bci) {
   Bytecodes::Code bc= code_at(bci);
   switch (bc) {
@@ -611,7 +611,7 @@
 
   // In case we have shrunken a tableswitch/lookupswitch statement, we store the last
   // bytes that get overwritten. We have to copy the bytes after the change_jumps method
-  // has been called, since it is likly to update last offset in a tableswitch/lookupswitch
+  // has been called, since it is likely to update last offset in a tableswitch/lookupswitch
   if (delta < 0) {
     assert(delta>=-3, "we cannot overwrite more than 3 bytes");
     memcpy(_overwrite, addr_at(bci + ilen + delta), -delta);
--- a/src/share/vm/runtime/safepoint.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -156,7 +156,7 @@
   // stopped by different mechanisms:
   //
   //  1. Running interpreted
-  //     The interpeter dispatch table is changed to force it to
+  //     The interpreter dispatch table is changed to force it to
   //     check for a safepoint condition between bytecodes.
   //  2. Running in native code
   //     When returning from the native code, a Java thread must check
@@ -282,7 +282,7 @@
       // See the comments in synchronizer.cpp for additional remarks on spinning.
       //
       // In the future we might:
-      // 1. Modify the safepoint scheme to avoid potentally unbounded spinning.
+      // 1. Modify the safepoint scheme to avoid potentially unbounded spinning.
       //    This is tricky as the path used by a thread exiting the JVM (say on
       //    a JNI call-out) simply stores into its state field.  The burden
       //    is placed on the VM thread, which must poll (spin).
@@ -489,7 +489,7 @@
     ConcurrentGCThread::safepoint_desynchronize();
   }
 #endif // INCLUDE_ALL_GCS
-  // record this time so VMThread can keep track how much time has elasped
+  // record this time so VMThread can keep track how much time has elapsed
   // since last safepoint.
   _end_of_last_safepoint = os::javaTimeMillis();
 }
@@ -826,7 +826,7 @@
 void SafepointSynchronize::print_safepoint_timeout(SafepointTimeoutReason reason) {
   if (!timeout_error_printed) {
     timeout_error_printed = true;
-    // Print out the thread infor which didn't reach the safepoint for debugging
+    // Print out the thread info which didn't reach the safepoint for debugging
     // purposes (useful when there are lots of threads in the debugger).
     tty->print_cr("");
     tty->print_cr("# SafepointSynchronize::begin: Timeout detected:");
@@ -1093,7 +1093,7 @@
       if (caller_fr.is_deoptimized_frame()) {
         // The exception patch will destroy registers that are still
         // live and will be needed during deoptimization. The
-        // Async exception should have defered the exception until the
+        // async exception should have deferred the exception until the
         // next safepoint which will be detected when we get into
         // the interpreter so if we have an exception now things
         // are messed up.
--- a/src/share/vm/runtime/safepoint.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/safepoint.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -59,7 +59,7 @@
  public:
   enum SynchronizeState {
       _not_synchronized = 0,                   // Threads not synchronized at a safepoint
-                                               // Keep this value 0. See the coment in do_call_back()
+                                               // Keep this value 0. See the comment in do_call_back()
       _synchronizing    = 1,                   // Synchronizing in progress
       _synchronized     = 2                    // All Java threads are stopped at a safepoint. Only VM thread is running
   };
@@ -91,7 +91,7 @@
   } SafepointStats;
 
  private:
-  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquireing the Threads_lock
+  static volatile SynchronizeState _state;     // Threads might read this flag directly, without acquiring the Threads_lock
   static volatile int _waiting_to_block;       // number of threads we are waiting for to block
   static int _current_jni_active_count;        // Counts the number of active critical natives during the safepoint
 
@@ -106,7 +106,7 @@
 private:
   static long       _end_of_last_safepoint;     // Time of last safepoint in milliseconds
 
-  // statistics
+  // Statistics
   static jlong            _safepoint_begin_time;     // time when safepoint begins
   static SafepointStats*  _safepoint_stats;          // array of SafepointStats struct
   static int              _cur_stat_index;           // current index to the above array
@@ -155,7 +155,7 @@
     _current_jni_active_count++;
   }
 
-  // Called when a thread volantary blocks
+  // Called when a thread voluntarily blocks
   static void   block(JavaThread *thread);
   static void   signal_thread_at_safepoint()              { _waiting_to_block--; }
 
@@ -172,7 +172,7 @@
   static bool is_cleanup_needed();
   static void do_cleanup_tasks();
 
-  // debugging
+  // Debugging
   static void print_state()                                PRODUCT_RETURN;
   static void safepoint_msg(const char* format, ...)       PRODUCT_RETURN;
 
@@ -183,7 +183,7 @@
   static void set_is_at_safepoint()                        { _state = _synchronized; }
   static void set_is_not_at_safepoint()                    { _state = _not_synchronized; }
 
-  // assembly support
+  // Assembly support
   static address address_of_state()                        { return (address)&_state; }
 
   static address safepoint_counter_addr()                  { return (address)&_safepoint_counter; }
--- a/src/share/vm/runtime/serviceThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/serviceThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -27,6 +27,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/serviceThread.hpp"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
 #include "prims/jvmtiImpl.hpp"
 #include "services/gcNotifier.hpp"
 #include "services/diagnosticArgument.hpp"
@@ -66,7 +67,7 @@
     // exceptions anyway, check and abort if this fails.
     if (thread == NULL || thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), thread);
--- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -127,14 +127,6 @@
 
 #include <math.h>
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
-HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
-                      char*, int, char*, int, char*, int);
-HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
-                      char*, int, char*, int, char*, int);
-#endif /* !USDT2 */
-
 // Implementation of SharedRuntime
 
 #ifndef PRODUCT
@@ -408,7 +400,7 @@
 
 #endif
 
-#if defined(__SOFTFP__) || defined(PPC)
+#if defined(__SOFTFP__) || defined(PPC32)
 double SharedRuntime::dsqrt(double f) {
   return sqrt(f);
 }
@@ -472,7 +464,7 @@
   return (jdouble)x;
 JRT_END
 
-// Exception handling accross interpreter/compiler boundaries
+// Exception handling across interpreter/compiler boundaries
 //
 // exception_handler_for_return_address(...) returns the continuation address.
 // The continuation address is the entry point of the exception handler of the
@@ -500,6 +492,13 @@
     assert(!nm->is_native_method(), "no exception handler");
     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
     if (nm->is_deopt_pc(return_address)) {
+      // If we come here because of a stack overflow, the stack may be
+      // unguarded. Reguard the stack; otherwise, if we return to the
+      // deopt blob and the stack bang causes a stack overflow, we
+      // crash.
+      bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
+      if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
+      assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
       return SharedRuntime::deopt_blob()->unpack_with_exception();
     } else {
       return nm->exception_begin();
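
The added reguarding re-arms the thread's yellow (guard) zone before returning to
the deopt blob, whose stack bang could otherwise overflow an unguarded stack. As a
rough, self-contained POSIX illustration of what arming and disarming a guard page
means (mprotect(2) on a page inside the stack; this is not HotSpot's
implementation):

  #include <sys/mman.h>
  #include <cstddef>

  // 'guard' must be a page-aligned address inside the thread's stack.
  bool rearm_guard(void* guard, size_t page_size) {
    // PROT_NONE makes the next touch of the page fault, which is what a
    // "stack bang" relies on to detect overflow early.
    return mprotect(guard, page_size, PROT_NONE) == 0;
  }

  bool disarm_guard(void* guard, size_t page_size) {
    // After an overflow the zone is unprotected so the handler can run.
    return mprotect(guard, page_size, PROT_READ | PROT_WRITE) == 0;
  }
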
@@ -714,8 +713,8 @@
     // Allow abbreviated catch tables.  The idea is to allow a method
     // to materialize its exceptions without committing to the exact
     // routing of exceptions.  In particular this is needed for adding
-    // a synthethic handler to unlock monitors when inlining
-    // synchonized methods since the unlock path isn't represented in
+    // a synthetic handler to unlock monitors when inlining
+    // synchronized methods since the unlock path isn't represented in
     // the bytecodes.
     t = table.entry_for(catch_pco, -1, 0);
   }
@@ -853,7 +852,7 @@
           // Exception happened in CodeCache. Must be either:
           // 1. Inline-cache check in C2I handler blob,
           // 2. Inline-cache check in nmethod, or
-          // 3. Implict null exception in nmethod
+          // 3. Implicit null exception in nmethod
 
           if (!cb->is_nmethod()) {
             bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
@@ -1024,14 +1023,9 @@
   Klass* klass = o->klass();
   int size = o->size();
   Symbol* name = klass->name();
-#ifndef USDT2
-  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
-                   name->bytes(), name->utf8_length(), size * HeapWordSize);
-#else /* USDT2 */
   HOTSPOT_OBJECT_ALLOC(
                    get_java_tid(thread),
                    (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
-#endif /* USDT2 */
   return 0;
 }
 
@@ -1041,18 +1035,11 @@
   Symbol* kname = method->klass_name();
   Symbol* name = method->name();
   Symbol* sig = method->signature();
-#ifndef USDT2
-  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
-      kname->bytes(), kname->utf8_length(),
-      name->bytes(), name->utf8_length(),
-      sig->bytes(), sig->utf8_length());
-#else /* USDT2 */
   HOTSPOT_METHOD_ENTRY(
       get_java_tid(thread),
       (char *) kname->bytes(), kname->utf8_length(),
       (char *) name->bytes(), name->utf8_length(),
       (char *) sig->bytes(), sig->utf8_length());
-#endif /* USDT2 */
   return 0;
 JRT_END
 
@@ -1062,18 +1049,11 @@
   Symbol* kname = method->klass_name();
   Symbol* name = method->name();
   Symbol* sig = method->signature();
-#ifndef USDT2
-  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
-      kname->bytes(), kname->utf8_length(),
-      name->bytes(), name->utf8_length(),
-      sig->bytes(), sig->utf8_length());
-#else /* USDT2 */
   HOTSPOT_METHOD_RETURN(
       get_java_tid(thread),
       (char *) kname->bytes(), kname->utf8_length(),
       (char *) name->bytes(), name->utf8_length(),
       (char *) sig->bytes(), sig->utf8_length());
-#endif /* USDT2 */
   return 0;
 JRT_END
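
The HOTSPOT_* macros retained above come from the dtrace-generated USDT2 header;
the hand-declared HS_DTRACE_PROBE* forms are gone. For a feel of what a user-level
statically defined tracing (USDT) probe looks like in general, a minimal sketch
using SystemTap's <sys/sdt.h> (provider and probe names here are made up, not
HotSpot's):

  #include <sys/sdt.h>
  #include <cstddef>

  void report_alloc(long tid, const char* name, int len, size_t bytes) {
    // Compiles down to a NOP site until a tracer attaches; tools display
    // the double underscore as a dash, i.e. object__alloc as object-alloc.
    DTRACE_PROBE4(myapp, object__alloc, tid, name, len, bytes);
  }
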
 
@@ -2462,7 +2442,7 @@
   ResourceMark rm;
 
   NOT_PRODUCT(int insts_size);
-  AdapterBlob* B = NULL;
+  AdapterBlob* new_adapter = NULL;
   AdapterHandlerEntry* entry = NULL;
   AdapterFingerPrint* fingerprint = NULL;
   {
@@ -2494,7 +2474,8 @@
 
 #ifdef ASSERT
     AdapterHandlerEntry* shared_entry = NULL;
-    if (VerifyAdapterSharing && entry != NULL) {
+    // Start adapter sharing verification only after the VM is booted.
+    if (VerifyAdapterSharing && (entry != NULL)) {
       shared_entry = entry;
       entry = NULL;
     }
@@ -2510,41 +2491,44 @@
     // Make a C heap allocated version of the fingerprint to store in the adapter
     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
 
+    // StubRoutines::code2() is initialized after this function can be called. As a result,
+    // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was
+    // generated prior to StubRoutines::code2() being set. "Checks" refers to the checks
+    // generated in an I2C stub that ensure the stub is called from an interpreter frame.
+    bool contains_all_checks = StubRoutines::code2() != NULL;
+
     // Create I2C & C2I handlers
-
     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
       CodeBuffer buffer(buf);
       short buffer_locs[20];
       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                              sizeof(buffer_locs)/sizeof(relocInfo));
+
       MacroAssembler _masm(&buffer);
-
       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                      total_args_passed,
                                                      comp_args_on_stack,
                                                      sig_bt,
                                                      regs,
                                                      fingerprint);
-
 #ifdef ASSERT
       if (VerifyAdapterSharing) {
         if (shared_entry != NULL) {
-          assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
-                 "code must match");
+          assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size()), "code must match");
           // Release the one just created and return the original
           _adapters->free_entry(entry);
           return shared_entry;
         } else  {
-          entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
+          entry->save_code(buf->code_begin(), buffer.insts_size());
         }
       }
 #endif
 
-      B = AdapterBlob::create(&buffer);
+      new_adapter = AdapterBlob::create(&buffer);
       NOT_PRODUCT(insts_size = buffer.insts_size());
     }
-    if (B == NULL) {
+    if (new_adapter == NULL) {
       // CodeCache is full, disable compilation
       // Ought to log this but compile log is only per compile thread
       // and we're some nondescript Java thread.
@@ -2552,7 +2536,7 @@
       CompileBroker::handle_full_code_cache();
       return NULL; // Out of CodeCache space
     }
-    entry->relocate(B->content_begin());
+    entry->relocate(new_adapter->content_begin());
 #ifndef PRODUCT
     // debugging support
     if (PrintAdapterHandlers || PrintStubCode) {
@@ -2571,22 +2555,25 @@
       }
     }
 #endif
-
-    _adapters->add(entry);
+    // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
+    // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
+    if (contains_all_checks || !VerifyAdapterCalls) {
+      _adapters->add(entry);
+    }
   }
   // Outside of the lock
-  if (B != NULL) {
+  if (new_adapter != NULL) {
     char blob_id[256];
     jio_snprintf(blob_id,
                  sizeof(blob_id),
                  "%s(%s)@" PTR_FORMAT,
-                 B->name(),
+                 new_adapter->name(),
                  fingerprint->as_string(),
-                 B->content_begin());
-    Forte::register_stub(blob_id, B->content_begin(), B->content_end());
+                 new_adapter->content_begin());
+    Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
 
     if (JvmtiExport::should_post_dynamic_code_generated()) {
-      JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
+      JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
     }
   }
   return entry;
@@ -2618,7 +2605,6 @@
   delete _fingerprint;
 #ifdef ASSERT
   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code, mtCode);
-  if (_saved_sig)  FREE_C_HEAP_ARRAY(Basictype, _saved_sig, mtCode);
 #endif
 }
 
@@ -2627,35 +2613,30 @@
 // Capture the code before relocation so that it can be compared
 // against other versions.  If the code is captured after relocation
 // then relative instructions won't be equivalent.
-void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
+void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
-  _code_length = length;
+  _saved_code_length = length;
   memcpy(_saved_code, buffer, length);
-  _total_args_passed = total_args_passed;
-  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed, mtCode);
-  memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
 }
 
 
-bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
-  if (length != _code_length) {
+bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length) {
+  if (length != _saved_code_length) {
     return false;
   }
-  for (int i = 0; i < length; i++) {
-    if (buffer[i] != _saved_code[i]) {
-      return false;
-    }
-  }
-  return true;
+
+  return memcmp(buffer, _saved_code, length) == 0;
 }
 #endif
 
 
-// Create a native wrapper for this native method.  The wrapper converts the
-// java compiled calling convention to the native convention, handlizes
-// arguments, and transitions to native.  On return from the native we transition
-// back to java blocking if a safepoint is in progress.
-nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int compile_id) {
+/**
+ * Create a native wrapper for this native method.  The wrapper converts the
+ * Java-compiled calling convention to the native convention, handles
+ * arguments, and transitions to native.  On return from the native code we
+ * transition back to Java, blocking if a safepoint is in progress.
+ */
+void AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
   ResourceMark rm;
   nmethod* nm = NULL;
 
@@ -2664,16 +2645,19 @@
          method->has_native_function(), "must have something valid to call!");
 
   {
-    // perform the work while holding the lock, but perform any printing outside the lock
+    // Perform the work while holding the lock, but perform any printing outside the lock
     MutexLocker mu(AdapterHandlerLibrary_lock);
     // See if somebody beat us to it
     nm = method->code();
-    if (nm) {
-      return nm;
+    if (nm != NULL) {
+      return;
     }
 
+    const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
+    assert(compile_id > 0, "Must generate native wrapper");
+
+
     ResourceMark rm;
-
     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
     if (buf != NULL) {
       CodeBuffer buffer(buf);
@@ -2705,16 +2689,14 @@
       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing);
 
       // Generate the compiled-to-native wrapper code
-      nm = SharedRuntime::generate_native_wrapper(&_masm,
-                                                  method,
-                                                  compile_id,
-                                                  sig_bt,
-                                                  regs,
-                                                  ret_type);
+      nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
+
+      if (nm != NULL) {
+        method->set_code(method, nm);
+      }
     }
-  }
-
-  // Must unlock before calling set_code
+  } // Unlock AdapterHandlerLibrary_lock
+
 
   // Install the generated code.
   if (nm != NULL) {
@@ -2722,13 +2704,11 @@
       ttyLocker ttyl;
       CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
     }
-    method->set_code(method, nm);
     nm->post_compiled_method_load_event();
   } else {
     // CodeCache is full, disable compilation
     CompileBroker::handle_full_code_cache();
   }
-  return nm;
 }
 
 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
@@ -2806,6 +2786,71 @@
 }
 #endif // ndef HAVE_DTRACE_H
 
+int SharedRuntime::convert_ints_to_longints_argcnt(int in_args_count, BasicType* in_sig_bt) {
+  int argcnt = in_args_count;
+  if (CCallingConventionRequiresIntsAsLongs) {
+    for (int in = 0; in < in_args_count; in++) {
+      BasicType bt = in_sig_bt[in];
+      switch (bt) {
+        case T_BOOLEAN:
+        case T_CHAR:
+        case T_BYTE:
+        case T_SHORT:
+        case T_INT:
+          argcnt++;
+          break;
+        default:
+          break;
+      }
+    }
+  } else {
+    assert(0, "This should not be needed on this platform");
+  }
+
+  return argcnt;
+}
+
+void SharedRuntime::convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
+                                             BasicType*& in_sig_bt, VMRegPair*& in_regs) {
+  if (CCallingConventionRequiresIntsAsLongs) {
+    VMRegPair *new_in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, i2l_argcnt);
+    BasicType *new_in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, i2l_argcnt);
+
+    int argcnt = 0;
+    for (int in = 0; in < in_args_count; in++, argcnt++) {
+      BasicType bt  = in_sig_bt[in];
+      VMRegPair reg = in_regs[in];
+      switch (bt) {
+        case T_BOOLEAN:
+        case T_CHAR:
+        case T_BYTE:
+        case T_SHORT:
+        case T_INT:
+          // Convert (bt) to (T_LONG,bt).
+          new_in_sig_bt[argcnt  ] = T_LONG;
+          new_in_sig_bt[argcnt+1] = bt;
+          assert(reg.first()->is_valid() && !reg.second()->is_valid(), "");
+          new_in_regs[argcnt  ].set2(reg.first());
+          new_in_regs[argcnt+1].set_bad();
+          argcnt++;
+          break;
+        default:
+          // No conversion needed.
+          new_in_sig_bt[argcnt] = bt;
+          new_in_regs[argcnt]   = reg;
+          break;
+      }
+    }
+    assert(argcnt == i2l_argcnt, "must match");
+
+    in_regs = new_in_regs;
+    in_sig_bt = new_in_sig_bt;
+    in_args_count = i2l_argcnt;
+  } else {
+    assert(0, "This should not be needed on this platform");
+  }
+}
+
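
The effect of convert_ints_to_longints on a signature, as a standalone sketch
(simplified enum and std::vector instead of HotSpot's BasicType arrays; the
register bookkeeping is omitted):

  #include <vector>

  enum Bt { T_BOOLEAN, T_CHAR, T_BYTE, T_SHORT, T_INT, T_LONG, T_OBJECT };

  // (T_INT, T_OBJECT) widens to (T_LONG, T_INT, T_OBJECT): each sub-long
  // integer gains a T_LONG "carrier" entry in front of it, mirroring the
  // (bt) -> (T_LONG, bt) conversion above.
  std::vector<Bt> widen_ints(const std::vector<Bt>& sig) {
    std::vector<Bt> out;
    for (Bt bt : sig) {
      switch (bt) {
        case T_BOOLEAN: case T_CHAR: case T_BYTE: case T_SHORT: case T_INT:
          out.push_back(T_LONG);  // carrier slot for the widened value
          // fall through: the original type tag still follows the carrier
        default:
          out.push_back(bt);
      }
    }
    return out;
  }
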
 // -------------------------------------------------------------------------
 // Java-Java calling convention
 // (what you use when Java calls Java)
@@ -2905,7 +2950,7 @@
 // called from very start of a compiled OSR nmethod.  A temp array is
 // allocated to hold the interesting bits of the interpreter frame.  All
 // active locks are inflated to allow them to move.  The displaced headers and
-// active interpeter locals are copied into the temp buffer.  Then we return
+// active interpreter locals are copied into the temp buffer.  Then we return
 // back to the compiled code.  The compiled code then pops the current
 // interpreter frame off the stack and pushes a new compiled frame.  Then it
 // copies the interpreter locals and displaced headers where it wants.
--- a/src/share/vm/runtime/sharedRuntime.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -141,7 +141,7 @@
   static double dabs(double f);
 #endif
 
-#if defined(__SOFTFP__) || defined(PPC)
+#if defined(__SOFTFP__) || defined(PPC32)
   static double dsqrt(double f);
 #endif
 
@@ -366,7 +366,25 @@
                                                           const VMRegPair* regs) NOT_DEBUG_RETURN;
 
   // Ditto except for calling C
-  static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, int total_args_passed);
+  //
+  // C argument in register AND stack slot.
+  // Some architectures require that an argument must be passed in a register
+  // AND in a stack slot. These architectures provide a second VMRegPair array
+  // to be filled by the c_calling_convention method. On other architectures,
+  // NULL is being passed as the second VMRegPair array, so arguments are either
+  // passed in a register OR in a stack slot.
+  static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2,
+                                  int total_args_passed);
+
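
A toy model of the two-array contract just described, for intuition only (RegPair
stands in for VMRegPair; a real convention also spills some arguments to
stack-only slots):

  struct RegPair { int index; };  // stand-in for VMRegPair

  // Fill 'regs' with register assignments. When the ABI reserves a stack
  // home for every register argument, the caller passes a non-null 'regs2'
  // and each argument is mirrored into a stack slot as well.
  int toy_c_calling_convention(int nargs, RegPair* regs, RegPair* regs2) {
    int slots = 0;
    for (int i = 0; i < nargs; i++) {
      regs[i].index = i;              // pretend argument i uses register i
      if (regs2 != nullptr) {
        regs2[i].index = slots++;     // ...and also owns stack slot i
      }
    }
    return slots;                     // slots the caller must reserve
  }
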
+  // Compute the new number of arguments in the signature if 32 bit ints
+  // must be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
+  // is true.
+  static int  convert_ints_to_longints_argcnt(int in_args_count, BasicType* in_sig_bt);
+  // Adapt a method's signature if it contains 32 bit integers that must
+  // be converted to longs. Needed if CCallingConventionRequiresIntsAsLongs
+  // is true.
+  static void convert_ints_to_longints(int i2l_argcnt, int& in_args_count,
+                                       BasicType*& in_sig_bt, VMRegPair*& in_regs);
 
   // Generate I2C and C2I adapters. These adapters are simple argument marshalling
   // blobs. Unlike adapters in the tiger and earlier releases, the code in these
@@ -380,13 +398,13 @@
   // location for the interpreter to record. This is used by the frame code
   // to correct the sender code to match up with the stack pointer when the
   // thread left the compiled code. In addition it allows the interpreter
-  // to remove the space the c2i adapter allocated to do it argument conversion.
+  // to remove the space the c2i adapter allocated to do its argument conversion.
 
   // Although a c2i blob will always run interpreted even if compiled code is
   // present if we see that compiled code is present the compiled call site
   // will be patched/re-resolved so that later calls will run compiled.
 
-  // Aditionally a c2i blob need to have a unverified entry because it can be reached
+  // Additionally a c2i blob needs to have an unverified entry because it can be reached
   // in situations where the call site is an inline cache site and may go megamorphic.
 
   // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
@@ -586,7 +604,7 @@
 // arguments for a Java-compiled call, and jumps to Rmethod-> code()->
 // code_begin().  It is broken to call it without an nmethod assigned.
 // The usual behavior is to lift any register arguments up out of the
-// stack and possibly re-pack the extra arguments to be contigious.
+// stack and possibly re-pack the extra arguments to be contiguous.
 // I2C adapters will save what the interpreter's stack pointer will be
 // after arguments are popped, then adjust the interpreter's frame
 // size to force alignment and possibly to repack the arguments.
@@ -603,7 +621,7 @@
 // outgoing stack args will be dead after the copy.
 //
 // Native wrappers, like adapters, marshal arguments.  Unlike adapters they
-// also perform an offical frame push & pop.  They have a call to the native
+// also perform an official frame push & pop.  They have a call to the native
 // routine in their middles and end in a return (instead of ending in a jump).
 // The native wrappers are stored in real nmethods instead of the BufferBlobs
 // used by the adapters.  The code generation happens here because it's very
@@ -620,11 +638,9 @@
 
 #ifdef ASSERT
   // Captures code and signature used to generate this adapter when
-  // verifing adapter equivalence.
+  // verifying adapter equivalence.
   unsigned char* _saved_code;
-  int            _code_length;
-  BasicType*     _saved_sig;
-  int            _total_args_passed;
+  int            _saved_code_length;
 #endif
 
   void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
@@ -634,9 +650,7 @@
     _c2i_unverified_entry = c2i_unverified_entry;
 #ifdef ASSERT
     _saved_code = NULL;
-    _code_length = 0;
-    _saved_sig = NULL;
-    _total_args_passed = 0;
+    _saved_code_length = 0;
 #endif
   }
 
@@ -649,7 +663,6 @@
   address get_i2c_entry()            const { return _i2c_entry; }
   address get_c2i_entry()            const { return _c2i_entry; }
   address get_c2i_unverified_entry() const { return _c2i_unverified_entry; }
-
   address base_address();
   void relocate(address new_base);
 
@@ -661,8 +674,8 @@
 
 #ifdef ASSERT
   // Used to verify that code generated for shared adapters is equivalent
-  void save_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
-  bool compare_code(unsigned char* code, int length, int total_args_passed, BasicType* sig_bt);
+  void save_code   (unsigned char* code, int length);
+  bool compare_code(unsigned char* code, int length);
 #endif
 
   //virtual void print_on(outputStream* st) const;  DO NOT USE
@@ -681,7 +694,7 @@
 
   static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint,
                                         address i2c_entry, address c2i_entry, address c2i_unverified_entry);
-  static nmethod* create_native_wrapper(methodHandle method, int compile_id);
+  static void create_native_wrapper(methodHandle method);
   static AdapterHandlerEntry* get_adapter(methodHandle method);
 
 #ifdef HAVE_DTRACE_H
--- a/src/share/vm/runtime/sharedRuntimeTrans.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntimeTrans.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -56,10 +56,12 @@
 # define __LO(x) *(1+(int*)&x)
 #endif
 
+#if !defined(AIX)
 double copysign(double x, double y) {
   __HI(x) = (__HI(x)&0x7fffffff)|(__HI(y)&0x80000000);
   return x;
 }
+#endif
 
 /*
  * ====================================================
@@ -85,6 +87,7 @@
   hugeX   = 1.0e+300,
   tiny   = 1.0e-300;
 
+#if !defined(AIX)
 double scalbn (double x, int n) {
   int  k,hx,lx;
   hx = __HI(x);
@@ -111,9 +114,10 @@
   __HI(x) = (hx&0x800fffff)|(k<<20);
   return x*twom54;
 }
+#endif
 
 /* __ieee754_log(x)
- * Return the logrithm of x
+ * Return the logarithm of x
  *
  * Method :
  *   1. Argument Reduction: find k and f such that
--- a/src/share/vm/runtime/sharedRuntimeTrig.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sharedRuntimeTrig.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -223,7 +223,7 @@
  *
  *      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
  *
- *      ih      integer. If >0 it indicats q[] is >= 0.5, hence
+ *      ih      integer. If >0 it indicates q[] is >= 0.5, hence
  *              it also indicates the *sign* of the result.
  *
  */
@@ -347,7 +347,7 @@
   if(z==0.0) {
     jz -= 1; q0 -= 24;
     while(iq[jz]==0) { jz--; q0-=24;}
-  } else { /* break z into 24-bit if neccessary */
+  } else { /* break z into 24-bit if necessary */
     z = scalbnA(z,-q0);
     if(z>=two24B) {
       fw = (double)((int)(twon24*z));
@@ -409,7 +409,7 @@
 
 /*
  * ====================================================
- * Copyright (c) 1993 Oracle and/or its affilates. All rights reserved.
+ * Copyright (c) 1993 Oracle and/or its affiliates. All rights reserved.
  *
  * Developed at SunPro, a Sun Microsystems, Inc. business.
  * Permission to use, copy, modify, and distribute this
@@ -658,7 +658,7 @@
 
 static double __kernel_cos(double x, double y)
 {
-  double a,hz,z,r,qx;
+  double a,h,z,r,qx;
   int ix;
   ix = __HI(x)&0x7fffffff;      /* ix = |x|'s high word*/
   if(ix<0x3e400000) {                   /* if x < 2**27 */
@@ -675,9 +675,9 @@
       __HI(qx) = ix-0x00200000; /* x/4 */
       __LO(qx) = 0;
     }
-    hz = 0.5*z-qx;
-    a  = one-qx;
-    return a - (hz - (z*r-x*y));
+    h = 0.5*z-qx;
+    a = one-qx;
+    return a - (h - (z*r-x*y));
   }
 }
 
--- a/src/share/vm/runtime/signature.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/signature.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,7 +152,7 @@
   _parameter_index = 0;
 }
 
-// Optimized version of iterat_parameters when fingerprint is known
+// Optimized version of iterate_parameters when fingerprint is known
 void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
   uint64_t saved_fingerprint = fingerprint;
 
--- a/src/share/vm/runtime/signature.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/signature.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/simpleThresholdPolicy.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -387,7 +387,7 @@
                                                      int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
   // If the method is already compiling, quickly bail out.
   if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
-    // Use loop event as an opportinity to also check there's been
+    // Use loop event as an opportunity to also check there's been
     // enough calls.
     CompLevel cur_level = comp_level(mh());
     CompLevel next_level = call_event(mh(), cur_level);
--- a/src/share/vm/runtime/statSampler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/statSampler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -222,8 +222,8 @@
  * The list of System Properties that have corresponding PerfData
  * string instrumentation created by retrieving the named property's
  * value from System.getProperty() and unconditionally creating a
- * PerfStringConstant object initialized to the retreived value. This
- * is not an exhustive list of Java properties with corresponding string
+ * PerfStringConstant object initialized to the retrieved value. This
+ * is not an exhaustive list of Java properties with corresponding string
  * instrumentation as the create_system_property_instrumentation() method
  * creates other property based instrumentation conditionally.
  */
@@ -325,7 +325,7 @@
   // create string instrumentation for various Java properties.
   create_system_property_instrumentation(CHECK);
 
-  // hotspot flags (from .hotspotrc) and args (from command line)
+  // HotSpot flags (from .hotspotrc) and args (from command line)
   //
   PerfDataManager::create_string_constant(JAVA_RT, "vmFlags",
                                           Arguments::jvm_flags(), CHECK);
--- a/src/share/vm/runtime/stubCodeGenerator.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/stubCodeGenerator.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/stubCodeGenerator.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/stubCodeGenerator.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -111,7 +111,7 @@
 };
 
 
-// Stack-allocated helper class used to assciate a stub code with a name.
+// Stack-allocated helper class used to associate a stub code with a name.
 // All stub code generating functions that use a StubCodeMark will be registered
 // in the global StubCodeDesc list and the generated stub code can be identified
 // later via an address pointing into it.
--- a/src/share/vm/runtime/stubRoutines.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/stubRoutines.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -114,8 +114,11 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "stubRoutines_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "stubRoutines_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "stubRoutines_ppc_32.hpp"
+#endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "stubRoutines_ppc_64.hpp"
 #endif
 
 
--- a/src/share/vm/runtime/sweeper.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sweeper.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -129,6 +129,7 @@
 
 nmethod* NMethodSweeper::_current                      = NULL; // Current nmethod
 long     NMethodSweeper::_traversals                   = 0;    // Stack scan count, also sweep ID.
+long     NMethodSweeper::_total_nof_code_cache_sweeps  = 0;    // Total number of full sweeps of the code cache
 long     NMethodSweeper::_time_counter                 = 0;    // Virtual time used to periodically invoke sweeper
 long     NMethodSweeper::_last_sweep                   = 0;    // Value of _time_counter when the last sweep happened
 int      NMethodSweeper::_seen                         = 0;    // Nof. nmethod we have currently processed in current pass of CodeCache
@@ -143,13 +144,16 @@
                                                                //   1) alive       -> not_entrant
                                                                //   2) not_entrant -> zombie
                                                                //   3) zombie      -> marked_for_reclamation
+int    NMethodSweeper::_hotness_counter_reset_val       = 0;
 
-int   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
-Tickspan NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
-Tickspan NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
-Tickspan NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
-Tickspan NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
-int   NMethodSweeper::_hotness_counter_reset_val       = 0;
+long   NMethodSweeper::_total_nof_methods_reclaimed     = 0;    // Accumulated nof methods flushed
+long   NMethodSweeper::_total_nof_c2_methods_reclaimed  = 0;    // Accumulated nof C2 methods flushed
+size_t NMethodSweeper::_total_flushed_size              = 0;    // Total number of bytes flushed from the code cache
+Tickspan  NMethodSweeper::_total_time_sweeping;                 // Accumulated time sweeping
+Tickspan  NMethodSweeper::_total_time_this_sweep;               // Total time this sweep
+Tickspan  NMethodSweeper::_peak_sweep_time;                     // Peak time for a full sweep
+Tickspan  NMethodSweeper::_peak_sweep_fraction_time;            // Peak time sweeping one fraction
+
 
 
 class MarkActivationClosure: public CodeBlobClosure {
@@ -257,9 +261,14 @@
   // Large ReservedCodeCacheSize:   (e.g., 256M + code Cache is 90% full). The formula
   //                                              computes: (256 / 16) - 10 = 6.
   if (!_should_sweep) {
-    int time_since_last_sweep = _time_counter - _last_sweep;
-    double wait_until_next_sweep = (ReservedCodeCacheSize / (16 * M)) - time_since_last_sweep -
-                                CodeCache::reverse_free_ratio();
+    const int time_since_last_sweep = _time_counter - _last_sweep;
+    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
+    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens, using
+    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
+    // value) that disables the intended periodic sweeps.
+    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
+    double wait_until_next_sweep = max_wait_time - time_since_last_sweep - CodeCache::reverse_free_ratio();
+    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
 
     if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
       _should_sweep = true;
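
The wrap-around the new comment guards against, as a short standalone
demonstration (the constants are made-up stand-ins for the sweeper's values):

  #include <cstdio>

  int main() {
    unsigned max_wait   = 16;  // e.g. ReservedCodeCacheSize / (16 * M)
    int      since_last = 40;  // virtual time since the last sweep
    // Mixed arithmetic converts to unsigned and wraps: 16 - 40 becomes
    // 4294967272, so a "<= 0.0" test would never trigger a sweep.
    printf("%u\n", max_wait - since_last);
    // Doing the subtraction in a signed type gives the intended -24.
    printf("%d\n", (int)max_wait - since_last);
    return 0;
  }
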
@@ -287,6 +296,7 @@
 
     // We are done with sweeping the code cache once.
     if (_sweep_fractions_left == 0) {
+      _total_nof_code_cache_sweeps++;
       _last_sweep = _time_counter;
       // Reset flag; temporarily disables sweeper
       _should_sweep = false;
@@ -299,7 +309,8 @@
         _bytes_changed = 0;
       }
     }
-    _sweep_started = 0;
+    // Release work, because another compiler thread could continue.
+    OrderAccess::release_store((int*)&_sweep_started, 0);
   }
 }
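
The release_store above publishes all of the sweep's ordinary writes before the
_sweep_started flag drops to 0. The same idiom in portable C++11, for comparison
(std::atomic in place of OrderAccess; names are illustrative):

  #include <atomic>

  static std::atomic<int> sweep_started{1};
  static long             last_sweep = 0;

  void finish_sweep(long now) {
    last_sweep = now;  // ordinary writes made during the sweep ...
    // ... must be visible before another thread sees the flag at 0 and
    // assumes it may start the next sweep.
    sweep_started.store(0, std::memory_order_release);
  }
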
 
@@ -373,6 +384,7 @@
   _total_time_sweeping  += sweep_time;
   _total_time_this_sweep += sweep_time;
   _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
+  _total_flushed_size += freed_memory;
   _total_nof_methods_reclaimed += _flushed_count;
 
   EventSweepCodeCache event(UNTIMED);
@@ -512,6 +524,9 @@
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
       }
       freed_memory = nm->total_size();
+      if (nm->is_compiled_by_c2()) {
+        _total_nof_c2_methods_reclaimed++;
+      }
       release_nmethod(nm);
       _flushed_count++;
     } else {
@@ -550,6 +565,9 @@
       SWEEP(nm);
       // No inline caches will ever point to osr methods, so we can just remove it
       freed_memory = nm->total_size();
+      if (nm->is_compiled_by_c2()) {
+        _total_nof_c2_methods_reclaimed++;
+      }
       release_nmethod(nm);
       _flushed_count++;
     } else {
@@ -637,3 +655,13 @@
     xtty->end_elem();
   }
 }
+
+void NMethodSweeper::print() {
+  ttyLocker ttyl;
+  tty->print_cr("Code cache sweeper statistics:");
+  tty->print_cr("  Total sweep time:                %1.0lfms", (double)_total_time_sweeping.value()/1000000);
+  tty->print_cr("  Total number of full sweeps:     %ld", _total_nof_code_cache_sweeps);
+  tty->print_cr("  Total number of flushed methods: %ld (%ld C2 methods)", _total_nof_methods_reclaimed,
+                                                    _total_nof_c2_methods_reclaimed);
+  tty->print_cr("  Total size of flushed methods:   " SIZE_FORMAT "kB", _total_flushed_size/K);
+}
--- a/src/share/vm/runtime/sweeper.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/sweeper.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -54,28 +54,33 @@
 //     is full.
 
 class NMethodSweeper : public AllStatic {
-  static long      _traversals;                   // Stack scan count, also sweep ID.
-  static long      _time_counter;                 // Virtual time used to periodically invoke sweeper
-  static long      _last_sweep;                   // Value of _time_counter when the last sweep happened
-  static nmethod*  _current;                      // Current nmethod
-  static int       _seen;                         // Nof. nmethod we have currently processed in current pass of CodeCache
-  static int       _flushed_count;                // Nof. nmethods flushed in current sweep
-  static int       _zombified_count;              // Nof. nmethods made zombie in current sweep
-  static int       _marked_for_reclamation_count; // Nof. nmethods marked for reclaim in current sweep
+  static long      _traversals;                     // Stack scan count, also sweep ID.
+  static long      _total_nof_code_cache_sweeps;    // Total number of full sweeps of the code cache
+  static long      _time_counter;                   // Virtual time used to periodically invoke sweeper
+  static long      _last_sweep;                     // Value of _time_counter when the last sweep happened
+  static nmethod*  _current;                        // Current nmethod
+  static int       _seen;                           // Nof. nmethod we have currently processed in current pass of CodeCache
+  static int       _flushed_count;                  // Nof. nmethods flushed in current sweep
+  static int       _zombified_count;                // Nof. nmethods made zombie in current sweep
+  static int       _marked_for_reclamation_count;   // Nof. nmethods marked for reclaim in current sweep
 
-  static volatile int  _sweep_fractions_left;     // Nof. invocations left until we are completed with this pass
-  static volatile int  _sweep_started;            // Flag to control conc sweeper
-  static volatile bool _should_sweep;             // Indicates if we should invoke the sweeper
-  static volatile int _bytes_changed;             // Counts the total nmethod size if the nmethod changed from:
-                                                  //   1) alive       -> not_entrant
-                                                  //   2) not_entrant -> zombie
-                                                  //   3) zombie      -> marked_for_reclamation
+  static volatile int  _sweep_fractions_left;       // Nof. invocations left until we are completed with this pass
+  static volatile int  _sweep_started;              // Flag to control conc sweeper
+  static volatile bool _should_sweep;               // Indicates if we should invoke the sweeper
+  static volatile int  _bytes_changed;              // Counts the total nmethod size if the nmethod changed from:
+                                                    //   1) alive       -> not_entrant
+                                                    //   2) not_entrant -> zombie
+                                                    //   3) zombie      -> marked_for_reclamation
   // Stat counters
-  static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
-  static Tickspan  _total_time_sweeping;          // Accumulated time sweeping
-  static Tickspan  _total_time_this_sweep;        // Total time this sweep
-  static Tickspan  _peak_sweep_time;              // Peak time for a full sweep
-  static Tickspan  _peak_sweep_fraction_time;     // Peak time sweeping one fraction
+  static long      _total_nof_methods_reclaimed;    // Accumulated nof methods flushed
+  static long      _total_nof_c2_methods_reclaimed; // Accumulated nof C2-compiled methods flushed
+  static size_t    _total_flushed_size;             // Total size of flushed methods
+  static int       _hotness_counter_reset_val;
+
+  static Tickspan  _total_time_sweeping;            // Accumulated time sweeping
+  static Tickspan  _total_time_this_sweep;          // Total time this sweep
+  static Tickspan  _peak_sweep_time;                // Peak time for a full sweep
+  static Tickspan  _peak_sweep_fraction_time;       // Peak time sweeping one fraction
 
   static int  process_nmethod(nmethod *nm);
   static void release_nmethod(nmethod* nm);
@@ -83,8 +88,6 @@
   static bool sweep_in_progress();
   static void sweep_code_cache();
 
-  static int _hotness_counter_reset_val;
-
  public:
   static long traversal_count()              { return _traversals; }
   static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
@@ -105,10 +108,10 @@
   static void mark_active_nmethods();      // Invoked at the end of each safepoint
   static void possibly_sweep();            // Compiler threads call this to sweep
 
-  static int sort_nmethods_by_hotness(nmethod** nm1, nmethod** nm2);
   static int hotness_counter_reset_val();
   static void report_state_change(nmethod* nm);
   static void possibly_enable_sweeper();
+  static void print();   // Printing/debugging
 };
 
 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP
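For context on the sweeper counters above: `_bytes_changed` accumulates the size of every nmethod that takes one of the three tracked state transitions, which feeds the heuristic behind `_should_sweep`. The standalone sketch below illustrates only that bookkeeping; `FakeMethod`, `MethodState`, and `report_state_change` are illustrative stand-ins, not HotSpot code.

    #include <cstddef>
    #include <cstdio>

    // Illustrative stand-ins for the nmethod states the sweeper tracks.
    enum class MethodState { alive, not_entrant, zombie, marked_for_reclamation };

    struct FakeMethod {
      MethodState state;
      size_t      size;   // total size of the compiled method, in bytes
    };

    static size_t bytes_changed = 0;   // analogue of the _bytes_changed counter

    // Record a state transition; if it is one of the three transitions the
    // sweeper cares about, add the method's size to the running counter.
    void report_state_change(FakeMethod& m, MethodState next) {
      bool tracked =
          (m.state == MethodState::alive       && next == MethodState::not_entrant) ||
          (m.state == MethodState::not_entrant && next == MethodState::zombie)      ||
          (m.state == MethodState::zombie      && next == MethodState::marked_for_reclamation);
      if (tracked) bytes_changed += m.size;
      m.state = next;
    }

    int main() {
      FakeMethod m{MethodState::alive, 4096};
      report_state_change(m, MethodState::not_entrant);
      report_state_change(m, MethodState::zombie);
      printf("bytes changed: %zu\n", bytes_changed);   // prints 8192
    }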
--- a/src/share/vm/runtime/synchronizer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/synchronizer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
 # include "os_bsd.inline.hpp"
 #endif
 
-#if defined(__GNUC__)
+#if defined(__GNUC__) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
   #define ATTR __attribute__((noinline))
 #else
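The change above keeps the no-inline workaround for older GCC but exempts PPC64 builds. The pattern itself, in a minimal self-contained form (generic names, not the surrounding HotSpot sources):

    // Conditionally suppress inlining: some GCC versions hit build-time
    // failures when these functions are inlined, so a macro gates the
    // attribute per platform and expands to nothing elsewhere.
    #if defined(__GNUC__) && !defined(PPC64)
      #define ATTR __attribute__((noinline))
    #else
      #define ATTR
    #endif

    ATTR int slow_path(int x) {   // stays out of line on affected builds
      return x * 2 + 1;
    }

    int main() { return slow_path(20) == 41 ? 0 : 1; }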
@@ -84,32 +84,6 @@
     len = klassname->utf8_length();                                        \
   }
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL5(hotspot, monitor__wait,
-  jlong, uintptr_t, char*, int, long);
-HS_DTRACE_PROBE_DECL4(hotspot, monitor__waited,
-  jlong, uintptr_t, char*, int);
-
-#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
-  {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
-      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
-      HS_DTRACE_PROBE5(hotspot, monitor__wait, jtid,                       \
-                       (monitor), bytes, len, (millis));                   \
-    }                                                                      \
-  }
-
-#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
-  {                                                                        \
-    if (DTraceMonitorProbes) {                                            \
-      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
-      HS_DTRACE_PROBE4(hotspot, monitor__##probe, jtid,                    \
-                       (uintptr_t)(monitor), bytes, len);                  \
-    }                                                                      \
-  }
-
-#else /* USDT2 */
-
 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
   {                                                                        \
     if (DTraceMonitorProbes) {                                            \
@@ -119,7 +93,7 @@
     }                                                                      \
   }
 
-#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_PROBE_WAITED
+#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED
 
 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
   {                                                                        \
@@ -130,7 +104,6 @@
     }                                                                      \
   }
 
-#endif /* USDT2 */
 #else //  ndef DTRACE_ENABLED
 
 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
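With the pre-USDT2 branch gone, the probe macros rely purely on token pasting: the probe argument is spliced onto a generated probe name, and one-line aliases such as the `HOTSPOT_MONITOR_PROBE_waited` define above bridge lowercase probe names to the upper-case macros that dtrace generates from the provider description. A hedged sketch of that dispatch; the probe bodies here just print, and all names are stand-ins:

    #include <cstdio>

    // Stand-ins for the dtrace-generated probe macros (normally produced
    // from a .d provider file such as hotspot.d).
    #define HOTSPOT_MONITOR_NOTIFY() printf("probe: monitor notify\n")
    #define HOTSPOT_MONITOR_WAITED() printf("probe: monitor waited\n")

    // Aliases that bridge lowercase probe names to the generated macros.
    #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
    #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

    // A single dispatch macro covers every probe via ## token pasting.
    #define DTRACE_MONITOR_PROBE(probe) HOTSPOT_MONITOR_PROBE_##probe()

    int main() {
      DTRACE_MONITOR_PROBE(notify);   // expands to HOTSPOT_MONITOR_NOTIFY()
      DTRACE_MONITOR_PROBE(waited);   // expands to HOTSPOT_MONITOR_WAITED()
    }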
@@ -373,23 +346,24 @@
 // -----------------------------------------------------------------------------
 //  Wait/Notify/NotifyAll
 // NOTE: must use heavy weight monitor to handle wait()
-void ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
+int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
   if (UseBiasedLocking) {
     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
   }
   if (millis < 0) {
     TEVENT (wait - throw IAX) ;
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
   }
   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
   monitor->wait(millis, true, THREAD);
 
-  /* This dummy call is in place to get around dtrace bug 6254741.  Once
-     that's fixed we can uncomment the following line and remove the call */
+  // This dummy call is in place to get around dtrace bug 6254741.  Once
+  // that's fixed we can uncomment the following line, remove the call
+  // and change this function back into a "void" func.
   // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
-  dtrace_waited_probe(monitor, obj, THREAD);
+  return dtrace_waited_probe(monitor, obj, THREAD);
 }
 
 void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
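Since `wait` now returns `int` so the dtrace workaround can pass a return code through, the bare `THROW_MSG` (whose expansion ends in a plain `return;`) becomes `THROW_MSG_0`, which supplies a return value. A deliberately simplified model of that convention, with a flag standing in for the real pending-exception machinery in utilities/exceptions.hpp:

    #include <cstdio>

    // Simplified stand-in for the VM's pending-exception state.
    static const char* pending_exception = nullptr;

    // THROW_MSG is usable only in void functions; THROW_MSG_0 is the
    // variant for functions that must return a value.
    #define THROW_MSG(msg)   { pending_exception = (msg); return;   }
    #define THROW_MSG_0(msg) { pending_exception = (msg); return 0; }

    int wait_millis(long millis) {          // int-returning, so THROW_MSG_0
      if (millis < 0) THROW_MSG_0("timeout value is negative");
      return 42;                            // stand-in for the probe's ret code
    }

    int main() {
      wait_millis(-1);
      if (pending_exception) printf("pending: %s\n", pending_exception);
    }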
@@ -737,10 +711,10 @@
 }
 
 // Be aware of this method could revoke bias of the lock object.
-// This method querys the ownership of the lock handle specified by 'h_obj'.
+// This method queries the ownership of the lock handle specified by 'h_obj'.
 // If the current thread owns the lock, it returns owner_self. If no
 // thread owns the lock, it returns owner_none. Otherwise, it will return
-// ower_other.
+// owner_other.
 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
 (JavaThread *self, Handle h_obj) {
   // The caller must beware this method can revoke bias, and
--- a/src/share/vm/runtime/synchronizer.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/synchronizer.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
   // to use enter() and exit() in order to make sure user be ware
   // of the performance and semantics difference. They are normally
   // used by ObjectLocker etc. The interpreter and compiler use
-  // assembly copies of these routines. Please keep them synchornized.
+  // assembly copies of these routines. Please keep them synchronized.
   //
   // attempt_rebias flag is used by UseBiasedLocking implementation
   static void fast_enter  (Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
@@ -68,7 +68,7 @@
   static void jni_exit    (oop obj,    Thread* THREAD);
 
   // Handle all interpreter, compiler and jni cases
-  static void wait               (Handle obj, jlong millis, TRAPS);
+  static int  wait               (Handle obj, jlong millis, TRAPS);
   static void notify             (Handle obj,               TRAPS);
   static void notifyall          (Handle obj,               TRAPS);
 
--- a/src/share/vm/runtime/thread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/thread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -116,31 +116,8 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL(hotspot, vm__init__begin);
-HS_DTRACE_PROBE_DECL(hotspot, vm__init__end);
-HS_DTRACE_PROBE_DECL5(hotspot, thread__start, char*, intptr_t,
-  intptr_t, intptr_t, bool);
-HS_DTRACE_PROBE_DECL5(hotspot, thread__stop, char*, intptr_t,
-  intptr_t, intptr_t, bool);
-
-#define DTRACE_THREAD_PROBE(probe, javathread)                             \
-  {                                                                        \
-    ResourceMark rm(this);                                                 \
-    int len = 0;                                                           \
-    const char* name = (javathread)->get_thread_name();                    \
-    len = strlen(name);                                                    \
-    HS_DTRACE_PROBE5(hotspot, thread__##probe,                             \
-      name, len,                                                           \
-      java_lang_Thread::thread_id((javathread)->threadObj()),              \
-      (javathread)->osthread()->thread_id(),                               \
-      java_lang_Thread::is_daemon((javathread)->threadObj()));             \
-  }
-
-#else /* USDT2 */
-
-#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_PROBE_START
-#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_PROBE_STOP
+#define HOTSPOT_THREAD_PROBE_start HOTSPOT_THREAD_START
+#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP
 
 #define DTRACE_THREAD_PROBE(probe, javathread)                             \
   {                                                                        \
@@ -155,8 +132,6 @@
       java_lang_Thread::is_daemon((javathread)->threadObj()));             \
   }
 
-#endif /* USDT2 */
-
 #else //  ndef DTRACE_ENABLED
 
 #define DTRACE_THREAD_PROBE(probe, javathread)
@@ -316,6 +291,9 @@
 void Thread::record_stack_base_and_size() {
   set_stack_base(os::current_stack_base());
   set_stack_size(os::current_stack_size());
+  if (is_Java_thread()) {
+    ((JavaThread*) this)->set_stack_overflow_limit();
+  }
   // CR 7190089: on Solaris, primordial thread's stack is adjusted
   // in initialize_thread(). Without the adjustment, stack size is
   // incorrect if stack is set to unlimited (ulimit -s unlimited).
@@ -771,7 +749,7 @@
 void JavaThread::record_jump(address target, address instr, const char* file, int line) {
 
   // This should not need to be atomic as the only way for simultaneous
-  // updates is via interrupts. Even then this should be rare or non-existant
+  // updates is via interrupts. Even then this should be rare or non-existent
   // and we don't care that much anyway.
 
   int index = _jmp_ring_index;
@@ -830,7 +808,7 @@
   return false;
 }
 
-void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   active_handles()->oops_do(f);
   // Do oop for ThreadShadow
   f->do_oop((oop*)&_pending_exception);
@@ -932,10 +910,10 @@
         // Threads_lock is special, since the safepoint synchronization will not start before this is
         // acquired. Hence, a JavaThread cannot be holding it at a safepoint. So is VMOperationRequest_lock,
         // since it is used to transfer control between JavaThreads and the VMThread
-        // Do not *exclude* any locks unless you are absolutly sure it is correct. Ask someone else first!
+        // Do not *exclude* any locks unless you are absolutely sure it is correct. Ask someone else first!
         if ( (cur->allow_vm_block() &&
               cur != Threads_lock &&
-              cur != Compile_lock &&               // Temporary: should not be necessary when we get spearate compilation
+              cur != Compile_lock &&               // Temporary: should not be necessary when we get separate compilation
               cur != VMOperationRequest_lock &&
               cur != VMOperationQueue_lock) ||
               cur->rank() == Mutex::special) {
@@ -1278,7 +1256,7 @@
         time_slept = 0;
         time_before_loop = now;
     } else {
-        // need to recalulate since we might have new tasks in _tasks
+        // need to recalculate since we might have new tasks in _tasks
         time_slept = (int) ((now - time_before_loop) / 1000000);
     }
 
@@ -1693,7 +1671,7 @@
   // initialize thread-local alloc buffer related fields
   this->initialize_tlab();
 
-  // used to test validitity of stack trace backs
+  // used to test validity of stack trace backs
   this->record_base_of_stack_pointer();
 
   // Record real stack base and size.
@@ -2777,7 +2755,7 @@
   }
 };
 
-void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Verify that the deferred card marks have been flushed.
   assert(deferred_card_mark().is_empty(), "Should be empty during GC");
 
@@ -3307,7 +3285,7 @@
 #endif
 }
 
-void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   JavaThread::oops_do(f, cld_f, cf);
   if (_scanned_nmethod != NULL && cf != NULL) {
     // Safepoints can occur when the sweeper is scanning an nmethod so
@@ -3363,6 +3341,58 @@
   // If CompilerThreads ever become non-JavaThreads, add them here
 }
 
+
+void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
+  TraceTime timer("Initialize java.lang classes", TraceStartupTime);
+
+  if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
+    create_vm_init_libraries();
+  }
+
+  initialize_class(vmSymbols::java_lang_String(), CHECK);
+
+  // Initialize java.lang.System (needed before creating the thread)
+  initialize_class(vmSymbols::java_lang_System(), CHECK);
+  initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK);
+  Handle thread_group = create_initial_thread_group(CHECK);
+  Universe::set_main_thread_group(thread_group());
+  initialize_class(vmSymbols::java_lang_Thread(), CHECK);
+  oop thread_object = create_initial_thread(thread_group, main_thread, CHECK);
+  main_thread->set_threadObj(thread_object);
+  // Set thread status to running since the main thread has
+  // been started and is running.
+  java_lang_Thread::set_thread_status(thread_object,
+                                      java_lang_Thread::RUNNABLE);
+
+  // The VM creates & returns objects of this class. Make sure it's initialized.
+  initialize_class(vmSymbols::java_lang_Class(), CHECK);
+
+  // The VM preresolves methods to these classes. Make sure that they get initialized
+  initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK);
+  initialize_class(vmSymbols::java_lang_ref_Finalizer(),  CHECK);
+  call_initializeSystemClass(CHECK);
+
+  // get the Java runtime name after java.lang.System is initialized
+  JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
+  JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
+
+  // an instance of the OutOfMemoryError exception has been allocated earlier
+  initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK);
+  initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK);
+  initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK);
+  initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK);
+  initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK);
+  initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK);
+}
+
+void Threads::initialize_jsr292_core_classes(TRAPS) {
+  initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK);
+  initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK);
+  initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK);
+}
+
 jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
 
   extern void JDK_Version_init();
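The hoisted `initialize_java_lang_classes`/`initialize_jsr292_core_classes` bodies above lean on the `TRAPS`/`CHECK` convention: `TRAPS` declares a hidden current-thread parameter, and `CHECK`, written as the last "argument" of a call, closes the argument list and returns early if the callee left an exception pending. A stripped-down sketch of how such a macro can be built (illustrative only; the real definitions, with far more machinery, live in utilities/exceptions.hpp):

    #include <cstdio>

    static const char* pending_exception = nullptr;

    // TRAPS declares the hidden thread parameter. CHECK closes the caller's
    // argument list, tests for a pending exception, and bails out early;
    // the dangling "(void)(0" is re-balanced by the ")" the caller wrote.
    #define TRAPS int THREAD
    #define CHECK THREAD); if (pending_exception != nullptr) return; (void)(0

    void initialize_class(const char* name, TRAPS) {
      printf("init %s on thread %d\n", name, THREAD);
      if (name[0] == 'X') pending_exception = "resolution failed";
    }

    void initialize_java_lang_classes_sketch(TRAPS) {
      initialize_class("java_lang_String", CHECK);
      initialize_class("Xbroken",          CHECK);   // sets the pending flag
      initialize_class("never_reached",    CHECK);   // skipped: early return
    }

    int main() {
      initialize_java_lang_classes_sketch(7);
      if (pending_exception) printf("pending: %s\n", pending_exception);
    }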
@@ -3382,7 +3412,7 @@
   // Initialize system properties.
   Arguments::init_system_properties();
 
-  // So that JDK version can be used as a discrimintor when parsing arguments
+  // So that JDK version can be used as a discriminator when parsing arguments
   JDK_Version_init();
 
   // Update/Initialize System properties after JDK version number is known
@@ -3401,11 +3431,7 @@
     os::pause();
   }
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hotspot, vm__init__begin);
-#else /* USDT2 */
   HOTSPOT_VM_INIT_BEGIN();
-#endif /* USDT2 */
 
   // Record VM creation timing statistics
   TraceVmCreationTime create_vm_timer;
@@ -3421,7 +3447,7 @@
   jint adjust_after_os_result = Arguments::adjust_after_os();
   if (adjust_after_os_result != JNI_OK) return adjust_after_os_result;
 
-  // intialize TLS
+  // initialize TLS
   ThreadLocalStorage::init();
 
   // Bootstrap native memory tracking, so it can start recording memory
@@ -3541,13 +3567,13 @@
     VMThread::execute(&verify_op);
   }
 
-  EXCEPTION_MARK;
+  Thread* THREAD = Thread::current();
 
   // At this point, the Universe is initialized, but we have not executed
   // any byte code.  Now is a good time (the only time) to dump out the
   // internal state of the JVM for sharing.
   if (DumpSharedSpaces) {
-    MetaspaceShared::preload_and_dump(CHECK_0);
+    MetaspaceShared::preload_and_dump(CHECK_JNI_ERR);
     ShouldNotReachHere();
   }
 
@@ -3558,74 +3584,12 @@
   // Notify JVMTI agents that VM has started (JNI is up) - nop if no agents.
   JvmtiExport::post_vm_start();
 
-  {
-    TraceTime timer("Initialize java.lang classes", TraceStartupTime);
-
-    if (EagerXrunInit && Arguments::init_libraries_at_startup()) {
-      create_vm_init_libraries();
-    }
-
-    initialize_class(vmSymbols::java_lang_String(), CHECK_0);
-
-    // Initialize java_lang.System (needed before creating the thread)
-    initialize_class(vmSymbols::java_lang_System(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ThreadGroup(), CHECK_0);
-    Handle thread_group = create_initial_thread_group(CHECK_0);
-    Universe::set_main_thread_group(thread_group());
-    initialize_class(vmSymbols::java_lang_Thread(), CHECK_0);
-    oop thread_object = create_initial_thread(thread_group, main_thread, CHECK_0);
-    main_thread->set_threadObj(thread_object);
-    // Set thread status to running since main thread has
-    // been started and running.
-    java_lang_Thread::set_thread_status(thread_object,
-                                        java_lang_Thread::RUNNABLE);
-
-    // The VM creates & returns objects of this class. Make sure it's initialized.
-    initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
-
-    // The VM preresolves methods to these classes. Make sure that they get initialized
-    initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ref_Finalizer(),  CHECK_0);
-    call_initializeSystemClass(CHECK_0);
-
-    // get the Java runtime name after java.lang.System is initialized
-    JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
-    JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
-
-    // an instance of OutOfMemory exception has been allocated earlier
-    initialize_class(vmSymbols::java_lang_OutOfMemoryError(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_NullPointerException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ClassCastException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ArrayStoreException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
-  }
-
-  // See        : bugid 4211085.
-  // Background : the static initializer of java.lang.Compiler tries to read
-  //              property"java.compiler" and read & write property "java.vm.info".
-  //              When a security manager is installed through the command line
-  //              option "-Djava.security.manager", the above properties are not
-  //              readable and the static initializer for java.lang.Compiler fails
-  //              resulting in a NoClassDefFoundError.  This can happen in any
-  //              user code which calls methods in java.lang.Compiler.
-  // Hack :       the hack is to pre-load and initialize this class, so that only
-  //              system domains are on the stack when the properties are read.
-  //              Currently even the AWT code has calls to methods in java.lang.Compiler.
-  //              On the classic VM, java.lang.Compiler is loaded very early to load the JIT.
-  // Future Fix : the best fix is to grant everyone permissions to read "java.compiler" and
-  //              read and write"java.vm.info" in the default policy file. See bugid 4211383
-  //              Once that is done, we should remove this hack.
-  initialize_class(vmSymbols::java_lang_Compiler(), CHECK_0);
-
-  // More hackery - the static initializer of java.lang.Compiler adds the string "nojit" to
-  // the java.vm.info property if no jit gets loaded through java.lang.Compiler (the hotspot
-  // compiler does not get loaded through java.lang.Compiler).  "java -version" with the
-  // hotspot vm says "nojit" all the time which is confusing.  So, we reset it here.
-  // This should also be taken out as soon as 4211383 gets fixed.
-  reset_vm_info_property(CHECK_0);
+  initialize_java_lang_classes(main_thread, CHECK_JNI_ERR);
+
+  // We need this for ClassDataSharing - the initial vm.info property is set
+  // with the default value of CDS "sharing" which may be reset through
+  // command line options.
+  reset_vm_info_property(CHECK_JNI_ERR);
 
   quicken_jni_functions();
 
@@ -3638,11 +3602,7 @@
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hotspot, vm__init__end);
-#else /* USDT2 */
   HOTSPOT_VM_INIT_END();
-#endif /* USDT2 */
 
   // record VM initialization completion time
 #if INCLUDE_MANAGEMENT
@@ -3654,10 +3614,7 @@
   // Note that we do not use CHECK_0 here since we are inside an EXCEPTION_MARK and
   // set_init_completed has just been called, causing exceptions not to be shortcut
   // anymore. We call vm_exit_during_initialization directly instead.
-  SystemDictionary::compute_java_system_loader(THREAD);
-  if (HAS_PENDING_EXCEPTION) {
-    vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
-  }
+  SystemDictionary::compute_java_system_loader(CHECK_JNI_ERR);
 
 #if INCLUDE_ALL_GCS
   // Support for ConcurrentMarkSweep. This should be cleaned up
@@ -3665,12 +3622,9 @@
   // once things are properly refactored. XXX YSR
   if (UseConcMarkSweepGC || UseG1GC) {
     if (UseConcMarkSweepGC) {
-      ConcurrentMarkSweepThread::makeSurrogateLockerThread(THREAD);
+      ConcurrentMarkSweepThread::makeSurrogateLockerThread(CHECK_JNI_ERR);
     } else {
-      ConcurrentMarkThread::makeSurrogateLockerThread(THREAD);
-    }
-    if (HAS_PENDING_EXCEPTION) {
-      vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+      ConcurrentMarkThread::makeSurrogateLockerThread(CHECK_JNI_ERR);
     }
   }
 #endif // INCLUDE_ALL_GCS
@@ -3713,19 +3667,16 @@
   CompileBroker::compilation_init();
 #endif
 
+  // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
+  // It is done after compilers are initialized, because otherwise compilations of
+  // signature polymorphic MH intrinsics can be missed
+  // (see SystemDictionary::find_method_handle_intrinsic).
   if (EnableInvokeDynamic) {
-    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
-    // It is done after compilers are initialized, because otherwise compilations of
-    // signature polymorphic MH intrinsics can be missed
-    // (see SystemDictionary::find_method_handle_intrinsic).
-    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
-    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
+    initialize_jsr292_core_classes(CHECK_JNI_ERR);
   }
 
 #if INCLUDE_MANAGEMENT
   Management::initialize(THREAD);
-#endif // INCLUDE_MANAGEMENT
 
   if (HAS_PENDING_EXCEPTION) {
     // management agent fails to start possibly due to
@@ -3733,6 +3684,7 @@
     // stack trace if appropriate. Simply exit VM.
     vm_exit(1);
   }
+#endif // INCLUDE_MANAGEMENT
 
   if (Arguments::has_profile())       FlatProfiler::engage(main_thread, true);
   if (MemProfiling)                   MemProfiler::engage();
@@ -4233,17 +4185,17 @@
 // but the garbage collector must provide a safe context for them to run.
 // In particular, these things should never be called when the Threads_lock
 // is held by some other thread. (Note: the Safepoint abstraction also
-// uses the Threads_lock to gurantee this property. It also makes sure that
+// uses the Threads_lock to guarantee this property. It also makes sure that
 // all threads gets blocked when exiting or starting).
 
-void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   ALL_JAVA_THREADS(p) {
     p->oops_do(f, cld_f, cf);
   }
   VMThread::vm_thread()->oops_do(f, cld_f, cf);
 }
 
-void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   // Introduce a mechanism allowing parallel threads to claim threads as
   // root groups.  Overhead should be small enough to use all the time,
   // even in sequential code.
@@ -4523,9 +4475,7 @@
         ++ctr ;
         if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
            if (Yields > 5) {
-             // Consider using a simple NakedSleep() instead.
-             // Then SpinAcquire could be called by non-JVM threads
-             Thread::current()->_ParkEvent->park(1) ;
+             os::naked_short_sleep(1);
            } else {
              os::NakedYield() ;
              ++Yields ;
--- a/src/share/vm/runtime/thread.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/thread.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -487,7 +487,7 @@
   // Apply "cld_f->do_cld" to CLDs that are otherwise not kept alive.
   //   Used by JavaThread::oops_do.
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  virtual void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  virtual void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Handles the parallel case for the method below.
 private:
@@ -929,7 +929,12 @@
   static void collect_counters(typeArrayOop array);
  private:
 #endif // GRAAL
-  StackGuardState        _stack_guard_state;
+
+  StackGuardState  _stack_guard_state;
+
+  // Precompute the limit of the stack as used in stack overflow checks.
+  // We load it from here to simplify the stack overflow check in assembly.
+  address          _stack_overflow_limit;
 
   nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
 
@@ -1057,20 +1062,31 @@
 
   // Last frame anchor routines
 
-  JavaFrameAnchor* frame_anchor(void)                { return &_anchor; }
+  JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }
 
   // last_Java_sp
-  bool has_last_Java_frame() const                   { return _anchor.has_last_Java_frame(); }
-  intptr_t* last_Java_sp() const                     { return _anchor.last_Java_sp(); }
+  bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
+  intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }
 
   // last_Java_pc
 
-  address last_Java_pc(void)                         { return _anchor.last_Java_pc(); }
+  address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }
 
   // Safepoint support
+#ifndef PPC64
   JavaThreadState thread_state() const           { return _thread_state; }
-  void set_thread_state(JavaThreadState s)       { _thread_state=s;      }
-  ThreadSafepointState *safepoint_state() const  { return _safepoint_state;  }
+  void set_thread_state(JavaThreadState s)       { _thread_state = s;    }
+#else
+  // Use membars when accessing volatile _thread_state. See
+  // Threads::create_vm() for size checks.
+  JavaThreadState thread_state() const           {
+    return (JavaThreadState) OrderAccess::load_acquire((volatile jint*)&_thread_state);
+  }
+  void set_thread_state(JavaThreadState s)       {
+    OrderAccess::release_store((volatile jint*)&_thread_state, (jint)s);
+  }
+#endif
+  ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
   void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
   bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }
 
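On PPC64's weakly ordered memory model a plain access of the volatile `_thread_state` is not enough, so the accessors above add acquire/release semantics via OrderAccess. A portable analogue of the same discipline using std::atomic (an analogy for illustration, not the HotSpot implementation):

    #include <atomic>
    #include <cstdio>

    enum JavaThreadStateSketch { in_java, in_vm, blocked };

    class FakeJavaThread {
      std::atomic<int> _thread_state{in_java};
     public:
      // load-acquire: no later access may be reordered before this load.
      JavaThreadStateSketch thread_state() const {
        return (JavaThreadStateSketch)_thread_state.load(std::memory_order_acquire);
      }
      // release-store: no earlier access may be reordered after this store.
      void set_thread_state(JavaThreadStateSketch s) {
        _thread_state.store((int)s, std::memory_order_release);
      }
    };

    int main() {
      FakeJavaThread t;
      t.set_thread_state(blocked);
      printf("state: %d\n", t.thread_state());
    }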
@@ -1259,7 +1275,7 @@
   void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
   vframeArray* vframe_array_head() const         { return _vframe_array_head;  }
 
-  // Side structure for defering update of java frame locals until deopt occurs
+  // Side structure for deferring update of java frame locals until deopt occurs
   GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
   void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }
 
@@ -1354,6 +1370,14 @@
   // and reguard if possible.
   bool reguard_stack(void);
 
+  address stack_overflow_limit() { return _stack_overflow_limit; }
+  void set_stack_overflow_limit() {
+    _stack_overflow_limit = _stack_base - _stack_size +
+                            ((StackShadowPages +
+                              StackYellowPages +
+                              StackRedPages) * os::vm_page_size());
+  }
+
   // Misc. accessors/mutators
   void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
   void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
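To make `set_stack_overflow_limit()` concrete, the sketch below runs the same arithmetic with made-up numbers (a 512 KiB stack, 4 KiB pages, and 20/2/1 shadow/yellow/red pages; the real counts come from VM flags): the precomputed limit sits just above the guard area, so generated code can test for overflow with a single compare against this field.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative values only; the real page counts are VM flags.
      const uintptr_t stack_base = 0x7ffff0000000UL;  // top of stack (grows down)
      const size_t    stack_size = 512 * 1024;        // 512 KiB
      const size_t    page_size  = 4096;
      const size_t    shadow = 20, yellow = 2, red = 1;

      // Same arithmetic as set_stack_overflow_limit(): the limit lies
      // (shadow + yellow + red) pages above the true end of the stack.
      uintptr_t limit = stack_base - stack_size
                      + (shadow + yellow + red) * page_size;

      printf("guard zone: %zu bytes, limit at 0x%lx\n",
             (shadow + yellow + red) * page_size, (unsigned long)limit);
      // If the stack pointer dips below `limit`, the thread is inside the
      // guard pages and a stack overflow must be handled.
    }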
@@ -1393,6 +1417,7 @@
   static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
   static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
   static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
+  static ByteSize stack_overflow_limit_offset()  { return byte_offset_of(JavaThread, _stack_overflow_limit); }
   static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
   static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
   static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
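`stack_overflow_limit_offset()` exists for the same reason as its neighbours: generated code addresses the field directly off the thread register instead of calling into C++. The idea in a standalone form, using plain offsetof on a stand-in struct (byte_offset_of is HotSpot's own wrapper):

    #include <cstddef>
    #include <cstdio>

    struct FakeJavaThread {
      void*  _stack_base;
      size_t _stack_size;
      void*  _stack_overflow_limit;   // compiled code compares SP against this
    };

    // Analogue of byte_offset_of(JavaThread, _stack_overflow_limit): the JIT
    // bakes this constant into e.g. "cmp sp, [thread_reg + offset]".
    size_t stack_overflow_limit_offset() {
      return offsetof(FakeJavaThread, _stack_overflow_limit);
    }

    int main() {
      printf("limit field at byte offset %zu\n", stack_overflow_limit_offset());
    }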
@@ -1464,7 +1489,7 @@
   void frames_do(void f(frame*, const RegisterMap*));
 
   // Memory operations
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Sweeper operations
   void nmethods_do(CodeBlobClosure* cf);
@@ -1756,6 +1781,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "thread_linux_ppc.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "thread_aix_ppc.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "thread_bsd_x86.hpp"
 #endif
@@ -1892,7 +1920,7 @@
   // GC support
   // Apply "f->do_oop" to all root oops in "this".
   // Apply "cf->do_code_blob" (if !NULL) to all code blobs active in frames
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
 #ifndef PRODUCT
 private:
@@ -1931,6 +1959,8 @@
   static bool        _vm_complete;
 #endif
 
+  static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
+  static void initialize_jsr292_core_classes(TRAPS);
  public:
   // Thread management
   // force_daemon is a concession to JNI, where we may need to add a
@@ -1959,9 +1989,9 @@
 
   // Apply "f->do_oop" to all root oops in all threads.
   // This version may only be called by sequential code.
-  static void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.
--- a/src/share/vm/runtime/thread.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/thread.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,6 +37,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "thread_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "thread_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "thread_bsd.inline.hpp"
 #endif
--- a/src/share/vm/runtime/threadLocalStorage.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/threadLocalStorage.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -68,6 +68,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "threadLS_linux_ppc.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "threadLS_aix_ppc.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "threadLS_bsd_x86.hpp"
 #endif
--- a/src/share/vm/runtime/timer.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/timer.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -35,6 +35,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/runtime/unhandledOops.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/unhandledOops.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 // destructor.  The constructor adds the oop address on a list
 // off each thread and the destructor removes the oop.  At a potential
 // safepoint, the stack addresses of the local variable oops are trashed
-// with a recognizeable value.  If the local variable is used again, it
+// with a recognizable value.  If the local variable is used again, it
 // will segfault, indicating an unsafe use of that oop.
 // eg:
 //    oop o;    //register &o on list
--- a/src/share/vm/runtime/vframe.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vframe.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vframe.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vframe.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vframeArray.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vframeArray.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
 
     frame _frame;                                                // the interpreter frame we will unpack into
     int  _bci;                                                   // raw bci for this vframe
-    bool _reexecute;                                             // whether sould we reexecute this bytecode
+    bool _reexecute;                                             // whether we should reexecute this bytecode
     Method*    _method;                                          // the method for this vframe
     MonitorChunk* _monitors;                                     // active monitors for this vframe
     StackValueCollection* _locals;
@@ -158,7 +158,7 @@
   // Tells whether index is within bounds.
   bool is_within_bounds(int index) const        { return 0 <= index && index < frames(); }
 
-  // Accessores for instance variable
+  // Accessors for instance variable
   int frames() const                            { return _frames;   }
 
   static vframeArray* allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
--- a/src/share/vm/runtime/virtualspace.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/virtualspace.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,6 +36,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -215,9 +218,9 @@
          noaccess_prefix == _alignment, "noaccess prefix wrong");
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
-         "area must be distinguisable from marks for mark-sweep");
+         "area must be distinguishable from marks for mark-sweep");
 }
 
 
@@ -551,10 +554,10 @@
 
   // Determine which regions need to grow in this expand_by call.
   // If you are growing in the lower region, high() must be in that
-  // region so calcuate the size based on high().  For the middle and
+  // region so calculate the size based on high().  For the middle and
   // upper regions, determine the starting point of growth based on the
   // location of high().  By getting the MAX of the region's low address
-  // (or the prevoius region's high address) and high(), we can tell if it
+  // (or the previous region's high address) and high(), we can tell if it
   // is an intra or inter region growth.
   size_t lower_needs = 0;
   if (aligned_lower_new_high > lower_high()) {
--- a/src/share/vm/runtime/virtualspace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/virtualspace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vmStructs.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "vmStructs_linux_ppc.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "vmStructs_aix_ppc.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "vmStructs_bsd_x86.hpp"
 #endif
@@ -201,10 +204,13 @@
 #ifdef TARGET_ARCH_MODEL_arm
 # include "adfiles/adGlobals_arm.hpp"
 #endif
-#ifdef TARGET_ARCH_MODEL_ppc
-# include "adfiles/adGlobals_ppc.hpp"
+#ifdef TARGET_ARCH_MODEL_ppc_32
+# include "adfiles/adGlobals_ppc_32.hpp"
 #endif
+#ifdef TARGET_ARCH_MODEL_ppc_64
+# include "adfiles/adGlobals_ppc_64.hpp"
 #endif
+#endif // COMPILER2
 
 // Note: the cross-product of (c1, c2, product, nonproduct, ...),
 // (nonstatic, static), and (unchecked, checked) has not been taken.
@@ -245,7 +251,6 @@
 typedef Hashtable<Klass*, mtClass>            KlassHashtable;
 typedef HashtableEntry<Klass*, mtClass>       KlassHashtableEntry;
 typedef TwoOopHashtable<Symbol*, mtClass>     SymbolTwoOopHashtable;
-typedef BinaryTreeDictionary<Metablock, FreeList> MetablockTreeDictionary;
 
 //--------------------------------------------------------------------------------
 // VM_STRUCTS
@@ -1214,9 +1219,9 @@
   c2_nonstatic_field(Block,              _pre_order,               uint)                                                             \
   c2_nonstatic_field(Block,              _dom_depth,               uint)                                                             \
   c2_nonstatic_field(Block,              _idom,                    Block*)                                                           \
-  c2_nonstatic_field(Block,              _freq,                    jfloat)                                                           \
+  c2_nonstatic_field(Block,              _freq,                    jdouble)                                                          \
                                                                                                                                      \
-  c2_nonstatic_field(CFGElement,         _freq,                    jfloat)                                                           \
+  c2_nonstatic_field(CFGElement,         _freq,                    jdouble)                                                          \
                                                                                                                                      \
   c2_nonstatic_field(Block_List,         _cnt,                     uint)                                                             \
                                                                                                                                      \
@@ -1324,11 +1329,8 @@
   volatile_nonstatic_field(FreeChunk,          _size,                                        size_t)                                 \
   nonstatic_field(FreeChunk,                   _next,                                        FreeChunk*)                             \
   nonstatic_field(FreeChunk,                   _prev,                                        FreeChunk*)                             \
-  nonstatic_field(FreeList<FreeChunk>,         _size,                                        size_t)                                 \
-  nonstatic_field(FreeList<Metablock>,         _size,                                        size_t)                                 \
-  nonstatic_field(FreeList<FreeChunk>,         _count,                                       ssize_t)                                \
-  nonstatic_field(FreeList<Metablock>,         _count,                                       ssize_t)                                \
-  nonstatic_field(MetablockTreeDictionary,     _total_size,                                  size_t)
+  nonstatic_field(AdaptiveFreeList<FreeChunk>, _size,                                        size_t)                                 \
+  nonstatic_field(AdaptiveFreeList<FreeChunk>, _count,                                       ssize_t)
 
 //--------------------------------------------------------------------------------
 // VM_TYPES
@@ -1857,6 +1859,8 @@
   declare_c2_type(MemBarNode, MultiNode)                                  \
   declare_c2_type(MemBarAcquireNode, MemBarNode)                          \
   declare_c2_type(MemBarReleaseNode, MemBarNode)                          \
+  declare_c2_type(LoadFenceNode, MemBarNode)                              \
+  declare_c2_type(StoreFenceNode, MemBarNode)                             \
   declare_c2_type(MemBarVolatileNode, MemBarNode)                         \
   declare_c2_type(MemBarCPUOrderNode, MemBarNode)                         \
   declare_c2_type(InitializeNode, MemBarNode)                             \
@@ -1981,15 +1985,6 @@
   declare_c2_type(CmpF3Node, CmpFNode)                                    \
   declare_c2_type(CmpDNode, CmpNode)                                      \
   declare_c2_type(CmpD3Node, CmpDNode)                                    \
-  declare_c2_type(MathExactNode, MultiNode)                               \
-  declare_c2_type(MathExactINode, MathExactNode)                          \
-  declare_c2_type(AddExactINode, MathExactINode)                          \
-  declare_c2_type(AddExactLNode, MathExactLNode)                          \
-  declare_c2_type(SubExactINode, MathExactINode)                          \
-  declare_c2_type(SubExactLNode, MathExactLNode)                          \
-  declare_c2_type(NegExactINode, MathExactINode)                          \
-  declare_c2_type(MulExactINode, MathExactINode)                          \
-  declare_c2_type(FlagsProjNode, ProjNode)                                \
   declare_c2_type(BoolNode, Node)                                         \
   declare_c2_type(AbsNode, Node)                                          \
   declare_c2_type(AbsINode, AbsNode)                                      \
@@ -2070,6 +2065,15 @@
   declare_c2_type(ExtractLNode, ExtractNode)                              \
   declare_c2_type(ExtractFNode, ExtractNode)                              \
   declare_c2_type(ExtractDNode, ExtractNode)                              \
+  declare_c2_type(OverflowNode, CmpNode)                                  \
+  declare_c2_type(OverflowINode, OverflowNode)                            \
+  declare_c2_type(OverflowAddINode, OverflowINode)                        \
+  declare_c2_type(OverflowSubINode, OverflowINode)                        \
+  declare_c2_type(OverflowMulINode, OverflowINode)                        \
+  declare_c2_type(OverflowLNode, OverflowNode)                            \
+  declare_c2_type(OverflowAddLNode, OverflowLNode)                        \
+  declare_c2_type(OverflowSubLNode, OverflowLNode)                        \
+  declare_c2_type(OverflowMulLNode, OverflowLNode)                        \
                                                                           \
   /*********************/                                                 \
   /* Adapter Blob Entries */                                              \
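The Overflow node family registered above replaces the removed MathExact nodes: overflow detection is now modeled as a comparison-like node per operation (add/sub/mul, int and long), matching the semantics of Math.addExact and friends. A host-side sketch of the check itself, using a GCC/Clang builtin purely for illustration:

    #include <cstdio>

    // What an OverflowAddINode conceptually computes: a flag telling whether
    // the 32-bit addition wrapped. (Compiler builtin used for brevity.)
    bool overflow_add_i(int a, int b, int* out) {
      return __builtin_add_overflow(a, b, out);
    }

    int main() {
      int r;
      if (overflow_add_i(0x7fffffff, 1, &r)) {
        printf("overflow -> take the slow path / throw ArithmeticException\n");
      } else {
        printf("sum = %d\n", r);
      }
    }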
@@ -2198,14 +2202,8 @@
                                                                           \
   /* freelist */                                                          \
   declare_toplevel_type(FreeChunk*)                                       \
-  declare_toplevel_type(Metablock*)                                       \
-  declare_toplevel_type(FreeBlockDictionary<FreeChunk>*)                  \
-  declare_toplevel_type(FreeList<FreeChunk>*)                             \
-  declare_toplevel_type(FreeList<FreeChunk>)                              \
-  declare_toplevel_type(FreeBlockDictionary<Metablock>*)                  \
-  declare_toplevel_type(FreeList<Metablock>*)                             \
-  declare_toplevel_type(FreeList<Metablock>)                              \
-  declare_type(MetablockTreeDictionary, FreeBlockDictionary<Metablock>)
+  declare_toplevel_type(AdaptiveFreeList<FreeChunk>*)                     \
+  declare_toplevel_type(AdaptiveFreeList<FreeChunk>)
 
 
 //--------------------------------------------------------------------------------
--- a/src/share/vm/runtime/vmThread.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vmThread.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -40,12 +40,6 @@
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
-HS_DTRACE_PROBE_DECL3(hotspot, vmops__begin, char *, uintptr_t, int);
-HS_DTRACE_PROBE_DECL3(hotspot, vmops__end, char *, uintptr_t, int);
-#endif /* !USDT2 */
-
 // Dummy VM operation to act as first element in our circular double-linked list
 class VM_Dummy: public VM_Operation {
   VMOp_Type type() const { return VMOp_Dummy; }
@@ -154,14 +148,9 @@
 // High-level interface
 bool VMOperationQueue::add(VM_Operation *op) {
 
-#ifndef USDT2
-  HS_DTRACE_PROBE3(hotspot, vmops__request, op->name(), strlen(op->name()),
-                   op->evaluation_mode());
-#else /* USDT2 */
   HOTSPOT_VMOPS_REQUEST(
                    (char *) op->name(), strlen(op->name()),
                    op->evaluation_mode());
-#endif /* USDT2 */
 
   // Encapsulates VM queue policy. Currently, that
   // only involves putting them on the right list
@@ -358,14 +347,9 @@
 
   {
     PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
-#ifndef USDT2
-    HS_DTRACE_PROBE3(hotspot, vmops__begin, op->name(), strlen(op->name()),
-                     op->evaluation_mode());
-#else /* USDT2 */
     HOTSPOT_VMOPS_BEGIN(
                      (char *) op->name(), strlen(op->name()),
                      op->evaluation_mode());
-#endif /* USDT2 */
 
     EventExecuteVMOperation event;
 
@@ -383,14 +367,9 @@
       event.commit();
     }
 
-#ifndef USDT2
-    HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
-                     op->evaluation_mode());
-#else /* USDT2 */
     HOTSPOT_VMOPS_END(
                      (char *) op->name(), strlen(op->name()),
                      op->evaluation_mode());
-#endif /* USDT2 */
   }
 
   // Last access of info in _cur_vm_operation!
@@ -677,7 +656,7 @@
 }
 
 
-void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
+void VMThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   Thread::oops_do(f, cld_f, cf);
   _vm_queue->oops_do(f);
 }
--- a/src/share/vm/runtime/vmThread.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vmThread.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -126,7 +126,7 @@
   static VMThread* vm_thread()                    { return _vm_thread; }
 
   // GC support
-  void oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf);
+  void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
 
   // Debugging
   void print_on(outputStream* st) const;
--- a/src/share/vm/runtime/vm_operations.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vm_operations.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -154,7 +154,7 @@
   void set_next(VM_Operation *next)              { _next = next; }
   void set_prev(VM_Operation *prev)              { _prev = prev; }
 
-  // Configuration. Override these appropriatly in subclasses.
+  // Configuration. Override these appropriately in subclasses.
   virtual VMOp_Type type() const = 0;
   virtual Mode evaluation_mode() const            { return _safepoint; }
   virtual bool allow_nested_vm_operations() const { return false; }
--- a/src/share/vm/runtime/vm_version.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vm_version.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -182,6 +182,7 @@
 #define OS       LINUX_ONLY("linux")             \
                  WINDOWS_ONLY("windows")         \
                  SOLARIS_ONLY("solaris")         \
+                 AIX_ONLY("aix")                 \
                  BSD_ONLY("bsd")
 
 #ifdef ZERO
@@ -191,7 +192,8 @@
                  IA64_ONLY("ia64")               \
                  AMD64_ONLY("amd64")             \
                  ARM_ONLY("arm")                 \
-                 PPC_ONLY("ppc")                 \
+                 PPC32_ONLY("ppc")               \
+                 PPC64_ONLY("ppc64")             \
                  SPARC_ONLY("sparc")
 #endif // ZERO
 
@@ -243,6 +245,9 @@
       #endif
     #elif defined(__GNUC__)
         #define HOTSPOT_BUILD_COMPILER "gcc " __VERSION__
+    #elif defined(__IBMCPP__)
+        #define HOTSPOT_BUILD_COMPILER "xlC " XSTR(__IBMCPP__)
+
     #else
       #define HOTSPOT_BUILD_COMPILER "unknown compiler"
     #endif
@@ -255,7 +260,7 @@
       #define FLOAT_ARCH_STR "-e500v2"
     #elif defined(ARM)
       #define FLOAT_ARCH_STR "-vfp"
-    #elif defined(PPC)
+    #elif defined(PPC32)
       #define FLOAT_ARCH_STR "-hflt"
     #else
       #define FLOAT_ARCH_STR ""
--- a/src/share/vm/runtime/vm_version.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/runtime/vm_version.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/attachListener.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/attachListener.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -282,6 +282,20 @@
       return JNI_ERR;
     }
   }
+
+  if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
+    FormatBuffer<80> err_msg("");
+    if (!Arguments::verify_MaxHeapFreeRatio(err_msg, value)) {
+      out->print_cr(err_msg.buffer());
+      return JNI_ERR;
+    }
+  } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) {
+    FormatBuffer<80> err_msg("");
+    if (!Arguments::verify_MinHeapFreeRatio(err_msg, value)) {
+      out->print_cr(err_msg.buffer());
+      return JNI_ERR;
+    }
+  }
   bool res = CommandLineFlags::uintxAtPut((char*)name, &value, Flag::ATTACH_ON_DEMAND);
   if (! res) {
     out->print_cr("setting flag %s failed", name);
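One subtlety in the new flag checks: "MaxHeapFreeRatio" is 16 characters, so comparing 17 bytes pulls the terminating NUL into the comparison, turning strncmp into an exact-match test (a longer name sharing the prefix cannot slip through). A tiny demonstration:

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* flag = "MaxHeapFreeRatio";   // 16 chars + NUL = 17 bytes
      // Exact match: all 16 characters and the NUL agree.
      printf("%d\n", strncmp("MaxHeapFreeRatio",  flag, 17) == 0);  // 1
      // Longer name: differs at byte 17 ('X' vs '\0').
      printf("%d\n", strncmp("MaxHeapFreeRatioX", flag, 17) == 0);  // 0
      // Shorter name: differs before the end.
      printf("%d\n", strncmp("MaxHeapFreeRati",   flag, 17) == 0);  // 0
    }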
@@ -451,15 +465,39 @@
   }
 }
 
+bool AttachListener::has_init_error(TRAPS) {
+  if (HAS_PENDING_EXCEPTION) {
+    tty->print_cr("Exception in VM (AttachListener::init) : ");
+    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+    tty->cr();
+
+    CLEAR_PENDING_EXCEPTION;
+
+    return true;
+  } else {
+    return false;
+  }
+}
+
 // Starts the Attach Listener thread
 void AttachListener::init() {
   EXCEPTION_MARK;
-  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
+  Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, THREAD);
+  if (has_init_error(THREAD)) {
+    return;
+  }
+
   instanceKlassHandle klass (THREAD, k);
-  instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
+  instanceHandle thread_oop = klass->allocate_instance_handle(THREAD);
+  if (has_init_error(THREAD)) {
+    return;
+  }
 
   const char thread_name[] = "Attach Listener";
-  Handle string = java_lang_String::create_from_str(thread_name, CHECK);
+  Handle string = java_lang_String::create_from_str(thread_name, THREAD);
+  if (has_init_error(THREAD)) {
+    return;
+  }
 
   // Initialize thread_oop to put it into the system threadGroup
   Handle thread_group (THREAD, Universe::system_thread_group());
@@ -472,13 +510,7 @@
                        string,
                        THREAD);
 
-  if (HAS_PENDING_EXCEPTION) {
-    tty->print_cr("Exception in VM (AttachListener::init) : ");
-    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
-    tty->cr();
-
-    CLEAR_PENDING_EXCEPTION;
-
+  if (has_init_error(THREAD)) {
     return;
   }
 
@@ -490,14 +522,7 @@
                         vmSymbols::thread_void_signature(),
                         thread_oop,             // ARG 1
                         THREAD);
-
-  if (HAS_PENDING_EXCEPTION) {
-    tty->print_cr("Exception in VM (AttachListener::init) : ");
-    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
-    tty->cr();
-
-    CLEAR_PENDING_EXCEPTION;
-
+  if (has_init_error(THREAD)) {
     return;
   }
 
@@ -507,7 +532,7 @@
     // Check that thread and osthread were created
     if (listener_thread == NULL || listener_thread->osthread() == NULL) {
       vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    "unable to create new native thread");
+                                    os::native_thread_creation_failed_msg());
     }
 
     java_lang_Thread::set_thread(thread_oop(), listener_thread);
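The rewrite above trades the CHECK macro, which silently returns on a pending
exception, for explicit THREAD passing plus the shared has_init_error() helper,
so every early failure in AttachListener::init() is printed and cleared in one
place. A condensed sketch of the two calling conventions (TRAPS, CHECK, and
THREAD are the real HotSpot exception-handling macros; do_stuff is a
hypothetical callee):

    void example(TRAPS) {
      do_stuff(CHECK);             // on a pending exception: return at once,
                                   // nothing is printed or cleared
      do_stuff(THREAD);            // merely passes the thread through;
      if (HAS_PENDING_EXCEPTION) { // the caller decides how to react --
        CLEAR_PENDING_EXCEPTION;   // here, report-and-clear, as
        return;                    // has_init_error() does above
      }
    }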
--- a/src/share/vm/services/attachListener.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/attachListener.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -94,6 +94,9 @@
   // dequeue the next operation
   static AttachOperation* dequeue();
 #endif // !INCLUDE_SERVICES
+
+ private:
+  static bool has_init_error(TRAPS);
 };
 
 #if INCLUDE_SERVICES
--- a/src/share/vm/services/classLoadingService.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/classLoadingService.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,26 +37,6 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-
-HS_DTRACE_PROBE_DECL4(hotspot, class__loaded, char*, int, oop, bool);
-HS_DTRACE_PROBE_DECL4(hotspot, class__unloaded, char*, int, oop, bool);
-
-#define DTRACE_CLASSLOAD_PROBE(type, clss, shared)  \
-  {                                                 \
-    char* data = NULL;                              \
-    int len = 0;                                    \
-    Symbol* name = (clss)->name();                  \
-    if (name != NULL) {                             \
-      data = (char*)name->bytes();                  \
-      len = name->utf8_length();                    \
-    }                                               \
-    HS_DTRACE_PROBE4(hotspot, class__##type,        \
-      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), (shared)); \
-  }
-
-#else /* USDT2 */
-
 #define HOTSPOT_CLASS_unloaded HOTSPOT_CLASS_UNLOADED
 #define HOTSPOT_CLASS_loaded HOTSPOT_CLASS_LOADED
 #define DTRACE_CLASSLOAD_PROBE(type, clss, shared)  \
@@ -72,7 +52,6 @@
       data, len, (clss)->class_loader(), (shared)); \
   }
 
-#endif /* USDT2 */
 #else //  ndef DTRACE_ENABLED
 
 #define DTRACE_CLASSLOAD_PROBE(type, clss, shared)
--- a/src/share/vm/services/classLoadingService.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/classLoadingService.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/diagnosticCommand.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/diagnosticCommand.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "services/diagnosticArgument.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/diagnosticFramework.hpp"
@@ -44,6 +45,7 @@
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<CommandLineDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintSystemPropertiesDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<PrintVMFlagsDCmd>(full_export, true, false));
+  DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMDynamicLibrariesDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<VMUptimeDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<SystemGCDCmd>(full_export, true, false));
   DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl<RunFinalizationDCmd>(full_export, true, false));
@@ -610,8 +612,7 @@
 }
 
 JMXStartLocalDCmd::JMXStartLocalDCmd(outputStream *output, bool heap_allocated) :
-  DCmd(output, heap_allocated)
-{
+  DCmd(output, heap_allocated) {
   // do nothing
 }
 
@@ -632,7 +633,6 @@
     JavaCalls::call_static(&result, ik, vmSymbols::startLocalAgent_name(), vmSymbols::void_method_signature(), CHECK);
 }
 
-
 void JMXStopRemoteDCmd::execute(DCmdSource source, TRAPS) {
     ResourceMark rm(THREAD);
     HandleMark hm(THREAD);
@@ -650,3 +650,12 @@
     JavaCalls::call_static(&result, ik, vmSymbols::stopRemoteAgent_name(), vmSymbols::void_method_signature(), CHECK);
 }
 
+VMDynamicLibrariesDCmd::VMDynamicLibrariesDCmd(outputStream *output, bool heap_allocated) :
+  DCmd(output, heap_allocated) {
+  // do nothing
+}
+
+void VMDynamicLibrariesDCmd::execute(DCmdSource source, TRAPS) {
+  os::print_dll_info(output());
+  output()->cr();
+}
--- a/src/share/vm/services/diagnosticCommand.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/diagnosticCommand.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -132,6 +132,29 @@
   virtual void execute(DCmdSource source, TRAPS);
 };
 
+class VMDynamicLibrariesDCmd : public DCmd {
+public:
+  VMDynamicLibrariesDCmd(outputStream* output, bool heap);
+  static const char* name() {
+    return "VM.dynlibs";
+  }
+  static const char* description() {
+    return "Print loaded dynamic libraries.";
+  }
+  static const char* impact() {
+    return "Low";
+  }
+  static const JavaPermission permission() {
+    JavaPermission p = {"java.lang.management.ManagementPermission",
+                        "monitor", NULL};
+    return p;
+  }
+  static int num_arguments() {
+    return 0;
+  };
+  virtual void execute(DCmdSource source, TRAPS);
+};
+
 class VMUptimeDCmd : public DCmdWithParser {
 protected:
   DCmdArgument<bool> _date;
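The new VM.dynlibs command shows the full DCmd contract in miniature: the
static name()/description()/impact()/permission()/num_arguments() methods
describe the command to the framework, execute() does the work, and the
register_DCmdFactory() call in diagnosticCommand.cpp makes it reachable from
the attach mechanism (e.g. via jcmd <pid> VM.dynlibs). A hypothetical minimal
command following the same shape (EchoDCmd and its output are illustrative
only, not part of this changeset):

    class EchoDCmd : public DCmd {
    public:
      EchoDCmd(outputStream* output, bool heap) : DCmd(output, heap) {}
      static const char* name()        { return "VM.echo"; }
      static const char* description() { return "Print a fixed string."; }
      static const char* impact()      { return "Low"; }
      static int num_arguments()       { return 0; }
      virtual void execute(DCmdSource source, TRAPS) {
        output()->print_cr("echo");   // write to the command's output stream
      }
    };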
--- a/src/share/vm/services/dtraceAttacher.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/dtraceAttacher.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/g1MemoryPool.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/g1MemoryPool.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/heapDumper.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/heapDumper.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1604,6 +1604,18 @@
               }
             }
           }
+          StackValueCollection *exprs = jvf->expressions();
+          for(int index = 0; index < exprs->size(); index++) {
+            if (exprs->at(index)->type() == T_OBJECT) {
+               oop o = exprs->obj_at(index)();
+               if (o != NULL) {
+                 writer()->write_u1(HPROF_GC_ROOT_JAVA_FRAME);
+                 writer()->write_objectID(o);
+                 writer()->write_u4(thread_serial_num);
+                 writer()->write_u4((u4) (stack_depth + extra_frames));
+               }
+             }
+          }
         } else {
           // native frame
           if (stack_depth == 0) {
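The heapDumper.cpp addition walks each Java frame's operand (expression) stack
exactly the way the preceding code walks its locals, so an oop that is live
only mid-evaluation (for example, a freshly allocated object not yet stored
into a local) is still reported as an HPROF_GC_ROOT_JAVA_FRAME root. A
condensed sketch of the shared shape (write_root is a hypothetical stand-in
for the writer() calls in the diff):

    static void emit_frame_roots(StackValueCollection* values,
                                 u4 thread_serial_num, u4 depth) {
      for (int i = 0; i < values->size(); i++) {
        if (values->at(i)->type() == T_OBJECT) {   // only object slots are roots
          oop o = values->obj_at(i)();
          if (o != NULL) {
            write_root(HPROF_GC_ROOT_JAVA_FRAME, o, thread_serial_num, depth);
          }
        }
      }
    }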
--- a/src/share/vm/services/jmm.h	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/jmm.h	Wed Mar 12 13:30:08 2014 +0100
@@ -153,6 +153,7 @@
   JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR  = 4,   /* Set via environment variables */
   JMM_VMGLOBAL_ORIGIN_CONFIG_FILE  = 5,   /* Set via config file (such as .hotspotrc) */
   JMM_VMGLOBAL_ORIGIN_ERGONOMIC    = 6,   /* Set via ergonomic */
+  JMM_VMGLOBAL_ORIGIN_ATTACH_ON_DEMAND = 7,   /* Set via attach */
   JMM_VMGLOBAL_ORIGIN_OTHER        = 99   /* Set via some other mechanism */
 } jmmVMGlobalOrigin;
 
--- a/src/share/vm/services/management.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/management.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1724,6 +1724,9 @@
     case Flag::ERGONOMIC:
       global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
       break;
+    case Flag::ATTACH_ON_DEMAND:
+      global->origin = JMM_VMGLOBAL_ORIGIN_ATTACH_ON_DEMAND;
+      break;
     default:
       global->origin = JMM_VMGLOBAL_ORIGIN_OTHER;
   }
@@ -1821,7 +1824,7 @@
               "This flag is not writeable.");
   }
 
-  bool succeed;
+  bool succeed = false;
   if (flag->is_bool()) {
     bool bvalue = (new_value.z == JNI_TRUE ? true : false);
     succeed = CommandLineFlags::boolAtPut(name, &bvalue, Flag::MANAGEMENT);
@@ -1830,6 +1833,18 @@
     succeed = CommandLineFlags::intxAtPut(name, &ivalue, Flag::MANAGEMENT);
   } else if (flag->is_uintx()) {
     uintx uvalue = (uintx)new_value.j;
+
+    if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
+      FormatBuffer<80> err_msg("");
+      if (!Arguments::verify_MaxHeapFreeRatio(err_msg, uvalue)) {
+        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
+      }
+    } else if (strncmp(name, "MinHeapFreeRatio", 17) == 0) {
+      FormatBuffer<80> err_msg("");
+      if (!Arguments::verify_MinHeapFreeRatio(err_msg, uvalue)) {
+        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), err_msg.buffer());
+      }
+    }
     succeed = CommandLineFlags::uintxAtPut(name, &uvalue, Flag::MANAGEMENT);
   } else if (flag->is_uint64_t()) {
     uint64_t uvalue = (uint64_t)new_value.j;
@@ -1841,6 +1856,9 @@
     }
     ccstr svalue = java_lang_String::as_utf8_string(str);
     succeed = CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT);
+    if (succeed) {
+      FREE_C_HEAP_ARRAY(char, svalue, mtInternal);
+    }
   }
   assert(succeed, "Setting flag should succeed");
 JVM_END
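Two quiet fixes ride along in this hunk: 'succeed' is now initialized, so an
unmatched flag type can no longer feed an uninitialized value to the assert,
and the ccstr branch frees the string handed back through the pointer. A
sketch of the ownership contract that second fix assumes (JDK 8-era
ccstrAtPut semantics: the setter keeps its own C-heap copy of the new value
and returns the previous heap-allocated value through the in/out argument):

    ccstr svalue = java_lang_String::as_utf8_string(str);   // resource arena
    if (CommandLineFlags::ccstrAtPut(name, &svalue, Flag::MANAGEMENT)) {
      // svalue now points at the *previous* flag value, not our input,
      // and releasing it here is what closes the leak
      FREE_C_HEAP_ARRAY(char, svalue, mtInternal);
    }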
--- a/src/share/vm/services/memReporter.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memReporter.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memReporter.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memReporter.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memSnapshot.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memSnapshot.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryManager.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryManager.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -36,13 +36,6 @@
 #include "services/gcNotifier.hpp"
 #include "utilities/dtrace.hpp"
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__begin, char*, int, char*, int,
-  size_t, size_t, size_t, size_t);
-HS_DTRACE_PROBE_DECL8(hotspot, mem__pool__gc__end, char*, int, char*, int,
-  size_t, size_t, size_t, size_t);
-#endif /* !USDT2 */
-
 MemoryManager::MemoryManager() {
   _num_pools = 0;
   (void)const_cast<instanceOop&>(_memory_mgr_obj = NULL);
@@ -242,19 +235,11 @@
       MemoryPool* pool = MemoryService::get_memory_pool(i);
       MemoryUsage usage = pool->get_memory_usage();
       _current_gc_stat->set_before_gc_usage(i, usage);
-#ifndef USDT2
-      HS_DTRACE_PROBE8(hotspot, mem__pool__gc__begin,
-        name(), strlen(name()),
-        pool->name(), strlen(pool->name()),
-        usage.init_size(), usage.used(),
-        usage.committed(), usage.max_size());
-#else /* USDT2 */
       HOTSPOT_MEM_POOL_GC_BEGIN(
         (char *) name(), strlen(name()),
         (char *) pool->name(), strlen(pool->name()),
         usage.init_size(), usage.used(),
         usage.committed(), usage.max_size());
-#endif /* USDT2 */
     }
   }
 }
@@ -280,19 +265,11 @@
       MemoryPool* pool = MemoryService::get_memory_pool(i);
       MemoryUsage usage = pool->get_memory_usage();
 
-#ifndef USDT2
-      HS_DTRACE_PROBE8(hotspot, mem__pool__gc__end,
-        name(), strlen(name()),
-        pool->name(), strlen(pool->name()),
-        usage.init_size(), usage.used(),
-        usage.committed(), usage.max_size());
-#else /* USDT2 */
       HOTSPOT_MEM_POOL_GC_END(
         (char *) name(), strlen(name()),
         (char *) pool->name(), strlen(pool->name()),
         usage.init_size(), usage.used(),
         usage.committed(), usage.max_size());
-#endif /* USDT2 */
 
       _current_gc_stat->set_after_gc_usage(i, usage);
     }
--- a/src/share/vm/services/memoryManager.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryManager.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryPool.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryPool.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryService.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryService.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryService.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryService.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/memoryUsage.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/memoryUsage.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/psMemoryPool.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/psMemoryPool.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/services/runtimeService.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/runtimeService.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -31,11 +31,6 @@
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL(hs_private, safepoint__begin);
-HS_DTRACE_PROBE_DECL(hs_private, safepoint__end);
-#endif /* !USDT2 */
-
 #if INCLUDE_MANAGEMENT
 TimeStamp RuntimeService::_app_timer;
 TimeStamp RuntimeService::_safepoint_timer;
@@ -112,11 +107,7 @@
 }
 
 void RuntimeService::record_safepoint_begin() {
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, safepoint__begin);
-#else /* USDT2 */
   HS_PRIVATE_SAFEPOINT_BEGIN();
-#endif /* USDT2 */
 
   // Print the time interval in which the app was executing
   if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
@@ -143,11 +134,7 @@
 }
 
 void RuntimeService::record_safepoint_end() {
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, safepoint__end);
-#else /* USDT2 */
   HS_PRIVATE_SAFEPOINT_END();
-#endif /* USDT2 */
 
   // Print the time interval for which the app was stopped
   // during the current safepoint operation.
--- a/src/share/vm/services/threadService.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/services/threadService.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/shark/sharkBlock.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkBlock.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkBuilder.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkBuilder.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkCompiler.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkCompiler.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkCompiler.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkCompiler.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkConstant.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkConstant.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkFunction.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkFunction.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkInliner.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkInliner.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkInvariants.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkInvariants.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/shark/sharkTopLevelBlock.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/shark/sharkTopLevelBlock.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
--- a/src/share/vm/trace/trace.xml	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/trace/trace.xml	Wed Mar 12 13:30:08 2014 +0100
@@ -122,6 +122,46 @@
       <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
     </event>
 
+    <event id="LongFlagChanged" path="vm/flag/long_changed" label="Long Flag Changed"
+          is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="LONG" field="old_value" label="Old Value" />
+      <value type="LONG" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="UnsignedLongFlagChanged" path="vm/flag/ulong_changed" label="Unsigned Long Flag Changed"
+          is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="ULONG" field="old_value" label="Old Value" />
+      <value type="ULONG" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="DoubleFlagChanged" path="vm/flag/double_changed" label="Double Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="DOUBLE" field="old_value" label="Old Value" />
+      <value type="DOUBLE" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="BooleanFlagChanged" path="vm/flag/boolean_changed" label="Boolean Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="BOOLEAN" field="old_value" label="Old Value" />
+      <value type="BOOLEAN" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
+    <event id="StringFlagChanged" path="vm/flag/string_changed" label="String Flag Changed"
+         is_instant="true">
+      <value type="UTF8" field="name" label="Name" />
+      <value type="UTF8" field="old_value" label="Old Value" />
+      <value type="UTF8" field="new_value" label="New Value" />
+      <value type="FLAGVALUEORIGIN" field="origin" label="Origin" />
+    </event>
+
     <struct id="VirtualSpace">
       <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
       <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
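Each <event> definition above is turned by the trace framework into a
generated C++ event class with one set_<field> accessor per <value> and a
commit() for instant events. A sketch of how a VM-side caller would emit the
new flag-change events (assuming the usual generated EventLongFlagChanged
wrapper; the surrounding flag-update code is elided):

    EventLongFlagChanged event;
    if (event.should_commit()) {      // cheap gate when tracing is off
      event.set_name(name);
      event.set_old_value(old_value);
      event.set_new_value(new_value);
      event.set_origin(origin);
      event.commit();
    }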
--- a/src/share/vm/trace/tracetypes.xml	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/trace/tracetypes.xml	Wed Mar 12 13:30:08 2014 +0100
@@ -85,12 +85,6 @@
       <value type="UTF8" field="name" label="Name"/>
     </content_type>
 
-    <content_type id="StackTrace" hr_name="Stacktrace"
-                  type="U8" builtin_type="STACKTRACE">
-      <value type="BOOLEAN" field="truncated" label="Truncated"/>
-      <structarray type="StackFrame" field="frames" label="Stack frames"/>
-    </content_type>
-
     <content_type id="Class" hr_name="Java class"
                   type="U8" builtin_type="CLASS">
       <value type="CLASS" field="loaderClass" label="ClassLoader"/>
@@ -116,17 +110,6 @@
       <value type="UTF8" field="name" label="Name"/>
     </content_type>
 
-    <content_type id="FrameType" hr_name="Frame type"
-                  type="U1" jvm_type="FRAMETYPE">
-      <value type="UTF8" field="desc" label="Description"/>
-    </content_type>
-
-    <struct_type id="StackFrame">
-      <value type="METHOD" field="method" label="Java Method"/>
-      <value type="INTEGER" field="line" label="Line number"/>
-      <value type="FRAMETYPE" field="type" label="Frame type"/>
-    </struct_type>
-
     <content_type id="GCName" hr_name="GC Name"
                   type="U1" jvm_type="GCNAME">
       <value type="UTF8" field="name" label="name" />
@@ -167,6 +150,11 @@
       <value type="UTF8" field="phase" label="phase" />
     </content_type>
 
+    <content_type id="FlagValueOrigin" hr_name="Flag Value Origin"
+                  type="U1" jvm_type="FLAGVALUEORIGIN">
+      <value type="UTF8" field="origin" label="origin" />
+    </content_type>
+
   </content_types>
 
 
@@ -351,6 +339,10 @@
     <!-- VMOPERATIONTYPE -->
     <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
                   type="u2" sizeop="sizeof(u2)" />
+
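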
+    <!-- FLAGVALUEORIGIN -->
+    <primary_type symbol="FLAGVALUEORIGIN" datatype="U1"
+                  contenttype="FLAGVALUEORIGIN" type="u1" sizeop="sizeof(u1)" />
 
   </primary_types>
 </types>
--- a/src/share/vm/utilities/accessFlags.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/accessFlags.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -34,6 +34,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/utilities/array.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/array.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@
 
   void initialize(size_t esize, int length) {
     assert(length >= 0, "illegal length");
-    assert(_data == NULL, "must be new object");
+    assert(StressRewriter || _data == NULL, "must be new object");
     _length  = length;
     _data    = resource_allocate_bytes(esize * length);
     DEBUG_ONLY(init_nesting();)
--- a/src/share/vm/utilities/bitMap.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/bitMap.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -107,7 +110,7 @@
     while (true) {
       intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
       if (res == w) break;
-      w  = *pw;
+      w  = res;
       nw = value ? (w | ~mr) : (w & mr);
     }
   }
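The one-word bitMap.cpp fix converges on the canonical compare-and-swap retry
idiom: cmpxchg already returns the value it observed at the destination, so
after a failed attempt that result is the freshest view of the word, and
re-reading *pw is a redundant extra load. A generic sketch (compute_new is a
hypothetical stand-in for the mask transform in the diff):

    intptr_t w = *pw;                              // one initial read
    while (true) {
      intptr_t nw  = compute_new(w);               // derive the desired value
      intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
      if (res == w) break;                         // CAS installed nw
      w = res;                                     // reuse the witnessed value
    }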
--- a/src/share/vm/utilities/bitMap.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/bitMap.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/bitMap.inline.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/bitMap.inline.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/debug.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/debug.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_UTILITIES_DEBUG_HPP
 #define SHARE_VM_UTILITIES_DEBUG_HPP
 
+#include "utilities/globalDefinitions.hpp"
 #include "prims/jvm.h"
-#include "utilities/globalDefinitions.hpp"
 
 #include <stdarg.h>
 
--- a/src/share/vm/utilities/decoder.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/decoder.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@
   #include "decoder_windows.hpp"
 #elif defined(__APPLE__)
   #include "decoder_machO.hpp"
+#elif defined(AIX)
+  #include "decoder_aix.hpp"
 #else
   #include "decoder_elf.hpp"
 #endif
@@ -66,6 +68,8 @@
   decoder = new (std::nothrow) WindowsDecoder();
 #elif defined (__APPLE__)
   decoder = new (std::nothrow)MachODecoder();
+#elif defined(AIX)
+  decoder = new (std::nothrow)AIXDecoder();
 #else
   decoder = new (std::nothrow)ElfDecoder();
 #endif
--- a/src/share/vm/utilities/decoder.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/decoder.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/decoder_elf.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/decoder_elf.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -73,4 +73,4 @@
 
   return file;
 }
-#endif
+#endif // !_WINDOWS && !__APPLE__
--- a/src/share/vm/utilities/decoder_elf.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/decoder_elf.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,5 +55,5 @@
   ElfFile*         _opened_elf_files;
 };
 
-#endif
+#endif // !_WINDOWS && !__APPLE__
 #endif // SHARE_VM_UTILITIES_DECODER_ELF_HPP
--- a/src/share/vm/utilities/dtrace.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/dtrace.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,21 +37,18 @@
 // Work around dtrace tail call bug 6672627 until it is fixed in solaris 10.
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG() \
   do { volatile size_t dtrace_workaround_tail_call_bug = 1; } while (0)
-
-#define USDT1 1
 #elif defined(LINUX)
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
-#define USDT1 1
 #elif defined(__APPLE__)
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
-#define USDT2 1
 #include <sys/types.h>
+#else
+#error "dtrace enabled for unknown os"
+#endif /* defined(SOLARIS) */
+
 #include "dtracefiles/hotspot.h"
 #include "dtracefiles/hotspot_jni.h"
 #include "dtracefiles/hs_private.h"
-#else
-#error "dtrace enabled for unknown os"
-#endif /* defined(SOLARIS) */
 
 #else /* defined(DTRACE_ENABLED) */
 
@@ -60,147 +57,8 @@
 
 #define HS_DTRACE_WORKAROUND_TAIL_CALL_BUG()
 
-#ifndef USDT2
-
-#define DTRACE_PROBE(a,b) {;}
-#define DTRACE_PROBE1(a,b,c) {;}
-#define DTRACE_PROBE2(a,b,c,d) {;}
-#define DTRACE_PROBE3(a,b,c,d,e) {;}
-#define DTRACE_PROBE4(a,b,c,d,e,f) {;}
-#define DTRACE_PROBE5(a,b,c,d,e,f,g) {;}
-#define DTRACE_PROBE6(a,b,c,d,e,f,g,h) {;}
-#define DTRACE_PROBE7(a,b,c,d,e,f,g,h,i) {;}
-#define DTRACE_PROBE8(a,b,c,d,e,f,g,h,i,j) {;}
-#define DTRACE_PROBE9(a,b,c,d,e,f,g,h,i,j,k) {;}
-#define DTRACE_PROBE10(a,b,c,d,e,f,g,h,i,j,k,l) {;}
-
-#else /* USDT2 */
-
-#include "dtrace_usdt2_disabled.hpp"
-#endif /* USDT2 */
+#include "dtrace_disabled.hpp"
 
 #endif /* defined(DTRACE_ENABLED) */
 
-#ifndef USDT2
-
-#define HS_DTRACE_PROBE_FN(provider,name)\
-  __dtrace_##provider##___##name
-
-#ifdef SOLARIS
-// Solaris dtrace needs actual extern function decls.
-#define HS_DTRACE_PROBE_DECL_N(provider,name,args) \
-  DTRACE_ONLY(extern "C" void HS_DTRACE_PROBE_FN(provider,name) args)
-#define HS_DTRACE_PROBE_CDECL_N(provider,name,args) \
-  DTRACE_ONLY(extern void HS_DTRACE_PROBE_FN(provider,name) args)
-#else
-// Systemtap dtrace compatible probes on GNU/Linux don't.
-// If dtrace is disabled this macro becomes NULL
-#define HS_DTRACE_PROBE_DECL_N(provider,name,args)
-#define HS_DTRACE_PROBE_CDECL_N(provider,name,args)
-#endif
-
-/* Dtrace probe declarations */
-#define HS_DTRACE_PROBE_DECL(provider,name) \
-  HS_DTRACE_PROBE_DECL0(provider,name)
-#define HS_DTRACE_PROBE_DECL0(provider,name)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(void))
-#define HS_DTRACE_PROBE_DECL1(provider,name,t0)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(uintptr_t))
-#define HS_DTRACE_PROBE_DECL2(provider,name,t0,t1)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL3(provider,name,t0,t1,t2)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(uintptr_t,uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL4(provider,name,t0,t1,t2,t3)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(uintptr_t,uintptr_t,uintptr_t,\
-    uintptr_t))
-#define HS_DTRACE_PROBE_DECL5(provider,name,t0,t1,t2,t3,t4)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL6(provider,name,t0,t1,t2,t3,t4,t5)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL7(provider,name,t0,t1,t2,t3,t4,t5,t6)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL8(provider,name,t0,t1,t2,t3,t4,t5,t6,t7)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,\
-    uintptr_t))
-#define HS_DTRACE_PROBE_DECL9(provider,name,t0,t1,t2,t3,t4,t5,t6,t7,t8)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,\
-    uintptr_t,uintptr_t))
-#define HS_DTRACE_PROBE_DECL10(provider,name,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9)\
-  HS_DTRACE_PROBE_DECL_N(provider,name,(\
-    uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,uintptr_t,\
-    uintptr_t,uintptr_t,uintptr_t))
-
-/* Dtrace probe definitions */
-#if defined(SOLARIS)
-// Solaris dtrace uses actual function calls.
-#define HS_DTRACE_PROBE_N(provider,name, args) \
-  DTRACE_ONLY(HS_DTRACE_PROBE_FN(provider,name) args)
-
-#define HS_DTRACE_PROBE(provider,name) HS_DTRACE_PROBE0(provider,name)
-#define HS_DTRACE_PROBE0(provider,name)\
-  HS_DTRACE_PROBE_N(provider,name,())
-#define HS_DTRACE_PROBE1(provider,name,a0)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0))
-#define HS_DTRACE_PROBE2(provider,name,a0,a1)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1))
-#define HS_DTRACE_PROBE3(provider,name,a0,a1,a2)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2))
-#define HS_DTRACE_PROBE4(provider,name,a0,a1,a2,a3)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3))
-#define HS_DTRACE_PROBE5(provider,name,a0,a1,a2,a3,a4)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4))
-#define HS_DTRACE_PROBE6(provider,name,a0,a1,a2,a3,a4,a5)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5))
-#define HS_DTRACE_PROBE7(provider,name,a0,a1,a2,a3,a4,a5,a6)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5,(uintptr_t)a6))
-#define HS_DTRACE_PROBE8(provider,name,a0,a1,a2,a3,a4,a5,a6,a7)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5,(uintptr_t)a6,(uintptr_t)a7))
-#define HS_DTRACE_PROBE9(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5,(uintptr_t)a6,(uintptr_t)a7,\
-    (uintptr_t)a8))
-#define HS_DTRACE_PROBE10(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9)\
-  HS_DTRACE_PROBE_N(provider,name,((uintptr_t)a0,(uintptr_t)a1,(uintptr_t)a2,\
-    (uintptr_t)a3,(uintptr_t)a4,(uintptr_t)a5,(uintptr_t)a6,(uintptr_t)a7,\
-    (uintptr_t)a8,(uintptr_t)a9))
-#else
-// Systemtap dtrace compatible probes on GNU/Linux use direct macros.
-// If dtrace is disabled this macro becomes NULL
-#define HS_DTRACE_PROBE(provider,name) HS_DTRACE_PROBE0(provider,name)
-#define HS_DTRACE_PROBE0(provider,name)\
-  DTRACE_PROBE(provider,name)
-#define HS_DTRACE_PROBE1(provider,name,a0)\
-  DTRACE_PROBE1(provider,name,a0)
-#define HS_DTRACE_PROBE2(provider,name,a0,a1)\
-  DTRACE_PROBE2(provider,name,a0,a1)
-#define HS_DTRACE_PROBE3(provider,name,a0,a1,a2)\
-  DTRACE_PROBE3(provider,name,a0,a1,a2)
-#define HS_DTRACE_PROBE4(provider,name,a0,a1,a2,a3)\
-  DTRACE_PROBE4(provider,name,a0,a1,a2,a3)
-#define HS_DTRACE_PROBE5(provider,name,a0,a1,a2,a3,a4)\
-  DTRACE_PROBE5(provider,name,a0,a1,a2,a3,a4)
-#define HS_DTRACE_PROBE6(provider,name,a0,a1,a2,a3,a4,a5)\
-  DTRACE_PROBE6(provider,name,a0,a1,a2,a3,a4,a5)
-#define HS_DTRACE_PROBE7(provider,name,a0,a1,a2,a3,a4,a5,a6)\
-  DTRACE_PROBE7(provider,name,a0,a1,a2,a3,a4,a5,a6)
-#define HS_DTRACE_PROBE8(provider,name,a0,a1,a2,a3,a4,a5,a6,a7)\
-  DTRACE_PROBE8(provider,name,a0,a1,a2,a3,a4,a5,a6,a7)
-#define HS_DTRACE_PROBE9(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8)\
-  DTRACE_PROBE9(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8)
-#define HS_DTRACE_PROBE10(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9)\
-  DTRACE_PROBE10(provider,name,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9)
-#endif
-
-#endif /* !USDT2 */
-
 #endif // SHARE_VM_UTILITIES_DTRACE_HPP
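With the USDT1 path retired, every probe site compiles against the same
uppercase SDT-style macros on all platforms; when DTRACE_ENABLED is unset,
both the probe and its _ENABLED() gate come from dtrace_disabled.hpp below,
so guarded sites fold away entirely. The typical shape (the _ENABLED() test
pays off where marshalling the probe arguments is costly):

    if (HOTSPOT_GC_BEGIN_ENABLED()) {   // constant 0 when dtrace is compiled out
      HOTSPOT_GC_BEGIN(full);           // expands to nothing when disabled
    }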
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/dtrace_disabled.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,1091 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_DTRACE_DISABLED_HPP
+#define SHARE_VM_UTILITIES_DTRACE_DISABLED_HPP
+
+/* This file contains dummy provider probes needed when compiling a hotspot
+ * that does not support dtrace probes. This could be because we're building
+ * on a system that doesn't support dtrace or because we're building a variant
+ * of hotspot (like core) where we do not support dtrace.
+ */
+#if !defined(DTRACE_ENABLED)
+
+/* hotspot provider probes */
+#define HOTSPOT_CLASS_LOADED(arg0, arg1, arg2, arg3)
+#define HOTSPOT_CLASS_LOADED_ENABLED()  0
+#define HOTSPOT_CLASS_UNLOADED(arg0, arg1, arg2, arg3)
+#define HOTSPOT_CLASS_UNLOADED_ENABLED()  0
+#define HOTSPOT_CLASS_INITIALIZATION_REQUIRED(arg0, arg1, arg2, arg3)
+#define HOTSPOT_CLASS_INITIALIZATION_REQUIRED_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_RECURSIVE(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_RECURSIVE_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_CONCURRENT(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_CONCURRENT_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_CLINIT(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_CLINIT_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_ERROR(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_ERROR_ENABLED() 0
+#define HOTSPOT_CLASS_INITIALIZATION_END(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_CLASS_INITIALIZATION_END_ENABLED() 0
+#define HOTSPOT_COMPILED_METHOD_LOAD(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define HOTSPOT_COMPILED_METHOD_LOAD_ENABLED()  0
+#define HOTSPOT_COMPILED_METHOD_UNLOAD(arg0, arg1, arg2, arg3, arg4, arg5)
+#define HOTSPOT_COMPILED_METHOD_UNLOAD_ENABLED() 0
+#define HOTSPOT_GC_BEGIN(arg0)
+#define HOTSPOT_GC_BEGIN_ENABLED() 0
+#define HOTSPOT_GC_END()
+#define HOTSPOT_GC_END_ENABLED() 0
+#define HOTSPOT_MEM_POOL_GC_BEGIN(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define HOTSPOT_MEM_POOL_GC_BEGIN_ENABLED() 0
+#define HOTSPOT_MEM_POOL_GC_END(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define HOTSPOT_MEM_POOL_GC_END_ENABLED() 0
+#define HOTSPOT_METHOD_COMPILE_BEGIN(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
+#define HOTSPOT_METHOD_COMPILE_BEGIN_ENABLED() 0
+#define HOTSPOT_METHOD_COMPILE_END(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+#define HOTSPOT_METHOD_COMPILE_END_ENABLED() 0
+#define HOTSPOT_METHOD_ENTRY(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+#define HOTSPOT_METHOD_ENTRY_ENABLED() 0
+#define HOTSPOT_METHOD_RETURN(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
+#define HOTSPOT_METHOD_RETURN_ENABLED() 0
+#define HOTSPOT_MONITOR_CONTENDED_ENTER(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_CONTENDED_ENTER_ENABLED() 0
+#define HOTSPOT_MONITOR_CONTENDED_ENTERED(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_CONTENDED_ENTERED_ENABLED() 0
+#define HOTSPOT_MONITOR_CONTENDED_EXIT(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_CONTENDED_EXIT_ENABLED() 0
+#define HOTSPOT_MONITOR_NOTIFY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_NOTIFY_ENABLED() 0
+#define HOTSPOT_MONITOR_NOTIFYALL(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_NOTIFYALL_ENABLED() 0
+#define HOTSPOT_MONITOR_WAIT(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_MONITOR_WAIT_ENABLED() 0
+#define HOTSPOT_MONITOR_WAIT_PROBE(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_WAIT_PROBE_ENABLED() 0
+#define HOTSPOT_MONITOR_WAITED(arg0, arg1, arg2, arg3)
+#define HOTSPOT_MONITOR_WAITED_ENABLED() 0
+#define HOTSPOT_OBJECT_ALLOC(arg0, arg1, arg2, arg3)
+#define HOTSPOT_OBJECT_ALLOC_ENABLED() 0
+#define HOTSPOT_THREAD_START(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_THREAD_START_ENABLED() 0
+#define HOTSPOT_THREAD_STOP(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_THREAD_STOP_ENABLED() 0
+#define HOTSPOT_THREAD_SLEEP_BEGIN(arg0)
+#define HOTSPOT_THREAD_SLEEP_BEGIN_ENABLED() 0
+#define HOTSPOT_THREAD_SLEEP_END(arg0)
+#define HOTSPOT_THREAD_SLEEP_END_ENABLED() 0
+#define HOTSPOT_THREAD_YIELD()
+#define HOTSPOT_THREAD_YIELD_ENABLED() 0
+#define HOTSPOT_THREAD_PARK_BEGIN(arg0, arg1, arg2)
+#define HOTSPOT_THREAD_PARK_BEGIN_ENABLED() 0
+#define HOTSPOT_THREAD_PARK_END(arg0)
+#define HOTSPOT_THREAD_PARK_END_ENABLED() 0
+#define HOTSPOT_THREAD_UNPARK(arg0)
+#define HOTSPOT_THREAD_UNPARK_ENABLED() 0
+#define HOTSPOT_VM_INIT_BEGIN()
+#define HOTSPOT_VM_INIT_BEGIN_ENABLED() 0
+#define HOTSPOT_VM_INIT_END()
+#define HOTSPOT_VM_INIT_END_ENABLED() 0
+#define HOTSPOT_VM_SHUTDOWN()
+#define HOTSPOT_VM_SHUTDOWN_ENABLED() 0
+#define HOTSPOT_VMOPS_REQUEST(arg0, arg1, arg2)
+#define HOTSPOT_VMOPS_REQUEST_ENABLED() 0
+#define HOTSPOT_VMOPS_BEGIN(arg0, arg1, arg2)
+#define HOTSPOT_VMOPS_BEGIN_ENABLED() 0
+#define HOTSPOT_VMOPS_END(arg0, arg1, arg2)
+#define HOTSPOT_VMOPS_END_ENABLED() 0
+
+/* hs_private provider probes */
+#define HS_PRIVATE_CMS_INITMARK_BEGIN()
+#define HS_PRIVATE_CMS_INITMARK_BEGIN_ENABLED() 0
+#define HS_PRIVATE_CMS_INITMARK_END()
+#define HS_PRIVATE_CMS_INITMARK_END_ENABLED() 0
+#define HS_PRIVATE_CMS_REMARK_BEGIN()
+#define HS_PRIVATE_CMS_REMARK_BEGIN_ENABLED() 0
+#define HS_PRIVATE_CMS_REMARK_END()
+#define HS_PRIVATE_CMS_REMARK_END_ENABLED() 0
+#define HS_PRIVATE_HASHTABLE_NEW_ENTRY(arg0, arg1, arg2, arg3)
+#define HS_PRIVATE_HASHTABLE_NEW_ENTRY_ENABLED() 0
+#define HS_PRIVATE_SAFEPOINT_BEGIN()
+#define HS_PRIVATE_SAFEPOINT_BEGIN_ENABLED() 0
+#define HS_PRIVATE_SAFEPOINT_END()
+#define HS_PRIVATE_SAFEPOINT_END_ENABLED() 0
+
+/* hotspot_jni provider probes */
+#define HOTSPOT_JNI_ALLOCOBJECT_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_ALLOCOBJECT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ALLOCOBJECT_RETURN(arg0)
+#define HOTSPOT_JNI_ALLOCOBJECT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(arg0)
+#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(arg0)
+#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBYTEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBYTEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLBYTEMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLBYTEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLCHARMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLCHARMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLCHARMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLCHARMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLCHARMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLCHARMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLCHARMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLFLOATMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLFLOATMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLFLOATMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLFLOATMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLINTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLINTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLINTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLINTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLINTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLINTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLINTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLLONGMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLLONGMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLLONGMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLLONGMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLLONGMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLLONGMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLLONGMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSHORTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSHORTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSHORTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSHORTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICINTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICINTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICINTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICINTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICINTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICINTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICINTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_RETURN(arg0)
+#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHOD_RETURN()
+#define HOTSPOT_JNI_CALLVOIDMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHODA_RETURN()
+#define HOTSPOT_JNI_CALLVOIDMETHODA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CALLVOIDMETHODV_RETURN()
+#define HOTSPOT_JNI_CALLVOIDMETHODV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_CREATEJAVAVM_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_CREATEJAVAVM_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_CREATEJAVAVM_RETURN(arg0)
+#define HOTSPOT_JNI_CREATEJAVAVM_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DEFINECLASS_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_DEFINECLASS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DEFINECLASS_RETURN(arg0)
+#define HOTSPOT_JNI_DEFINECLASS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_DELETEGLOBALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DELETEGLOBALREF_RETURN()
+#define HOTSPOT_JNI_DELETEGLOBALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DELETELOCALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_DELETELOCALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DELETELOCALREF_RETURN()
+#define HOTSPOT_JNI_DELETELOCALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN()
+#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(arg0)
+#define HOTSPOT_JNI_DESTROYJAVAVM_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DESTROYJAVAVM_RETURN(arg0)
+#define HOTSPOT_JNI_DESTROYJAVAVM_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(arg0)
+#define HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(arg0)
+#define HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(arg0)
+#define HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(arg0)
+#define HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(arg0)
+#define HOTSPOT_JNI_EXCEPTIONCHECK_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(arg0)
+#define HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN()
+#define HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(arg0)
+#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN()
+#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(arg0)
+#define HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(arg0)
+#define HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_FATALERROR_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_FATALERROR_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_FINDCLASS_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_FINDCLASS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_FINDCLASS_RETURN(arg0)
+#define HOTSPOT_JNI_FINDCLASS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_FROMREFLECTEDFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_FROMREFLECTEDFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETARRAYLENGTH_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETARRAYLENGTH_RETURN(arg0)
+#define HOTSPOT_JNI_GETARRAYLENGTH_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETBOOLEANFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBOOLEANFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETBOOLEANFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETBYTEARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETBYTEARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETBYTEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETBYTEFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETBYTEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETCHARARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETCHARARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETCHARFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETCHARFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETCHARFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(arg0)
+#define HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(arg0)
+#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_RETURN(arg0)
+#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_RETURN(arg0)
+#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_RETURN(arg0)
+#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETDOUBLEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETDOUBLEFIELD_RETURN()
+#define HOTSPOT_JNI_GETDOUBLEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETENV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETENV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETENV_RETURN(arg0)
+#define HOTSPOT_JNI_GETENV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETFIELDID_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_GETFIELDID_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETFIELDID_RETURN(arg0)
+#define HOTSPOT_JNI_GETFIELDID_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETFLOATARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETFLOATARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETFLOATFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETFLOATFIELD_RETURN()
+#define HOTSPOT_JNI_GETFLOATFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETINTARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETINTARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETINTARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETINTARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETINTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETINTARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETINTARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETINTARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETINTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETINTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETINTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETINTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETJAVAVM_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETJAVAVM_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETJAVAVM_RETURN(arg0)
+#define HOTSPOT_JNI_GETJAVAVM_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETLONGARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETLONGARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETLONGFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETLONGFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETLONGFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETMETHODID_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_GETMETHODID_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETMETHODID_RETURN(arg0)
+#define HOTSPOT_JNI_GETMETHODID_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN(arg0)
+#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETOBJECTCLASS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTCLASS_RETURN(arg0)
+#define HOTSPOT_JNI_GETOBJECTCLASS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETOBJECTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETOBJECTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN(arg0)
+#define HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(arg0)
+#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_RETURN(arg0)
+#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETSHORTARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTARRAYREGION_RETURN()
+#define HOTSPOT_JNI_GETSHORTARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSHORTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSHORTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSHORTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICBYTEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICBYTEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICBYTEFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICBYTEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICCHARFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICCHARFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICCHARFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICCHARFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_RETURN()
+#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_GETSTATICFIELDID_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICFIELDID_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICFIELDID_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICFLOATFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICFLOATFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICFLOATFIELD_RETURN()
+#define HOTSPOT_JNI_GETSTATICFLOATFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICINTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICINTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICINTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICINTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICLONGFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICLONGFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICLONGFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICLONGFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_GETSTATICMETHODID_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICMETHODID_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICMETHODID_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICSHORTFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTATICSHORTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTATICSHORTFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTATICSHORTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTRINGCHARS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGCHARS_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTRINGCHARS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTRINGLENGTH_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETSTRINGREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGREGION_RETURN()
+#define HOTSPOT_JNI_GETSTRINGREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(arg0)
+#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSTRINGUTFREGION_RETURN()
+#define HOTSPOT_JNI_GETSTRINGUTFREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETSUPERCLASS_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_GETSUPERCLASS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETSUPERCLASS_RETURN(arg0)
+#define HOTSPOT_JNI_GETSUPERCLASS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_GETVERSION_ENTRY(arg0)
+#define HOTSPOT_JNI_GETVERSION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_GETVERSION_RETURN(arg0)
+#define HOTSPOT_JNI_GETVERSION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(arg0)
+#define HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ISINSTANCEOF_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_ISINSTANCEOF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ISINSTANCEOF_RETURN(arg0)
+#define HOTSPOT_JNI_ISINSTANCEOF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN(arg0)
+#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_MONITORENTER_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_MONITORENTER_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_MONITORENTER_RETURN(arg0)
+#define HOTSPOT_JNI_MONITORENTER_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_MONITOREXIT_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_MONITOREXIT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_MONITOREXIT_RETURN(arg0)
+#define HOTSPOT_JNI_MONITOREXIT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWBOOLEANARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWBOOLEANARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWBOOLEANARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWBOOLEANARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWBYTEARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWBYTEARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWBYTEARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWBYTEARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWCHARARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWCHARARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWCHARARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWCHARARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(arg0)
+#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWDOUBLEARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWDOUBLEARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWDOUBLEARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWDOUBLEARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWFLOATARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWFLOATARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWFLOATARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWFLOATARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWGLOBALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWGLOBALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWGLOBALREF_RETURN(arg0)
+#define HOTSPOT_JNI_NEWGLOBALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWINTARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWINTARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWINTARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWINTARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWLOCALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWLOCALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWLOCALREF_RETURN(arg0)
+#define HOTSPOT_JNI_NEWLOCALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWLONGARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWLONGARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWLONGARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWLONGARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECT_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_NEWOBJECT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECT_RETURN(arg0)
+#define HOTSPOT_JNI_NEWOBJECT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTA_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_NEWOBJECTA_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTA_RETURN(arg0)
+#define HOTSPOT_JNI_NEWOBJECTA_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWOBJECTARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTV_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_NEWOBJECTV_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWOBJECTV_RETURN(arg0)
+#define HOTSPOT_JNI_NEWOBJECTV_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWSHORTARRAY_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWSHORTARRAY_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWSHORTARRAY_RETURN(arg0)
+#define HOTSPOT_JNI_NEWSHORTARRAY_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWSTRING_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_NEWSTRING_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWSTRING_RETURN(arg0)
+#define HOTSPOT_JNI_NEWSTRING_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWSTRINGUTF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWSTRINGUTF_RETURN(arg0)
+#define HOTSPOT_JNI_NEWSTRINGUTF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(arg0)
+#define HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_POPLOCALFRAME_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_POPLOCALFRAME_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_POPLOCALFRAME_RETURN(arg0)
+#define HOTSPOT_JNI_POPLOCALFRAME_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(arg0)
+#define HOTSPOT_JNI_PUSHLOCALFRAME_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_REGISTERNATIVES_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_REGISTERNATIVES_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_REGISTERNATIVES_RETURN(arg0)
+#define HOTSPOT_JNI_REGISTERNATIVES_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN()
+#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_RETURN()
+#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN()
+#define HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN()
+#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN()
+#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETBOOLEANFIELD_RETURN()
+#define HOTSPOT_JNI_SETBOOLEANFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETBYTEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETBYTEARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETBYTEARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETBYTEARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETBYTEFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETBYTEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETBYTEFIELD_RETURN()
+#define HOTSPOT_JNI_SETBYTEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETCHARARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETCHARARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETCHARARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETCHARARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETCHARFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETCHARFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETCHARFIELD_RETURN()
+#define HOTSPOT_JNI_SETCHARFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETDOUBLEFIELD_RETURN()
+#define HOTSPOT_JNI_SETDOUBLEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETFLOATARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETFLOATARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETFLOATARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETFLOATARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETFLOATFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_SETFLOATFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETFLOATFIELD_RETURN()
+#define HOTSPOT_JNI_SETFLOATFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETINTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETINTARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETINTARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETINTARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETINTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETINTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETINTFIELD_RETURN()
+#define HOTSPOT_JNI_SETINTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETLONGARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETLONGARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETLONGARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETLONGARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETLONGFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETLONGFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETLONGFIELD_RETURN()
+#define HOTSPOT_JNI_SETLONGFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_RETURN()
+#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETOBJECTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETOBJECTFIELD_RETURN()
+#define HOTSPOT_JNI_SETOBJECTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSHORTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
+#define HOTSPOT_JNI_SETSHORTARRAYREGION_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSHORTARRAYREGION_RETURN()
+#define HOTSPOT_JNI_SETSHORTARRAYREGION_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSHORTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSHORTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSHORTFIELD_RETURN()
+#define HOTSPOT_JNI_SETSHORTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICCHARFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICCHARFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICCHARFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICCHARFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICFLOATFIELD_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_SETSTATICFLOATFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICFLOATFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICFLOATFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICINTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICINTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICINTFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICINTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICLONGFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICLONGFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICLONGFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICLONGFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICSHORTFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_SETSTATICSHORTFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_SETSTATICSHORTFIELD_RETURN()
+#define HOTSPOT_JNI_SETSTATICSHORTFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_THROW_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_THROW_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_THROW_RETURN(arg0)
+#define HOTSPOT_JNI_THROW_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_THROWNEW_ENTRY(arg0, arg1, arg2)
+#define HOTSPOT_JNI_THROWNEW_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_THROWNEW_RETURN(arg0)
+#define HOTSPOT_JNI_THROWNEW_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_TOREFLECTEDFIELD_RETURN(arg0)
+#define HOTSPOT_JNI_TOREFLECTEDFIELD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(arg0, arg1, arg2, arg3)
+#define HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_TOREFLECTEDMETHOD_RETURN(arg0)
+#define HOTSPOT_JNI_TOREFLECTEDMETHOD_RETURN_ENABLED()  0
+#define HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(arg0, arg1)
+#define HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY_ENABLED()  0
+#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(arg0)
+#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN_ENABLED()  0
+
+#else /* !defined(DTRACE_ENABLED) */
+#error This file should only be included when dtrace is not enabled
+#endif /* !defined(DTRACE_ENABLED) */
+
+#endif // SHARE_VM_UTILITIES_DTRACE_USDT2_DISABLED_HPP
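
For context on how these empty stubs are consumed, here is a minimal sketch of a probe call site (the wrapper function and its body are hypothetical illustrations; only the macro names are taken from the definitions above). When DTRACE_ENABLED is set, the same macros expand to live USDT2 probes; with the stubs above, both the probe calls and the *_ENABLED() guards compile away entirely:

    // Minimal sketch, assuming HotSpot-internal headers; the function and its
    // body are illustrative only, not code from this changeset.
    #include "utilities/dtrace.hpp"   // real probes, or the no-op stubs above

    static jint jni_monitor_enter_sketch(JNIEnv* env, jobject obj) {
      HOTSPOT_JNI_MONITORENTER_ENTRY(env, obj);    // expands to nothing here
      jint result = JNI_OK;                        // stand-in for the real work
      if (HOTSPOT_JNI_MONITORENTER_RETURN_ENABLED()) {
        // _ENABLED() is defined as 0 above, so the compiler can strip any
        // costly probe-argument preparation placed in this block.
      }
      HOTSPOT_JNI_MONITORENTER_RETURN(result);     // likewise a no-op
      return result;
    }
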
--- a/src/share/vm/utilities/dtrace_usdt2_disabled.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1097 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_UTILITIES_DTRACE_USDT2_DISABLED_HPP
-#define SHARE_VM_UTILITIES_DTRACE_USDT2_DISABLED_HPP
-
-/* This file contains dummy provider probes needed when compiling a hotspot
- * that does not support dtrace probes. This could be because we're building
- * on a system that doesn't support dtrace or because we're building a variant
- * of hotspot (like core) where we do not support dtrace.
- */
-#if !defined(DTRACE_ENABLED)
-
-#ifdef USDT2
-
-/* hotspot provider probes */
-#define HOTSPOT_CLASS_LOADED(arg0, arg1, arg2, arg3)
-#define HOTSPOT_CLASS_LOADED_ENABLED()  0
-#define HOTSPOT_CLASS_UNLOADED(arg0, arg1, arg2, arg3)
-#define HOTSPOT_CLASS_UNLOADED_ENABLED()  0
-#define HOTSPOT_CLASS_INITIALIZATION_REQUIRED(arg0, arg1, arg2, arg3)
-#define HOTSPOT_CLASS_INITIALIZATION_REQUIRED_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_RECURSIVE(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_RECURSIVE_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_CONCURRENT(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_CONCURRENT_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_CLINIT(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_CLINIT_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_ERROR(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_ERROR_ENABLED() 0
-#define HOTSPOT_CLASS_INITIALIZATION_END(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_CLASS_INITIALIZATION_END_ENABLED() 0
-#define HOTSPOT_COMPILED_METHOD_LOAD(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-#define HOTSPOT_COMPILED_METHOD_LOAD_ENABLED()  0
-#define HOTSPOT_COMPILED_METHOD_UNLOAD(arg0, arg1, arg2, arg3, arg4, arg5)
-#define HOTSPOT_COMPILED_METHOD_UNLOAD_ENABLED() 0
-#define HOTSPOT_GC_BEGIN(arg0)
-#define HOTSPOT_GC_BEGIN_ENABLED() 0
-#define HOTSPOT_GC_END()
-#define HOTSPOT_GC_END_ENABLED() 0
-#define HOTSPOT_MEM_POOL_GC_BEGIN(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-#define HOTSPOT_MEM_POOL_GC_BEGIN_ENABLED() 0
-#define HOTSPOT_MEM_POOL_GC_END(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-#define HOTSPOT_MEM_POOL_GC_END_ENABLED() 0
-#define HOTSPOT_METHOD_COMPILE_BEGIN(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-#define HOTSPOT_METHOD_COMPILE_BEGIN_ENABLED() 0
-#define HOTSPOT_METHOD_COMPILE_END(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
-#define HOTSPOT_METHOD_COMPILE_END_ENABLED() 0
-#define HOTSPOT_METHOD_ENTRY(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
-#define HOTSPOT_METHOD_ENTRY_ENABLED() 0
-#define HOTSPOT_METHOD_RETURN(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
-#define HOTSPOT_METHOD_RETURN_ENABLED() 0
-#define HOTSPOT_MONITOR_CONTENDED_ENTER(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_CONTENDED_ENTER_ENABLED() 0
-#define HOTSPOT_MONITOR_CONTENDED_ENTERED(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_CONTENDED_ENTERED_ENABLED() 0
-#define HOTSPOT_MONITOR_CONTENDED_EXIT(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_CONTENDED_EXIT_ENABLED() 0
-#define HOTSPOT_MONITOR_NOTIFY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_NOTIFY_ENABLED() 0
-#define HOTSPOT_MONITOR_NOTIFYALL(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_NOTIFYALL_ENABLED() 0
-#define HOTSPOT_MONITOR_WAIT(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_MONITOR_WAIT_ENABLED() 0
-#define HOTSPOT_MONITOR_WAIT_PROBE(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_WAIT_PROBE_ENABLED() 0
-#define HOTSPOT_MONITOR_WAITED(arg0, arg1, arg2, arg3)
-#define HOTSPOT_MONITOR_WAITED_ENABLED() 0
-#define HOTSPOT_OBJECT_ALLOC(arg0, arg1, arg2, arg3)
-#define HOTSPOT_OBJECT_ALLOC_ENABLED() 0
-#define HOTSPOT_THREAD_START(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_THREAD_START_ENABLED() 0
-#define HOTSPOT_THREAD_STOP(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_THREAD_STOP_ENABLED() 0
-#define HOTSPOT_THREAD_SLEEP_BEGIN(arg0)
-#define HOTSPOT_THREAD_SLEEP_BEGIN_ENABLED() 0
-#define HOTSPOT_THREAD_SLEEP_END(arg0)
-#define HOTSPOT_THREAD_SLEEP_END_ENABLED() 0
-#define HOTSPOT_THREAD_YIELD()
-#define HOTSPOT_THREAD_YIELD_ENABLED() 0
-#define HOTSPOT_THREAD_PARK_BEGIN(arg0, arg1, arg2)
-#define HOTSPOT_THREAD_PARK_BEGIN_ENABLED() 0
-#define HOTSPOT_THREAD_PARK_END(arg0)
-#define HOTSPOT_THREAD_PARK_END_ENABLED() 0
-#define HOTSPOT_THREAD_UNPARK()
-#define HOTSPOT_THREAD_UNPARK_ENABLED() 0
-#define HOTSPOT_VM_INIT_BEGIN()
-#define HOTSPOT_VM_INIT_BEGIN_ENABLED() 0
-#define HOTSPOT_VM_INIT_END()
-#define HOTSPOT_VM_INIT_END_ENABLED() 0
-#define HOTSPOT_VM_SHUTDOWN()
-#define HOTSPOT_VM_SHUTDOWN_ENABLED() 0
-#define HOTSPOT_VMOPS_REQUEST(arg0, arg1, arg2)
-#define HOTSPOT_VMOPS_REQUEST_ENABLED() 0
-#define HOTSPOT_VMOPS_BEGIN(arg0, arg1, arg2)
-#define HOTSPOT_VMOPS_BEGIN_ENABLED() 0
-#define HOTSPOT_VMOPS_END(arg0, arg1, arg2)
-#define HOTSPOT_VMOPS_END_ENABLED() 0
-
-/* hs_private provider probes */
-#define HS_PRIVATE_CMS_INITMARK_BEGIN()
-#define HS_PRIVATE_CMS_INITMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_INITMARK_END()
-#define HS_PRIVATE_CMS_INITMARK_END_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_BEGIN()
-#define HS_PRIVATE_CMS_REMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_END()
-#define HS_PRIVATE_CMS_REMARK_END_ENABLED() 0
-#define HS_PRIVATE_HASHTABLE_NEW_ENTRY(arg0, arg1, arg2, arg3)
-#define HS_PRIVATE_HASHTABLE_NEW_ENTRY_ENABLED() 0
-#define HS_PRIVATE_SAFEPOINT_BEGIN()
-#define HS_PRIVATE_SAFEPOINT_BEGIN_ENABLED() 0
-#define HS_PRIVATE_SAFEPOINT_END()
-#define HS_PRIVATE_SAFEPOINT_END_ENABLED() 0
-
-/* hotspot_jni provider probes */
-#define HOTSPOT_JNI_ALLOCOBJECT_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_ALLOCOBJECT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ALLOCOBJECT_RETURN(arg0)
-#define HOTSPOT_JNI_ALLOCOBJECT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN(arg0)
-#define HOTSPOT_JNI_ATTACHCURRENTTHREAD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN(arg0)
-#define HOTSPOT_JNI_ATTACHCURRENTTHREADASDAEMON_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBOOLEANMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBOOLEANMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBOOLEANMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBOOLEANMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBOOLEANMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBOOLEANMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBYTEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBYTEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBYTEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBYTEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLBYTEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLBYTEMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLBYTEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLCHARMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLCHARMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLCHARMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLCHARMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLCHARMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLCHARMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLCHARMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLDOUBLEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLDOUBLEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLDOUBLEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLDOUBLEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLDOUBLEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLDOUBLEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLFLOATMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLFLOATMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLFLOATMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLFLOATMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLFLOATMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLFLOATMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLFLOATMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLINTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLINTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLINTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLINTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLINTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLINTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLINTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLLONGMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLLONGMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLLONGMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLLONGMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLLONGMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLLONGMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLLONGMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBOOLEANMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALBYTEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALCHARMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALDOUBLEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALFLOATMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALINTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALLONGMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALOBJECTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLNONVIRTUALSHORTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLNONVIRTUALVOIDMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLOBJECTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLOBJECTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLOBJECTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLOBJECTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLOBJECTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLOBJECTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSHORTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSHORTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSHORTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSHORTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSHORTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSHORTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSHORTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBOOLEANMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICBYTEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICCHARMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLSTATICDOUBLEMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLSTATICFLOATMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICINTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICINTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICINTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICINTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICINTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICINTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICINTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICLONGMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICOBJECTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_RETURN(arg0)
-#define HOTSPOT_JNI_CALLSTATICSHORTMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLSTATICVOIDMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLVOIDMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHOD_RETURN()
-#define HOTSPOT_JNI_CALLVOIDMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLVOIDMETHODA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHODA_RETURN()
-#define HOTSPOT_JNI_CALLVOIDMETHODA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CALLVOIDMETHODV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CALLVOIDMETHODV_RETURN()
-#define HOTSPOT_JNI_CALLVOIDMETHODV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_CREATEJAVAVM_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_CREATEJAVAVM_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_CREATEJAVAVM_RETURN(arg0)
-#define HOTSPOT_JNI_CREATEJAVAVM_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DEFINECLASS_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_DEFINECLASS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DEFINECLASS_RETURN(arg0)
-#define HOTSPOT_JNI_DEFINECLASS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DELETEGLOBALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_DELETEGLOBALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DELETEGLOBALREF_RETURN()
-#define HOTSPOT_JNI_DELETEGLOBALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DELETELOCALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_DELETELOCALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DELETELOCALREF_RETURN()
-#define HOTSPOT_JNI_DELETELOCALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN()
-#define HOTSPOT_JNI_DELETEWEAKGLOBALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DESTROYJAVAVM_ENTRY(arg0)
-#define HOTSPOT_JNI_DESTROYJAVAVM_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DESTROYJAVAVM_RETURN(arg0)
-#define HOTSPOT_JNI_DESTROYJAVAVM_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY(arg0)
-#define HOTSPOT_JNI_DETACHCURRENTTHREAD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN(arg0)
-#define HOTSPOT_JNI_DETACHCURRENTTHREAD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_ENSURELOCALCAPACITY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN(arg0)
-#define HOTSPOT_JNI_ENSURELOCALCAPACITY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY(arg0)
-#define HOTSPOT_JNI_EXCEPTIONCHECK_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONCHECK_RETURN(arg0)
-#define HOTSPOT_JNI_EXCEPTIONCHECK_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY(arg0)
-#define HOTSPOT_JNI_EXCEPTIONCLEAR_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN()
-#define HOTSPOT_JNI_EXCEPTIONCLEAR_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY(arg0)
-#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN()
-#define HOTSPOT_JNI_EXCEPTIONDESCRIBE_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY(arg0)
-#define HOTSPOT_JNI_EXCEPTIONOCCURRED_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN(arg0)
-#define HOTSPOT_JNI_EXCEPTIONOCCURRED_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_FATALERROR_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_FATALERROR_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_FINDCLASS_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_FINDCLASS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_FINDCLASS_RETURN(arg0)
-#define HOTSPOT_JNI_FINDCLASS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_FROMREFLECTEDFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_FROMREFLECTEDFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_FROMREFLECTEDFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_FROMREFLECTEDMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETARRAYLENGTH_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETARRAYLENGTH_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETARRAYLENGTH_RETURN(arg0)
-#define HOTSPOT_JNI_GETARRAYLENGTH_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETBOOLEANARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETBOOLEANARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETBOOLEANFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBOOLEANFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETBOOLEANFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETBYTEARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETBYTEARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETBYTEARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETBYTEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETBYTEFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETBYTEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETCHARARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETCHARARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETCHARARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETCHARFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETCHARFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETCHARFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETCREATEDJAVAVMS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN(arg0)
-#define HOTSPOT_JNI_GETCREATEDJAVAVMS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY(arg0)
-#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_RETURN(arg0)
-#define HOTSPOT_JNI_GETDEFAULTJAVAVMINITARGS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_RETURN(arg0)
-#define HOTSPOT_JNI_GETDIRECTBUFFERADDRESS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_RETURN(arg0)
-#define HOTSPOT_JNI_GETDIRECTBUFFERCAPACITY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETDOUBLEARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETDOUBLEARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETDOUBLEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETDOUBLEFIELD_RETURN()
-#define HOTSPOT_JNI_GETDOUBLEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETENV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETENV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETENV_RETURN(arg0)
-#define HOTSPOT_JNI_GETENV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETFIELDID_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_GETFIELDID_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETFIELDID_RETURN(arg0)
-#define HOTSPOT_JNI_GETFIELDID_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETFLOATARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETFLOATARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETFLOATARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETFLOATFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETFLOATFIELD_RETURN()
-#define HOTSPOT_JNI_GETFLOATFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETINTARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETINTARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETINTARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETINTARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETINTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETINTARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETINTARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETINTARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETINTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETINTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETINTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETINTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETJAVAVM_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETJAVAVM_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETJAVAVM_RETURN(arg0)
-#define HOTSPOT_JNI_GETJAVAVM_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETLONGARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETLONGARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETLONGARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETLONGFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETLONGFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETLONGFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETMETHODID_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_GETMETHODID_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETMETHODID_RETURN(arg0)
-#define HOTSPOT_JNI_GETMETHODID_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN(arg0)
-#define HOTSPOT_JNI_GETOBJECTARRAYELEMENT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTCLASS_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETOBJECTCLASS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTCLASS_RETURN(arg0)
-#define HOTSPOT_JNI_GETOBJECTCLASS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETOBJECTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETOBJECTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN(arg0)
-#define HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN(arg0)
-#define HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_RETURN(arg0)
-#define HOTSPOT_JNI_GETSHORTARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETSHORTARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTARRAYREGION_RETURN()
-#define HOTSPOT_JNI_GETSHORTARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSHORTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSHORTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSHORTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICBOOLEANFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICBYTEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICBYTEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICBYTEFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICBYTEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICCHARFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICCHARFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICCHARFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICCHARFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_RETURN()
-#define HOTSPOT_JNI_GETSTATICDOUBLEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICFIELDID_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_GETSTATICFIELDID_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICFIELDID_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICFIELDID_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICFLOATFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICFLOATFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICFLOATFIELD_RETURN()
-#define HOTSPOT_JNI_GETSTATICFLOATFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICINTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICINTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICINTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICINTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICLONGFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICLONGFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICLONGFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICLONGFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICMETHODID_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_GETSTATICMETHODID_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICMETHODID_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICMETHODID_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICOBJECTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICSHORTFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTATICSHORTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTATICSHORTFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTATICSHORTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTRINGCHARS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGCHARS_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTRINGCHARS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTRINGCRITICAL_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGLENGTH_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTRINGLENGTH_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETSTRINGREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGREGION_RETURN()
-#define HOTSPOT_JNI_GETSTRINGREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTRINGUTFCHARS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN(arg0)
-#define HOTSPOT_JNI_GETSTRINGUTFLENGTH_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_GETSTRINGUTFREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSTRINGUTFREGION_RETURN()
-#define HOTSPOT_JNI_GETSTRINGUTFREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETSUPERCLASS_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_GETSUPERCLASS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETSUPERCLASS_RETURN(arg0)
-#define HOTSPOT_JNI_GETSUPERCLASS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_GETVERSION_ENTRY(arg0)
-#define HOTSPOT_JNI_GETVERSION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_GETVERSION_RETURN(arg0)
-#define HOTSPOT_JNI_GETVERSION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_ISASSIGNABLEFROM_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN(arg0)
-#define HOTSPOT_JNI_ISASSIGNABLEFROM_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ISINSTANCEOF_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_ISINSTANCEOF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ISINSTANCEOF_RETURN(arg0)
-#define HOTSPOT_JNI_ISINSTANCEOF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_ISSAMEOBJECT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN(arg0)
-#define HOTSPOT_JNI_ISSAMEOBJECT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_MONITORENTER_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_MONITORENTER_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_MONITORENTER_RETURN(arg0)
-#define HOTSPOT_JNI_MONITORENTER_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_MONITOREXIT_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_MONITOREXIT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_MONITOREXIT_RETURN(arg0)
-#define HOTSPOT_JNI_MONITOREXIT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWBOOLEANARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWBOOLEANARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWBOOLEANARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWBOOLEANARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWBYTEARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWBYTEARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWBYTEARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWBYTEARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWCHARARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWCHARARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWCHARARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWCHARARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(arg0)
-#define HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWDOUBLEARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWDOUBLEARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWDOUBLEARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWDOUBLEARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWFLOATARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWFLOATARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWFLOATARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWFLOATARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWGLOBALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWGLOBALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWGLOBALREF_RETURN(arg0)
-#define HOTSPOT_JNI_NEWGLOBALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWINTARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWINTARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWINTARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWINTARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWLOCALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWLOCALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWLOCALREF_RETURN(arg0)
-#define HOTSPOT_JNI_NEWLOCALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWLONGARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWLONGARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWLONGARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWLONGARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECT_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_NEWOBJECT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECT_RETURN(arg0)
-#define HOTSPOT_JNI_NEWOBJECT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTA_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_NEWOBJECTA_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTA_RETURN(arg0)
-#define HOTSPOT_JNI_NEWOBJECTA_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_NEWOBJECTARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWOBJECTARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTV_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_NEWOBJECTV_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWOBJECTV_RETURN(arg0)
-#define HOTSPOT_JNI_NEWOBJECTV_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWSHORTARRAY_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWSHORTARRAY_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWSHORTARRAY_RETURN(arg0)
-#define HOTSPOT_JNI_NEWSHORTARRAY_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWSTRING_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_NEWSTRING_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWSTRING_RETURN(arg0)
-#define HOTSPOT_JNI_NEWSTRING_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWSTRINGUTF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWSTRINGUTF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWSTRINGUTF_RETURN(arg0)
-#define HOTSPOT_JNI_NEWSTRINGUTF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_NEWWEAKGLOBALREF_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN(arg0)
-#define HOTSPOT_JNI_NEWWEAKGLOBALREF_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_POPLOCALFRAME_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_POPLOCALFRAME_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_POPLOCALFRAME_RETURN(arg0)
-#define HOTSPOT_JNI_POPLOCALFRAME_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_PUSHLOCALFRAME_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_PUSHLOCALFRAME_RETURN(arg0)
-#define HOTSPOT_JNI_PUSHLOCALFRAME_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_REGISTERNATIVES_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_REGISTERNATIVES_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_REGISTERNATIVES_RETURN(arg0)
-#define HOTSPOT_JNI_REGISTERNATIVES_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASEBOOLEANARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASEBYTEARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASECHARARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASEDOUBLEARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASEFLOATARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASEINTARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASELONGARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN()
-#define HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_RETURN()
-#define HOTSPOT_JNI_RELEASESHORTARRAYELEMENTS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_RELEASESTRINGCHARS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN()
-#define HOTSPOT_JNI_RELEASESTRINGCHARS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN()
-#define HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN()
-#define HOTSPOT_JNI_RELEASESTRINGUTFCHARS_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETBOOLEANARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETBOOLEANFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETBOOLEANFIELD_RETURN()
-#define HOTSPOT_JNI_SETBOOLEANFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETBYTEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETBYTEARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETBYTEARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETBYTEARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETBYTEFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETBYTEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETBYTEFIELD_RETURN()
-#define HOTSPOT_JNI_SETBYTEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETCHARARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETCHARARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETCHARARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETCHARARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETCHARFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETCHARFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETCHARFIELD_RETURN()
-#define HOTSPOT_JNI_SETCHARFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETDOUBLEARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_SETDOUBLEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETDOUBLEFIELD_RETURN()
-#define HOTSPOT_JNI_SETDOUBLEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETFLOATARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETFLOATARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETFLOATARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETFLOATARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETFLOATFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_SETFLOATFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETFLOATFIELD_RETURN()
-#define HOTSPOT_JNI_SETFLOATFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETINTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETINTARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETINTARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETINTARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETINTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETINTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETINTFIELD_RETURN()
-#define HOTSPOT_JNI_SETINTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETLONGARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETLONGARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETLONGARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETLONGARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETLONGFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETLONGFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETLONGFIELD_RETURN()
-#define HOTSPOT_JNI_SETLONGFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_RETURN()
-#define HOTSPOT_JNI_SETOBJECTARRAYELEMENT_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETOBJECTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETOBJECTFIELD_RETURN()
-#define HOTSPOT_JNI_SETOBJECTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSHORTARRAYREGION_ENTRY(arg0, arg1, arg2, arg3, arg4)
-#define HOTSPOT_JNI_SETSHORTARRAYREGION_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSHORTARRAYREGION_RETURN()
-#define HOTSPOT_JNI_SETSHORTARRAYREGION_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSHORTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSHORTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSHORTFIELD_RETURN()
-#define HOTSPOT_JNI_SETSHORTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICBOOLEANFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICBYTEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICBYTEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICCHARFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICCHARFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICCHARFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICCHARFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICDOUBLEFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICFLOATFIELD_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_SETSTATICFLOATFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICFLOATFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICFLOATFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICINTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICINTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICINTFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICINTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICLONGFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICLONGFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICLONGFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICLONGFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICOBJECTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICSHORTFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_SETSTATICSHORTFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_SETSTATICSHORTFIELD_RETURN()
-#define HOTSPOT_JNI_SETSTATICSHORTFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_THROW_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_THROW_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_THROW_RETURN(arg0)
-#define HOTSPOT_JNI_THROW_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_THROWNEW_ENTRY(arg0, arg1, arg2)
-#define HOTSPOT_JNI_THROWNEW_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_THROWNEW_RETURN(arg0)
-#define HOTSPOT_JNI_THROWNEW_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_TOREFLECTEDFIELD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_TOREFLECTEDFIELD_RETURN(arg0)
-#define HOTSPOT_JNI_TOREFLECTEDFIELD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY(arg0, arg1, arg2, arg3)
-#define HOTSPOT_JNI_TOREFLECTEDMETHOD_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_TOREFLECTEDMETHOD_RETURN(arg0)
-#define HOTSPOT_JNI_TOREFLECTEDMETHOD_RETURN_ENABLED()  0
-#define HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY(arg0, arg1)
-#define HOTSPOT_JNI_UNREGISTERNATIVES_ENTRY_ENABLED()  0
-#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN(arg0)
-#define HOTSPOT_JNI_UNREGISTERNATIVES_RETURN_ENABLED()  0
-
-#else /* USDT2 */
-#error This file should only be included for USDT2
-#endif /* USDT2 */
-
-#else /* !defined(DTRACE_ENABLED) */
-#error This file should only be included when dtrace is not enabled
-#end /* !defined(DTRACE_ENABLED) */
-
-#endif // SHARE_VM_UTILITIES_DTRACE_USDT2_DISABLED_HPP
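The file removed above implements the usual disabled-probe idiom: with DTrace compiled out, every probe macro expands to nothing and every *_ENABLED() companion expands to the constant 0, so guarded probe sites fold away entirely. A condensed sketch of that idiom, using a hypothetical provider and probe name rather than HotSpot's:

/* Hypothetical disabled-probe stubs (MYPROVIDER_EVENT_* are illustrative names). */
#define MYPROVIDER_EVENT_ENTRY(arg0, arg1)      /* expands to nothing */
#define MYPROVIDER_EVENT_ENTRY_ENABLED()  0     /* the probe is never enabled */

void do_work(int id) {
  if (MYPROVIDER_EVENT_ENTRY_ENABLED()) {       /* constant-folds to if (0) */
    MYPROVIDER_EVENT_ENTRY(id, 0);              /* compiled away entirely */
  }
}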
--- a/src/share/vm/utilities/elfFile.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfFile.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
 #include "memory/allocation.inline.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/elfFile.hpp"
+#include "utilities/elfFuncDescTable.hpp"
 #include "utilities/elfStringTable.hpp"
 #include "utilities/elfSymbolTable.hpp"
 
@@ -43,6 +44,7 @@
   memset(&m_elfHdr, 0, sizeof(m_elfHdr));
   m_string_tables = NULL;
   m_symbol_tables = NULL;
+  m_funcDesc_table = NULL;
   m_next = NULL;
   m_status = NullDecoder::no_error;
 
@@ -119,8 +121,8 @@
         m_status = NullDecoder::file_invalid;
         return false;
       }
-      // string table
       if (shdr.sh_type == SHT_STRTAB) {
+        // string tables
         ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index);
         if (table == NULL) {
           m_status = NullDecoder::out_of_memory;
@@ -128,6 +130,7 @@
         }
         add_string_table(table);
       } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) {
+        // symbol tables
         ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr);
         if (table == NULL) {
           m_status = NullDecoder::out_of_memory;
@@ -136,6 +139,46 @@
         add_symbol_table(table);
       }
     }
+
+#if defined(PPC64)
+    // Now read the .opd section, which contains the PPC64 function descriptor table.
+    // The .opd section is only available on PPC64 (see for example:
+    // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html)
+    // so this code would do no harm on other platforms, but for performance reasons we only
+    // execute it on PPC64.
+    // Notice that we can only find the .opd section after we have successfully read the string
+    // tables in the previous loop, because we need to query each section's name, which is
+    // stored in one of the string tables (namely the one at index m_elfHdr.e_shstrndx).
+
+    // Reset the file pointer to the start of the section header table
+    if (fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) {
+      m_status = NullDecoder::file_invalid;
+      return false;
+    }
+    for (int index = 0; index < m_elfHdr.e_shnum; index++) {
+      if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) {
+        m_status = NullDecoder::file_invalid;
+        return false;
+      }
+      if (m_elfHdr.e_shstrndx != SHN_UNDEF && shdr.sh_type == SHT_PROGBITS) {
+        ElfStringTable* string_table = get_string_table(m_elfHdr.e_shstrndx);
+        if (string_table == NULL) {
+          m_status = NullDecoder::file_invalid;
+          return false;
+        }
+        char buf[8]; // '8' is enough because we only want to read ".opd"
+        if (string_table->string_at(shdr.sh_name, buf, sizeof(buf)) && !strncmp(".opd", buf, 4)) {
+          m_funcDesc_table = new (std::nothrow) ElfFuncDescTable(m_file, shdr, index);
+          if (m_funcDesc_table == NULL) {
+            m_status = NullDecoder::out_of_memory;
+            return false;
+          }
+          break;
+        }
+      }
+    }
+#endif
+
   }
   return true;
 }
@@ -151,8 +194,9 @@
   int off = INT_MAX;
   bool found_symbol = false;
   while (symbol_table != NULL) {
-    if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off)) {
+    if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off, m_funcDesc_table)) {
       found_symbol = true;
+      break;
     }
     symbol_table = symbol_table->m_next;
   }
@@ -221,4 +265,4 @@
 }
 #endif
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
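The loop added above locates the .opd section by name, which is why the section-name string table (index e_shstrndx) must already be loaded. Below is a minimal standalone sketch of that scan; it is not HotSpot code, it assumes Linux's <elf.h> and a 64-bit ELF, it reduces error handling to early returns, and find_opd_offset is an illustrative name:

#include <elf.h>
#include <cstdio>
#include <cstring>

// Walk the section header table of an ELF64 file and return the file offset
// of the section named ".opd", or -1 if it is absent or the file is invalid.
long find_opd_offset(FILE* f) {
  Elf64_Ehdr eh;
  if (fseek(f, 0, SEEK_SET) || fread(&eh, sizeof(eh), 1, f) != 1) return -1;
  if (eh.e_shstrndx == SHN_UNDEF) return -1;

  // Header of the section-name string table (the section at index e_shstrndx).
  Elf64_Shdr strsh;
  if (fseek(f, (long)(eh.e_shoff + eh.e_shstrndx * sizeof(Elf64_Shdr)), SEEK_SET) ||
      fread(&strsh, sizeof(strsh), 1, f) != 1) return -1;

  for (int i = 0; i < eh.e_shnum; i++) {
    Elf64_Shdr sh;
    if (fseek(f, (long)(eh.e_shoff + i * sizeof(Elf64_Shdr)), SEEK_SET) ||
        fread(&sh, sizeof(sh), 1, f) != 1) return -1;
    if (sh.sh_type != SHT_PROGBITS) continue;   // .opd is a PROGBITS section

    char name[8] = {0};                         // ".opd" plus terminator fits
    if (fseek(f, (long)(strsh.sh_offset + sh.sh_name), SEEK_SET) ||
        fread(name, 1, sizeof(name) - 1, f) == 0) return -1;
    if (strncmp(name, ".opd", 4) == 0) return (long)sh.sh_offset;
  }
  return -1;  // no .opd section (e.g. not a PPC64 ELFv1 binary)
}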
--- a/src/share/vm/utilities/elfFile.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfFile.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
 
 class ElfStringTable;
 class ElfSymbolTable;
+class ElfFuncDescTable;
 
 
 // On Solaris/Linux platforms, libjvm.so does contain all private symbols.
@@ -150,9 +151,12 @@
   // string tables
   ElfStringTable*              m_string_tables;
 
+  // function descriptors table
+  ElfFuncDescTable*            m_funcDesc_table;
+
   NullDecoder::decoder_status  m_status;
 };
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
 
 #endif // SHARE_VM_UTILITIES_ELF_FILE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfFuncDescTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#if !defined(_WINDOWS) && !defined(__APPLE__)
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/elfFuncDescTable.hpp"
+
+ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) {
+  assert(file, "null file handle");
+  // The actual function address (i.e. function entry point) is always the
+  // first value in the function descriptor (on IA64 and PPC64 they look as follows):
+  // PPC64: [function entry point, TOC pointer, environment pointer]
+  // IA64 : [function entry point, GP (global pointer) value]
+  // Unfortunately 'shdr.sh_entsize' doesn't always seem to contain this size (it's zero on PPC64) so we can't assert
+  // assert(IA64_ONLY(2) PPC64_ONLY(3) * sizeof(address) == shdr.sh_entsize, "Size mismatch for '.opd' section entries");
+
+  m_funcDescs = NULL;
+  m_file = file;
+  m_index = index;
+  m_status = NullDecoder::no_error;
+
+  // try to load the function descriptor table
+  long cur_offset = ftell(file);
+  if (cur_offset != -1) {
+    // call malloc so we can back up if memory allocation fails.
+    m_funcDescs = (address*)os::malloc(shdr.sh_size, mtInternal);
+    if (m_funcDescs) {
+      if (fseek(file, shdr.sh_offset, SEEK_SET) ||
+          fread((void*)m_funcDescs, shdr.sh_size, 1, file) != 1 ||
+          fseek(file, cur_offset, SEEK_SET)) {
+        m_status = NullDecoder::file_invalid;
+        os::free(m_funcDescs);
+        m_funcDescs = NULL;
+      }
+    }
+    if (!NullDecoder::is_error(m_status)) {
+      memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr));
+    }
+  } else {
+    m_status = NullDecoder::file_invalid;
+  }
+}
+
+ElfFuncDescTable::~ElfFuncDescTable() {
+  if (m_funcDescs != NULL) {
+    os::free(m_funcDescs);
+  }
+}
+
+address ElfFuncDescTable::lookup(Elf_Word index) {
+  if (NullDecoder::is_error(m_status)) {
+    return NULL;
+  }
+
+  if (m_funcDescs != NULL) {
+    if (m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index < m_shdr.sh_addr + m_shdr.sh_size) {
+      // Notice that 'index' is a byte-offset into the function descriptor table.
+      return m_funcDescs[(index - m_shdr.sh_addr) / sizeof(address)];
+    }
+    return NULL;
+  } else {
+    long cur_pos;
+    address addr;
+    if (!(m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index < m_shdr.sh_addr + m_shdr.sh_size)) {
+      // don't put the whole decoder in error mode if we just tried a wrong index
+      return NULL;
+    }
+    if ((cur_pos = ftell(m_file)) == -1 ||
+        fseek(m_file, m_shdr.sh_offset + index - m_shdr.sh_addr, SEEK_SET) ||
+        fread(&addr, sizeof(addr), 1, m_file) != 1 ||
+        fseek(m_file, cur_pos, SEEK_SET)) {
+      m_status = NullDecoder::file_invalid;
+      return NULL;
+    }
+    return addr;
+  }
+}
+
+#endif // !_WINDOWS && !__APPLE__
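
The cached branch of lookup() reduces to a short range check plus pointer
arithmetic. A self-contained sketch (opd_entry_point is an illustrative name,
not HotSpot code; the strict upper bound rejects the one-past-the-end offset):

    #include <stdint.h>
    #include <stddef.h>

    // 'descs' is the in-memory copy of '.opd'; 'sh_addr'/'sh_size' come from
    // that section's header; 'st_value' is the symbol value, i.e. an address
    // inside '.opd'. Returns 0 when the value is out of range.
    static uintptr_t opd_entry_point(const uintptr_t* descs, uintptr_t sh_addr,
                                     size_t sh_size, uintptr_t st_value) {
      if (st_value < sh_addr || st_value >= sh_addr + sh_size) return 0;
      // 'st_value - sh_addr' is a byte offset into the section; each slot is
      // pointer-sized and the entry point is the first slot of a descriptor.
      return descs[(st_value - sh_addr) / sizeof(uintptr_t)];
    }

    int main() {
      // A fake '.opd' with two [entry, TOC, env] descriptors, loaded at 0x1000.
      uintptr_t fake[6] = {0x2000, 0, 0, 0x3000, 0, 0};
      // The second descriptor starts 3 pointers (24 bytes on LP64) in.
      return opd_entry_point(fake, 0x1000, sizeof(fake),
                             0x1000 + 3 * sizeof(uintptr_t)) == 0x3000 ? 0 : 1;
    }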
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/elfFuncDescTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_ELF_FUNC_DESC_TABLE_HPP
+#define SHARE_VM_UTILITIES_ELF_FUNC_DESC_TABLE_HPP
+
+#if !defined(_WINDOWS) && !defined(__APPLE__)
+
+
+#include "memory/allocation.hpp"
+#include "utilities/decoder.hpp"
+#include "utilities/elfFile.hpp"
+
+/*
+
+On PowerPC-64 (and other architectures like for example IA64) a pointer to a
+function is not just a plain code address, but instead a pointer to a so-called
+function descriptor (which is simply a structure containing 3 pointers).
+This fact is also reflected in the ELF ABI for PowerPC-64.
+
+On architectures like x86 or SPARC, the ELF symbol table contains the start
+address and size of an object. So for example for a function object (i.e. type
+'STT_FUNC') the symbol table's 'st_value' and 'st_size' fields directly
+represent the starting address and size of that function. On PPC64 however, the
+symbol table's 'st_value' field only contains an index into another, PPC64
+specific '.opd' (official procedure descriptors) section, while the 'st_size'
+field still holds the size of the corresponding function. In order to get the
+actual start address of a function, it is necessary to read the corresponding
+function descriptor entry in the '.opd' section at the corresponding index and
+extract the start address from there.
+
+That's exactly what this 'ElfFuncDescTable' class is used for. If the HotSpot
+runs on a PPC64 machine, and the corresponding ELF file contains an '.opd'
+section (which is actually mandatory on PPC64), it will be read into an object
+of type 'ElfFuncDescTable' just like the string and symbol table sections.
+Later on, during symbol lookup in 'ElfSymbolTable::lookup()' this function
+descriptor table will be used if available to find the real function address.
+
+All this is how things work today (2013) on contemporary Linux distributions
+(e.g. SLES 10) and newer versions of GCC (i.e. > 4.0). However, there is a history,
+and it goes like this:
+
+In SLES 9 times (some time before GCC 3.4) gcc/ld on PPC64 generated two
+entries in the symbol table for every function. The value of the symbol with
+the name of the function was the address of the function descriptor while the
+dot '.' prefixed name was reserved to hold the actual address of that function
+(http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#FUNC-DES).
+
+For a C-function 'foo' this resulted in two symbol table entries like this
+(extracted from the output of 'readelf -a <lib.so>'):
+
+Section Headers:
+  [ 9] .text             PROGBITS         0000000000000a20  00000a20
+       00000000000005a0  0000000000000000  AX       0     0     16
+  [21] .opd              PROGBITS         00000000000113b8  000013b8
+       0000000000000138  0000000000000000  WA       0     0     8
+
+Symbol table '.symtab' contains 86 entries:
+   Num:    Value          Size Type    Bind   Vis      Ndx Name
+    76: 00000000000114c0    24 FUNC    GLOBAL DEFAULT   21 foo
+    78: 0000000000000bb0    76 FUNC    GLOBAL DEFAULT    9 .foo
+
+You can see now that the '.foo' entry actually points into the '.text' segment
+('Ndx'=9) and its value and size fields represent the function's actual address
+and size. On the other hand, the entry for plain 'foo' points into the '.opd'
+section ('Ndx'=21) and its value and size fields are the index into the '.opd'
+section and the size of the corresponding '.opd' section entry (3 pointers on
+PPC64).
+
+These so-called 'dot symbols' were dropped from GCC and BINUTILS around gcc 3.4,
+see http://gcc.gnu.org/ml/gcc-patches/2004-08/msg00557.html.
+Nevertheless it may still be necessary to support both formats, because we may
+either run on an old system, or functions which come from old-style libraries
+may appear in a stack trace at any time.
+
+Therefore, during symbol lookup in 'ElfSymbolTable::lookup()', we not only have
+to check for the presence of the function descriptor table. We additionally have
+to check that the symbol table entry references the '.opd' section; only in
+that case can we resolve the actual function address from there. Otherwise we
+use the plain 'st_value' field from the symbol table as the function address. This
+way we can also look up symbols in old-style ELF libraries (although we get
+the 'dotted' versions in that case). However, if present, the 'dot' will be
+conditionally removed on PPC64 from the symbol in 'ElfDecoder::demangle()' in
+decoder_linux.cpp.
+
+Notice that we cannot reliably get the function address from old-style
+libraries because the 'st_value' field of the symbol table entries which point
+into the '.opd' section denote the size of the corresponding '.opd' entry and
+not that of the corresponding function. This has changed for the symbol table
+entries in new-style libraries as described at the beginning of this
+documentation.
+
+*/
+
+class ElfFuncDescTable: public CHeapObj<mtInternal> {
+  friend class ElfFile;
+ public:
+  ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index);
+  ~ElfFuncDescTable();
+
+  // return the function address for the function descriptor at 'index' or NULL on error
+  address lookup(Elf_Word index);
+
+  int get_index() { return m_index; };
+
+  NullDecoder::decoder_status get_status() { return m_status; };
+
+ protected:
+  // holds the complete function descriptor section if
+  // we can allocate enough memory
+  address*            m_funcDescs;
+
+  // file contains string table
+  FILE*               m_file;
+
+  // section header
+  Elf_Shdr            m_shdr;
+
+  // The section index of this function descriptor (i.e. '.opd') section in the ELF file
+  int                 m_index;
+
+  NullDecoder::decoder_status  m_status;
+};
+
+#endif // !_WINDOWS && !__APPLE__
+
+#endif // SHARE_VM_UTILITIES_ELF_FUNC_DESC_TABLE_HPP
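
To make the '.opd' indirection concrete, here is the arithmetic for the 'foo'
entry from the readelf listing quoted in the header comment above, as a small
self-contained program (a sketch; the slot computation is the same one
ElfFuncDescTable::lookup() performs on its cached copy of the section):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Numbers taken from the readelf excerpt in the comment above.
      const uint64_t opd_sh_addr  = 0x113b8;  // address of the '.opd' section
      const uint64_t foo_st_value = 0x114c0;  // st_value of symbol 'foo'
      const uint64_t byte_off = foo_st_value - opd_sh_addr;   // 0x108 = 264
      const uint64_t slot     = byte_off / sizeof(uint64_t);  // 264 / 8 = 33
      // m_funcDescs[33] would therefore hold foo's actual entry point -- the
      // address that the old-style '.foo' symbol (0xbb0) carried directly.
      printf("foo's descriptor: byte offset %llu, pointer slot %llu\n",
             (unsigned long long)byte_off, (unsigned long long)slot);
      return 0;
    }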
--- a/src/share/vm/utilities/elfStringTable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfStringTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,4 +87,4 @@
   }
 }
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
--- a/src/share/vm/utilities/elfStringTable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfStringTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -70,6 +70,6 @@
   NullDecoder::decoder_status  m_status;
 };
 
-#endif // _WINDOWS and _APPLE
+#endif // !_WINDOWS && !__APPLE__
 
 #endif // SHARE_VM_UTILITIES_ELF_STRING_TABLE_HPP
--- a/src/share/vm/utilities/elfSymbolTable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfSymbolTable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #if !defined(_WINDOWS) && !defined(__APPLE__)
 
 #include "memory/allocation.inline.hpp"
+#include "utilities/elfFuncDescTable.hpp"
 #include "utilities/elfSymbolTable.hpp"
 
 ElfSymbolTable::ElfSymbolTable(FILE* file, Elf_Shdr shdr) {
@@ -68,7 +69,7 @@
   }
 }
 
-bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset) {
+bool ElfSymbolTable::lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) {
   assert(stringtableIndex, "null string table index pointer");
   assert(posIndex, "null string table offset pointer");
   assert(offset, "null offset pointer");
@@ -77,19 +78,25 @@
     return false;
   }
 
-  address pc = 0;
   size_t  sym_size = sizeof(Elf_Sym);
   assert((m_shdr.sh_size % sym_size) == 0, "check size");
   int count = m_shdr.sh_size / sym_size;
   if (m_symbols != NULL) {
     for (int index = 0; index < count; index ++) {
       if (STT_FUNC == ELF_ST_TYPE(m_symbols[index].st_info)) {
-        address sym_addr = (address)m_symbols[index].st_value;
-        if (sym_addr < addr && (addr - sym_addr) < *offset) {
-          pc = (address)m_symbols[index].st_value;
-          *offset = (int)(addr - pc);
+        Elf_Word st_size = m_symbols[index].st_size;
+        address sym_addr;
+        if (funcDescTable != NULL && funcDescTable->get_index() == m_symbols[index].st_shndx) {
+          // We need to go another step through the function descriptor table (currently PPC64 only)
+          sym_addr = funcDescTable->lookup(m_symbols[index].st_value);
+        } else {
+          sym_addr = (address)m_symbols[index].st_value;
+        }
+        if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
+          *offset = (int)(addr - sym_addr);
           *posIndex = m_symbols[index].st_name;
           *stringtableIndex = m_shdr.sh_link;
+          return true;
         }
       }
     }
@@ -105,12 +112,19 @@
     for (int index = 0; index < count; index ++) {
       if (fread(&sym, sym_size, 1, m_file) == 1) {
         if (STT_FUNC == ELF_ST_TYPE(sym.st_info)) {
-          address sym_addr = (address)sym.st_value;
-          if (sym_addr < addr && (addr - sym_addr) < *offset) {
-            pc = (address)sym.st_value;
-            *offset = (int)(addr - pc);
+          Elf_Word st_size = sym.st_size;
+          address sym_addr;
+          if (funcDescTable != NULL && funcDescTable->get_index() == sym.st_shndx) {
+            // We need to go another step through the function descriptor table (currently PPC64 only)
+            sym_addr = funcDescTable->lookup(sym.st_value);
+          } else {
+            sym_addr = (address)sym.st_value;
+          }
+          if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) {
+            *offset = (int)(addr - sym_addr);
             *posIndex = sym.st_name;
             *stringtableIndex = m_shdr.sh_link;
+            return true;
           }
         }
       } else {
@@ -123,4 +137,4 @@
   return true;
 }
 
-#endif // _WINDOWS
+#endif // !_WINDOWS && !__APPLE__
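
The changed acceptance test above is worth isolating: the old code tracked the
nearest symbol below 'addr', while the new code requires 'addr' to fall inside
the function's [start, start + size) range, which is what makes the early
'return true' safe, since at most one function can contain 'addr'. A standalone
sketch (symbol_covers is an illustrative name, not HotSpot code):

    #include <cstdint>

    // True when 'addr' lies within the function starting at 'sym_addr' whose
    // symbol table entry reports a size of 'st_size' bytes.
    static bool symbol_covers(uintptr_t addr, uintptr_t sym_addr, uint64_t st_size) {
      return sym_addr <= addr && (uint64_t)(addr - sym_addr) < st_size;
    }

    int main() {
      // A 76-byte function starting at 0xbb0 (sizes as in the 'foo' example).
      return symbol_covers(0xbc0, 0xbb0, 76) && !symbol_covers(0xc00, 0xbb0, 76) ? 0 : 1;
    }

Note the strict upper bound: an address exactly at sym_addr + st_size belongs
to the next function, if any.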
--- a/src/share/vm/utilities/elfSymbolTable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/elfSymbolTable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,7 @@
   ~ElfSymbolTable();
 
   // search the symbol that is nearest to the specified address.
-  bool lookup(address addr, int* stringtableIndex, int* posIndex, int* offset);
+  bool lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable);
 
   NullDecoder::decoder_status get_status() { return m_status; };
 
@@ -65,6 +65,6 @@
   NullDecoder::decoder_status  m_status;
 };
 
-#endif // _WINDOWS and _APPLE
+#endif // !_WINDOWS && !__APPLE__
 
 #endif // SHARE_VM_UTILITIES_ELF_SYMBOL_TABLE_HPP
--- a/src/share/vm/utilities/exceptions.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/exceptions.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/exceptions.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/exceptions.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -223,6 +223,7 @@
 #define CHECK_NH                                 CHECK_(Handle())
 #define CHECK_NULL                               CHECK_(NULL)
 #define CHECK_false                              CHECK_(false)
+#define CHECK_JNI_ERR                            CHECK_(JNI_ERR)
 
 #define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (void)(0
 #define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0
--- a/src/share/vm/utilities/globalDefinitions.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/globalDefinitions.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -38,6 +38,9 @@
 #ifdef TARGET_COMPILER_sparcWorks
 # include "utilities/globalDefinitions_sparcWorks.hpp"
 #endif
+#ifdef TARGET_COMPILER_xlc
+# include "utilities/globalDefinitions_xlc.hpp"
+#endif
 
 #include "utilities/macros.hpp"
 
@@ -149,7 +152,7 @@
 // The larger HeapWordSize for 64bit requires larger heaps
 // for the same application running in 64bit.  See bug 4967770.
 // The minimum alignment to a heap word size is done.  Other
-// parts of the memory system may required additional alignment
+// parts of the memory system may require additional alignment
 // and are responsible for those alignments.
 #ifdef _LP64
 #define ScaleForWordSize(x) align_size_down_((x) * 13 / 10, HeapWordSize)
@@ -395,6 +398,17 @@
 #define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
 #endif
 
+// To assure the IRIW property on processors that are not multiple copy
+// atomic, sync instructions must be issued between volatile reads to
+// assure their ordering, instead of after volatile stores.
+// (See "A Tutorial Introduction to the ARM and POWER Relaxed Memory Models"
+// by Luc Maranget, Susmit Sarkar and Peter Sewell, INRIA/Cambridge)
+#ifdef CPU_NOT_MULTIPLE_COPY_ATOMIC
+const bool support_IRIW_for_not_multiple_copy_atomic_cpu = true;
+#else
+const bool support_IRIW_for_not_multiple_copy_atomic_cpu = false;
+#endif
+
 // The byte alignment to be used by Arena::Amalloc.  See bugid 4169348.
 // Note: this value must be a power of 2
 
--- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2012, 2013 SAP AG. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
+#define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
+
+#include "prims/jni.h"
+
+// This file holds compiler-dependent includes,
+// globally used constants & types, class (forward)
+// declarations and a few frequently used utility functions.
+
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <wchar.h>
+
+#include <math.h>
+#ifndef FP_PZERO
+// Linux doesn't have positive/negative zero
+#define FP_PZERO FP_ZERO
+#endif
+#if (!defined fpclass)
+#define fpclass fpclassify
+#endif
+
+#include <time.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <pthread.h>
+
+#include <limits.h>
+#include <errno.h>
+
+#include <stdint.h>
+
+// Use XLC compiler builtins instead of inline assembler
+#define USE_XLC_BUILTINS
+#ifdef USE_XLC_BUILTINS
+#include <builtins.h>
+  #if __IBMCPP__ < 1000
+  // the function prototype for __dcbtst(void *) is missing in XLC V8.0
+  // I could compile a little test, where I provided the prototype.
+  // The generated code was correct there. This is the prototype:
+  // extern "builtin" void __dcbtst (void *);
+  // For now we don't make use of it when compiling with XLC V8.0
+  #else
+  // __IBMCPP__ >= 1000
+  // XLC V10 provides the prototype for __dcbtst (void *);
+  #define USE_XLC_PREFETCH_WRITE_BUILTIN
+  #endif
+#endif // USE_XLC_BUILTINS
+
+// NULL vs NULL_WORD:
+// On Linux NULL is defined as a special type '__null'. Assigning __null to
+// integer variable will cause gcc warning. Use NULL_WORD in places where a
+// pointer is stored as integer value.  On some platforms, sizeof(intptr_t) >
+// sizeof(void*), so here we want something which is integer type, but has the
+// same size as a pointer.
+#ifdef __GNUC__
+  #error XLC and __GNUC__?
+#else
+  #define NULL_WORD  NULL
+#endif
+
+// AIX also needs a 64 bit NULL to work as a null address pointer.
+// Most system includes on AIX would define it as an int 0 if not already
+// defined, with one exception: /usr/include/dirent.h will unconditionally
+// redefine NULL to int 0 again. In this case you need to copy the following
+// defines to a position after #include <dirent.h> (see jvm_aix.h).
+#ifdef AIX
+  #ifdef _LP64
+    #undef NULL
+    #define NULL 0L
+  #else
+    #ifndef NULL
+      #define NULL 0
+    #endif
+  #endif
+#endif // AIX
+
+// Compiler-specific primitive types
+// All defs of int (uint16_t etc.) are defined in AIX' /usr/include/stdint.h
+
+// Additional Java basic types
+
+typedef uint8_t  jubyte;
+typedef uint16_t jushort;
+typedef uint32_t juint;
+typedef uint64_t julong;
+
+//----------------------------------------------------------------------------------------------------
+// Special (possibly not-portable) casts
+// Cast floats into same-size integers and vice-versa w/o changing bit-pattern
+// %%%%%% These seem like standard C++ to me--how about factoring them out? - Ungar
+
+inline jint    jint_cast   (jfloat  x)           { return *(jint*   )&x; }
+inline jlong   jlong_cast  (jdouble x)           { return *(jlong*  )&x; }
+
+inline jfloat  jfloat_cast (jint    x)           { return *(jfloat* )&x; }
+inline jdouble jdouble_cast(jlong   x)           { return *(jdouble*)&x; }
+
+//----------------------------------------------------------------------------------------------------
+// Constant for jlong (specifying a long long constant is C++ compiler specific)
+
+// Build a 64bit integer constant
+#define CONST64(x)  (x ## LL)
+#define UCONST64(x) (x ## ULL)
+
+const jlong min_jlong = CONST64(0x8000000000000000);
+const jlong max_jlong = CONST64(0x7fffffffffffffff);
+
+//----------------------------------------------------------------------------------------------------
+// Debugging
+
+#define DEBUG_EXCEPTION ::abort();
+
+extern "C" void breakpoint();
+#define BREAKPOINT ::breakpoint()
+
+// checking for nanness
+#ifdef AIX
+inline int g_isnan(float  f) { return isnan(f); }
+inline int g_isnan(double f) { return isnan(f); }
+#else
+#error "missing platform-specific definition here"
+#endif
+
+// Checking for finiteness
+
+inline int g_isfinite(jfloat  f)                 { return finite(f); }
+inline int g_isfinite(jdouble f)                 { return finite(f); }
+
+
+// Wide characters
+
+inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
+
+
+// Portability macros
+#define PRAGMA_INTERFACE             #pragma interface
+#define PRAGMA_IMPLEMENTATION        #pragma implementation
+#define VALUE_OBJ_CLASS_SPEC
+
+// Formatting.
+#ifdef _LP64
+#define FORMAT64_MODIFIER "l"
+#else // !_LP64
+#define FORMAT64_MODIFIER "ll"
+#endif // _LP64
+
+// Cannot use xlc's offsetof as implementation of hotspot's
+// offset_of(), because xlc warns about applying offsetof() to non-POD
+// object and xlc cannot compile the expression offsetof(DataLayout,
+// _cells[index]) in DataLayout::cell_offset() .  Therefore we define
+// offset_of as it is defined for gcc.
+#define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)
+
+// Some constant sizes used throughout the AIX port
+#define SIZE_1K   ((uint64_t)         0x400ULL)
+#define SIZE_4K   ((uint64_t)        0x1000ULL)
+#define SIZE_64K  ((uint64_t)       0x10000ULL)
+#define SIZE_1M   ((uint64_t)      0x100000ULL)
+#define SIZE_4M   ((uint64_t)      0x400000ULL)
+#define SIZE_8M   ((uint64_t)      0x800000ULL)
+#define SIZE_16M  ((uint64_t)     0x1000000ULL)
+#define SIZE_256M ((uint64_t)    0x10000000ULL)
+#define SIZE_1G   ((uint64_t)    0x40000000ULL)
+#define SIZE_2G   ((uint64_t)    0x80000000ULL)
+#define SIZE_4G   ((uint64_t)   0x100000000ULL)
+#define SIZE_16G  ((uint64_t)   0x400000000ULL)
+#define SIZE_32G  ((uint64_t)   0x800000000ULL)
+#define SIZE_64G  ((uint64_t)  0x1000000000ULL)
+#define SIZE_1T   ((uint64_t) 0x10000000000ULL)
+
+
+#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
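
A standalone sketch of the offset_of trick defined above (my_offset_of and
Sample are made-up names; HotSpot's intx corresponds to intptr_t here): a
member's address is taken inside a fake object placed at address 16 and 16 is
subtracted again, sidestepping xlc's complaints about offsetof() on non-POD
types.

    #include <cstddef>
    #include <cstdint>

    // Same computation as HotSpot's offset_of, with standard types.
    #define my_offset_of(klass, field) \
      ((size_t)((intptr_t)&(((klass*)16)->field) - 16))

    struct Sample { int a; double b; };

    int main() {
      // For a POD type both computations agree.
      return my_offset_of(Sample, b) == offsetof(Sample, b) ? 0 : 1;
    }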
--- a/src/share/vm/utilities/growableArray.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/growableArray.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/hashtable.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/hashtable.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/altHashing.hpp"
 #include "classfile/javaClasses.hpp"
+#include "code/dependencies.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
@@ -93,7 +94,7 @@
   return false;
 }
 
-template <class T, MEMFLAGS F> jint Hashtable<T, F>::_seed = 0;
+template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
 
 // Create a new table and using alternate hash code, populate the new table
 // with the existing elements.   This can be used to change the hash code
@@ -338,7 +339,6 @@
 
 #endif // PRODUCT
 
-
 #ifdef ASSERT
 
 template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load) {
@@ -351,6 +351,118 @@
 }
 
 #endif
+
+
+template<class T, class M> GenericHashtable<T, M>::GenericHashtable(int size, bool C_heap, MEMFLAGS memflag) {
+  assert(size > 0, " Invalid hashtable size");
+  _size    = size;
+  _C_heap  = C_heap;
+  _memflag = memflag;
+  // Perform subtype-specific resource allocation
+  _items = (C_heap) ?  NEW_C_HEAP_ARRAY(T*, size, memflag) : NEW_RESOURCE_ARRAY(T*, size);
+  memset(_items, 0, sizeof(T*) * size);
+
+  DEBUG_ONLY(_num_items = 0;)
+}
+
+template<class T, class M> GenericHashtable<T, M>::~GenericHashtable() {
+  if (on_C_heap()) {
+    // Check backing array
+    for (int i = 0; i < size(); i++) {
+      T* item = head(i);
+      // Delete all items in linked list
+      while (item != NULL) {
+        T* next_item = item->next();
+        delete item;
+        DEBUG_ONLY(_num_items--);
+        item = next_item;
+      }
+    }
+    FREE_C_HEAP_ARRAY(T*, _items, _memflag);
+    _items = NULL;
+    assert (_num_items == 0, "Not all memory released");
+  }
+}
+
+/**
+ * Return a pointer to the item 'I' that is stored in the hashtable for
+ * which match_item->equals(I) == true. If no such item is found, NULL
+ * is returned.
+ */
+template<class T, class F> T* GenericHashtable<T, F>::contains(T* match_item) {
+  if (match_item != NULL) {
+    int idx = index(match_item);
+    return contains_impl(match_item, idx);
+  }
+  return NULL;
+}
+
+/**
+ * Add item to the hashtable. Return 'true' if the item was added
+ * and false otherwise.
+ */
+template<class T, class F> bool GenericHashtable<T, F>::add(T* item) {
+  if (item != NULL) {
+    int idx = index(item);
+    T* found_item = contains_impl(item, idx);
+    if (found_item == NULL) {
+      T* list_head = head(idx);
+      item->set_next(list_head);
+      item->set_prev(NULL);
+
+      if (list_head != NULL) {
+        list_head->set_prev(item);
+      }
+      set_head(item, idx);
+      DEBUG_ONLY(_num_items++);
+      return true;
+    }
+  }
+  return false;
+}
+
+/**
+ * Removes an item 'I' from the hashtable, if present. 'I' is removed, if
+ * match_item->equals(I) == true. Removing an item from the hashtable does
+ * not free memory.
+ */
+template<class T, class F> T* GenericHashtable<T, F>::remove(T* match_item) {
+  if (match_item != NULL) {
+    int idx = index(match_item);
+    T* found_item = contains_impl(match_item, idx);
+    if (found_item != NULL) {
+      // Remove item from linked list
+      T* prev = found_item->prev();
+      T* next = found_item->next();
+      if (prev != NULL) {
+        prev->set_next(next);
+      } else {
+        set_head(next, idx);
+      }
+      if (next != NULL) {
+        next->set_prev(prev);
+      }
+
+      DEBUG_ONLY(_num_items--);
+      return found_item;
+    }
+  }
+  return NULL;
+}
+
+
+template<class T, class F> T* GenericHashtable<T, F>::contains_impl(T* item, int idx) {
+  T* current_item = head(idx);
+  while (current_item != NULL) {
+    if (current_item->equals(item)) {
+      return current_item;
+    }
+    current_item = current_item->next();
+  }
+  return NULL;
+}
+
+
 // Explicitly instantiate these types
 template class Hashtable<ConstantPool*, mtClass>;
 template class Hashtable<Symbol*, mtSymbol>;
@@ -370,3 +482,5 @@
 template class BasicHashtable<mtSymbol>;
 template class BasicHashtable<mtCode>;
 template class BasicHashtable<mtInternal>;
+
+template class GenericHashtable<DependencySignature, ResourceObj>;
--- a/src/share/vm/utilities/hashtable.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/hashtable.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -280,7 +280,7 @@
   // Function to move these elements into the new table.
   void move_to(Hashtable<T, F>* new_table);
   static bool use_alternate_hashcode()  { return _seed != 0; }
-  static jint seed()                    { return _seed; }
+  static juint seed()                    { return _seed; }
 
   static int literal_size(Symbol *symbol);
   static int literal_size(oop oop);
@@ -296,11 +296,11 @@
   void dump_table(outputStream* st, const char *table_name);
 
  private:
-  static jint _seed;
+  static juint _seed;
 };
 
 
-//  Verions of hashtable where two handles are used to compute the index.
+// Versions of hashtable where two handles are used to compute the index.
 
 template <class T, MEMFLAGS F> class TwoOopHashtable : public Hashtable<T, F> {
   friend class VMStructs;
@@ -327,4 +327,86 @@
   }
 };
 
+
+/*
+ * Usage of GenericHashtable:
+ *
+ * class X : public GenericHashtableEntry<X, ResourceObj> {
+ *
+ *   // Implement virtual functions in class X
+ *   bool      equals(X* sig) const;
+ *   uintptr_t key()          const;
+ * };
+ *
+ * void foo() {
+ *   GenericHashtable<X, ResourceObj>* table = new GenericHashtable<X, ResourceObj>(11027, false);
+ *
+ *   X* elem = new X();
+ *   table->add(elem);
+ *   table->contains(elem);
+ * }
+ *
+ * You can choose other allocation types as well. For example, to allocate the
+ * entries on the C-heap (CHeapObj<type>), simply replace ResourceObj with the desired type:
+ *
+ * class X : public GenericHashtableEntry<X, CHeapObj<mtCode> > { ... };
+ *
+ * To make the destructor (and remove) of the hashtable work:
+ * 1) override the delete operator of X
+ * 2) provide a destructor for X
+ *
+ * You may also find it convenient to override the new operator.
+ *
+ * If you use these templates, do not forget to add an explicit instantiation
+ * (at the end of hashtable.cpp).
+ *
+ *  template class GenericHashtable<X, ResourceObj>;
+ */
+template <class T, class M> class GenericHashtableEntry : public M {
+ private:
+  T* _next;
+  T* _prev;
+ public:
+  // Must be implemented by subclass.
+  virtual uintptr_t key()            const = 0;
+  virtual bool      equals(T* other) const = 0;
+
+  T* next() const        { return _next; }
+  T* prev() const        { return _prev; }
+  void set_next(T* item) { _next = item; }
+  void set_prev(T* item) { _prev = item; }
+
+  // Constructor and destructor
+  GenericHashtableEntry() : _next(NULL), _prev(NULL) { };
+  virtual ~GenericHashtableEntry() {};
+};
+
+template <class T, class M> class GenericHashtable : public M {
+ private:
+  T**      _items;
+  int      _size;
+  bool     _C_heap;
+  MEMFLAGS _memflag;
+
+  // Accessor methods
+  T*   head    (int idx) const    { return _items[idx]; }
+  void set_head(T* item, int idx) { _items[idx] = item; }
+  int  index   (T* item)          { assert(item != NULL, "missing null check"); return item->key() % size(); }
+
+  // Helper function
+  T* contains_impl(T* item, int idx);
+
+  DEBUG_ONLY(int _num_items;)
+ public:
+  GenericHashtable(int size, bool C_heap = false, MEMFLAGS memflag = mtNone);
+  ~GenericHashtable();
+  T*   contains(T* match_item);
+  T*   remove  (T* match_item);
+  bool add     (T* item);
+
+
+  bool on_C_heap() const { return _C_heap; }
+  int  size()      const { return _size; }
+};
+
 #endif // SHARE_VM_UTILITIES_HASHTABLE_HPP
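
For readers without a HotSpot build tree, the following self-contained
miniature reproduces the same intrusive chaining design as GenericHashtable
(Entry and MiniTable are illustrative names, not the HotSpot classes; the
allocation policy and MEMFLAGS machinery are omitted): entries carry their own
next/prev links, buckets use separate chaining, and the bucket index is
key() % size.

    #include <cassert>
    #include <cstdint>
    #include <cstddef>

    struct Entry {
      uintptr_t k;
      Entry* next;
      Entry* prev;
      Entry(uintptr_t kk) : k(kk), next(NULL), prev(NULL) {}
      uintptr_t key() const { return k; }
      bool equals(const Entry* other) const { return k == other->k; }
    };

    struct MiniTable {
      static const int SIZE = 11;
      Entry* buckets[SIZE];
      MiniTable() { for (int i = 0; i < SIZE; i++) buckets[i] = NULL; }
      int index(const Entry* e) const { return (int)(e->key() % SIZE); }
      Entry* contains(Entry* m) {
        for (Entry* e = buckets[index(m)]; e != NULL; e = e->next)
          if (e->equals(m)) return e;
        return NULL;
      }
      bool add(Entry* e) {              // rejects duplicates, like add() above
        if (contains(e) != NULL) return false;
        int i = index(e);
        e->next = buckets[i];
        e->prev = NULL;
        if (buckets[i] != NULL) buckets[i]->prev = e;
        buckets[i] = e;
        return true;
      }
    };

    int main() {
      MiniTable t;
      Entry a(42), b(42);               // equal keys
      assert(t.add(&a));
      assert(!t.add(&b));               // second add finds the equal entry
      return 0;
    }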
--- a/src/share/vm/utilities/histogram.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/histogram.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -37,6 +37,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
--- a/src/share/vm/utilities/macros.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/macros.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -276,6 +276,14 @@
 #define NOT_LINUX(code) code
 #endif
 
+#ifdef AIX
+#define AIX_ONLY(code) code
+#define NOT_AIX(code)
+#else
+#define AIX_ONLY(code)
+#define NOT_AIX(code) code
+#endif
+
 #ifdef SOLARIS
 #define SOLARIS_ONLY(code) code
 #define NOT_SOLARIS(code)
@@ -342,7 +350,11 @@
 #define NOT_IA32(code) code
 #endif
 
-#ifdef IA64
+// This is a REALLY BIG HACK, but on AIX <sys/systemcfg.h> unconditionally defines IA64.
+// At least on AIX 7.1 this is a real problem because 'systemcfg.h' is indirectly included
+// by 'pthread.h' and other common system headers.
+
+#if defined(IA64) && !defined(AIX)
 #define IA64_ONLY(code) code
 #define NOT_IA64(code)
 #else
@@ -366,14 +378,34 @@
 #define NOT_SPARC(code) code
 #endif
 
-#ifdef PPC
+#if defined(PPC32) || defined(PPC64)
+#ifndef PPC
+#define PPC
+#endif
 #define PPC_ONLY(code) code
 #define NOT_PPC(code)
 #else
+#undef PPC
 #define PPC_ONLY(code)
 #define NOT_PPC(code) code
 #endif
 
+#ifdef PPC32
+#define PPC32_ONLY(code) code
+#define NOT_PPC32(code)
+#else
+#define PPC32_ONLY(code)
+#define NOT_PPC32(code) code
+#endif
+
+#ifdef PPC64
+#define PPC64_ONLY(code) code
+#define NOT_PPC64(code)
+#else
+#define PPC64_ONLY(code)
+#define NOT_PPC64(code) code
+#endif
+
 #ifdef E500V2
 #define E500V2_ONLY(code) code
 #define NOT_E500V2(code)
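
How the X_ONLY/NOT_X macro pairs added above read at a use site, as a
standalone sketch that pretends to target PPC64 (the numbers are made up): one
source line carries both variants, and the preprocessor keeps or drops each
argument depending on the target.

    #include <cstdio>

    #define PPC64 1                    // pretend we are compiling for PPC64
    #ifdef PPC64
    #define PPC64_ONLY(code) code
    #define NOT_PPC64(code)
    #else
    #define PPC64_ONLY(code)
    #define NOT_PPC64(code) code
    #endif

    int main() {
      // Expands to '0 + 3' here; would expand to '0 + 1' on non-PPC64 builds.
      int descriptor_slots = 0 PPC64_ONLY(+ 3) NOT_PPC64(+ 1);
      printf("descriptor slots: %d\n", descriptor_slots);
      return 0;
    }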
--- a/src/share/vm/utilities/numberSeq.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/numberSeq.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/ostream.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/ostream.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -39,6 +39,9 @@
 #ifdef TARGET_OS_FAMILY_windows
 # include "os_windows.inline.hpp"
 #endif
+#ifdef TARGET_OS_FAMILY_aix
+# include "os_aix.inline.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_bsd
 # include "os_bsd.inline.hpp"
 #endif
@@ -1238,7 +1241,7 @@
 
 #ifndef PRODUCT
 
-#if defined(SOLARIS) || defined(LINUX) || defined(_ALLBSD_SOURCE)
+#if defined(SOLARIS) || defined(LINUX) || defined(AIX) || defined(_ALLBSD_SOURCE)
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
--- a/src/share/vm/utilities/ostream.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/ostream.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/resourceHash.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/resourceHash.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -44,8 +44,12 @@
 
 template<
     typename K, typename V,
-    typename ResourceHashtableFns<K>::hash_fn   HASH   = primitive_hash<K>,
-    typename ResourceHashtableFns<K>::equals_fn EQUALS = primitive_equals<K>,
+    // xlC does not compile this:
+    // http://stackoverflow.com/questions/8532961/template-argument-of-type-that-is-defined-by-inner-typedef-from-other-template-c
+    //typename ResourceHashtableFns<K>::hash_fn   HASH   = primitive_hash<K>,
+    //typename ResourceHashtableFns<K>::equals_fn EQUALS = primitive_equals<K>,
+    unsigned (*HASH)  (K const&)           = primitive_hash<K>,
+    bool     (*EQUALS)(K const&, K const&) = primitive_equals<K>,
     unsigned SIZE = 256
     >
 class ResourceHashtable : public ResourceObj {
--- a/src/share/vm/utilities/taskqueue.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/taskqueue.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -53,6 +53,9 @@
 #ifdef TARGET_OS_ARCH_linux_ppc
 # include "orderAccess_linux_ppc.inline.hpp"
 #endif
+#ifdef TARGET_OS_ARCH_aix_ppc
+# include "orderAccess_aix_ppc.inline.hpp"
+#endif
 #ifdef TARGET_OS_ARCH_bsd_x86
 # include "orderAccess_bsd_x86.inline.hpp"
 #endif
--- a/src/share/vm/utilities/top.hpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/top.hpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/share/vm/utilities/vmError.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/vmError.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1040,7 +1040,7 @@
     OnError = NULL;
   }
 
-  static bool skip_replay = false;
+  static bool skip_replay = ReplayCompiles; // Do not overwrite file during replay
   if (DumpReplayDataOnError && _thread && _thread->is_Compiler_thread() && !skip_replay) {
     skip_replay = true;
     ciEnv* env = ciEnv::current();
--- a/src/share/vm/utilities/yieldingWorkgroup.cpp	Tue Mar 11 15:34:06 2014 +0100
+++ b/src/share/vm/utilities/yieldingWorkgroup.cpp	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/Makefile	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/Makefile	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/test/TEST.ROOT	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/TEST.ROOT	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2005, 2007, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/test/TEST.groups	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/TEST.groups	Wed Mar 12 13:30:08 2014 +0100
@@ -67,10 +67,8 @@
   gc/metaspace/TestPerfCountersAndMemoryPools.java \
   runtime/6819213/TestBootNativeLibraryPath.java \
   runtime/6925573/SortMethodsTest.java \
-  runtime/7107135/Test7107135.sh \
   runtime/7158988/FieldMonitor.java \
   runtime/7194254/Test7194254.java \
-  runtime/jsig/Test8017498.sh \
   runtime/Metaspace/FragmentMetaspace.java \
   runtime/NMT/BaselineWithParameter.java \
   runtime/NMT/JcmdScale.java \
@@ -86,7 +84,8 @@
   runtime/RedefineObject/TestRedefineObject.java \
   runtime/XCheckJniJsig/XCheckJSig.java \
   serviceability/attach/AttachWithStalePidFile.java \
-  serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java
+  serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java \
+  serviceability/dcmd/DynLibDcmdTest.java
 
 
 # JRE adds further tests to compact3
@@ -174,7 +173,6 @@
   gc/startup_warnings/TestParallelScavengeSerialOld.java \
   gc/startup_warnings/TestParNewCMS.java \
   gc/startup_warnings/TestParNewSerialOld.java \
-  runtime/6929067/Test6929067.sh \
   runtime/SharedArchiveFile/SharedArchiveFile.java
 
 # Minimal VM on Compact 2 adds in some compact2 tests
--- a/test/compiler/5091921/Test7005594.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/5091921/Test7005594.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6431242/Test.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6431242/Test.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6589834/Test_ia32.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6589834/Test_ia32.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6636138/Test1.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6636138/Test1.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6636138/Test2.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6636138/Test2.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6795161/Test.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6795161/Test.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/6826736/Test.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6826736/Test.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,7 +27,7 @@
  * @bug 6826736
  * @summary CMS: core dump with -XX:+UseCompressedOops
  *
- * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
+ * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 -Xmx256m -XX:ParallelGCThreads=4 Test
  */
 
 public class Test {
--- a/test/compiler/6857159/Test6857159.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/6857159/Test6857159.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/7068051/Test7068051.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/7068051/Test7068051.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/7070134/Test7070134.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/7070134/Test7070134.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/7184394/TestAESMain.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/7184394/TestAESMain.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,20 +39,32 @@
     System.out.println(iters + " iterations");
     TestAESEncode etest = new TestAESEncode();
     etest.prepare();
+    // warm-up for 20K iterations
+    System.out.println("Starting encryption warm-up");
+    for (int i=0; i<20000; i++) {
+      etest.run();
+    }
+    System.out.println("Finished encryption warm-up");
     long start = System.nanoTime();
     for (int i=0; i<iters; i++) {
       etest.run();
     }
     long end = System.nanoTime();
-    System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000000.0) + " ms");
+    System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000.0) + " ms");
 
     TestAESDecode dtest = new TestAESDecode();
     dtest.prepare();
+    // warm-up for 20K iterations
+    System.out.println("Starting decryption warm-up");
+    for (int i=0; i<20000; i++) {
+      dtest.run();
+    }
+    System.out.println("Finished decryption warm-up");
     start = System.nanoTime();
     for (int i=0; i<iters; i++) {
       dtest.run();
     }
     end = System.nanoTime();
-    System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000000.0) + " ms");
+    System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000.0) + " ms");
   }
 }
--- a/test/compiler/7200264/Test7200264.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/7200264/Test7200264.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,6 +1,6 @@
 #!/bin/sh
 # 
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 # This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/8000805/Test8000805.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/8000805/Test8000805.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/8005419/Test8005419.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/8005419/Test8005419.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/test/compiler/ciReplay/common.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/ciReplay/common.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -196,6 +196,11 @@
     then
         # enable core dump
         ulimit -c unlimited
+        new_ulimit=`ulimit -c`
+        if [ $new_ulimit != "unlimited" -a $new_ulimit != "-1" ]
+        then
+            test_fail 2 "CHECK :: ULIMIT" "Could not set 'ulimit -c unlimited'. 'ulimit -c' returns : $new_ulimit"
+        fi
 
         if [ $VM_OS = "solaris" ]
         then
@@ -228,7 +233,10 @@
     
     core_locations=`grep -i core crash.out | grep "location:" | \
             sed -e 's/.*location: //'`
+    echo CRASH OUTPUT:
+    cat crash.out    
     rm crash.out 
+    
     # processing core locations for *nix
     if [ $VM_OS != "windows" ]
     then
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/codegen/LoadWithMask.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8032207
+ * @summary Invalid node sizing for loadUS2L_immI16 and loadI2L_immI
+ * @run main/othervm -server -Xbatch -XX:-TieredCompilation -XX:CompileCommand=compileonly,LoadWithMask.foo LoadWithMask
+ *
+ */
+public class LoadWithMask {
+  static int x[] = new int[1];
+  static long foo() {
+    return x[0] & 0xfff0ffff;
+  }
+
+  public static void main(String[] args) {
+    x[0] = -1;
+    long l = 0;
+    for (int i = 0; i < 100000; ++i) {
+      l = foo();
+    }
+  }
+}
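
With x[0] == -1, the int result of x[0] & 0xfff0ffff is 0xfff0ffff (bit 31 set, so negative), and the implicit i2l conversion must sign-extend it; bug 8032207 is about the node sizing of the matched load-with-mask instructions, not the semantics. A standalone sketch of the value foo() must produce (not part of the changeset):

    // Expected masking-then-widening semantics for LoadWithMask.foo() (illustration only).
    public class LoadWithMaskExpected {
        public static void main(String[] args) {
            int masked = -1 & 0xfff0ffff;          // int result 0xfff0ffff, negative
            long widened = masked;                 // i2l sign-extends
            if (widened != 0xfffffffffff0ffffL) {
                throw new AssertionError("bad widening: " + Long.toHexString(widened));
            }
        }
    }
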
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/codegen/LoadWithMask2.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8031743
+ * @summary loadI2L_immI broken for negative memory values
+ * @run main/othervm -server -Xbatch -XX:-TieredCompilation -XX:CompileCommand=compileonly,*.foo* LoadWithMask2
+ *
+ */
+public class LoadWithMask2 {
+  static int x;
+  static long foo1() {
+    return x & 0xfffffffe;
+  }
+  static long foo2() {
+    return x & 0xff000000;
+  }
+  static long foo3() {
+    return x & 0x8abcdef1;
+  }
+
+  public static void main(String[] args) {
+    x = -1;
+    long l = 0;
+    for (int i = 0; i < 100000; ++i) {
+      l = foo1() & foo2() & foo3();
+    }
+    if (l > 0) {
+      System.out.println("FAILED");
+      System.exit(97);
+    }
+    System.out.println("PASSED");
+  }
+}
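
All three masks in LoadWithMask2 have bit 31 set, so with x == -1 each masked int is negative and i2l must sign-extend; the broken loadI2L_immI zero-extended, making the results positive, which is exactly what the l > 0 check catches. A quick standalone check of that invariant:

    // Illustration: masks with bit 31 set must stay negative across i2l.
    public class SignExtendCheck {
        public static void main(String[] args) {
            for (int mask : new int[] { 0xfffffffe, 0xff000000, 0x8abcdef1 }) {
                long l = -1 & mask;                // int AND, then sign-extending i2l
                if (l >= 0) {
                    throw new AssertionError("zero-extended: " + Long.toHexString(l));
                }
            }
        }
    }
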
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/debug/VerifyAdapterSharing.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8030783
+ * @summary Regression test for 8026478
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class VerifyAdapterSharing {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb;
+    OutputAnalyzer out;
+
+    pb = ProcessTools.createJavaProcessBuilder("-Xcomp", "-XX:+IgnoreUnrecognizedVMOptions",
+                                               "-XX:+VerifyAdapterSharing", "-version");
+    out = new OutputAnalyzer(pb.start());
+    out.shouldHaveExitValue(0);
+  }
+}
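
VerifyAdapterSharing only needs the spawned VM to start and exit cleanly with the diagnostic flag on; -XX:+IgnoreUnrecognizedVMOptions keeps the test harmless on builds where the flag does not exist. The same ProcessTools/OutputAnalyzer pattern generalizes; a hedged variant, not in the changeset, assuming the testlibrary's shouldContain matcher:

    // Hypothetical variant: assert on output text as well as the exit value.
    import com.oracle.java.testlibrary.*;

    public class FlagSmokeTest {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:+IgnoreUnrecognizedVMOptions", "-version");
            OutputAnalyzer out = new OutputAnalyzer(pb.start());
            out.shouldHaveExitValue(0);
            out.shouldContain("version");          // the -version banner reaches the analyzer
        }
    }
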
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/inlining/DefaultAndConcreteMethodsCHA.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031695
+ * @summary CHA ignores default methods during analysis, leading to incorrect code generation
+ *
+ * @run main/othervm -Xbatch DefaultAndConcreteMethodsCHA
+ */
+interface I {
+    default int m() { return 0; }
+}
+
+class A implements I {}
+
+class C extends A { }
+class D extends A { public int m() { return 1; } }
+
+public class DefaultAndConcreteMethodsCHA {
+    public static int test(A obj) {
+        return obj.m();
+    }
+    public static void main(String[] args) {
+        for (int i = 0; i < 10000; i++) {
+            int idC = test(new C());
+            if (idC != 0) {
+                throw new Error("C.m didn't invoke I.m: id "+idC);
+            }
+
+            int idD = test(new D());
+            if (idD != 1) {
+                throw new Error("D.m didn't invoke D.m: id "+idD);
+            }
+        }
+
+    }
+}
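
For context on 8031695: class hierarchy analysis must count the inherited default I.m as a reachable target of A.m, otherwise the compiler may devirtualize test() to D.m alone. Given the I/A/C/D declarations above, the two dispatch targets in isolation:

    // The two dispatch targets CHA has to track (mirrors the test above).
    public class ChaTargets {
        public static void main(String[] args) {
            A viaDefault  = new C();               // inherits default I.m() -> 0
            A viaConcrete = new D();               // concrete override D.m() -> 1
            System.out.println(viaDefault.m() + " " + viaConcrete.m());   // prints: 0 1
        }
    }
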
--- a/test/compiler/intrinsics/mathexact/AddExactICondTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactICondTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8024924
  * @summary Test non constant addExact
  * @compile AddExactICondTest.java
- * @run main AddExactICondTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactICondTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactIConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8024924
  * @summary Test constant addExact
  * @compile AddExactIConstantTest.java Verify.java
- * @run main AddExactIConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactIConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactILoadTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactILoadTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8024924
  * @summary Test non constant addExact
  * @compile AddExactILoadTest.java Verify.java
- * @run main AddExactILoadTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactILoadTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactILoopDependentTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8024924
  * @summary Test non constant addExact
  * @compile AddExactILoopDependentTest.java Verify.java
- * @run main AddExactILoopDependentTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactILoopDependentTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactINonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8024924
  * @summary Test non constant addExact
  * @compile AddExactINonConstantTest.java Verify.java
- * @run main AddExactINonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactINonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactIRepeatTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8025657
  * @summary Test repeating addExact
  * @compile AddExactIRepeatTest.java Verify.java
- * @run main AddExactIRepeatTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactIRepeatTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactLConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant addExact
  * @compile AddExactLConstantTest.java Verify.java
- * @run main AddExactLConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactLConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/AddExactLNonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant addExact
  * @compile AddExactLNonConstantTest.java Verify.java
- * @run main AddExactLNonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main AddExactLNonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/CompareTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/CompareTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026722
  * @summary Verify that the compare after addExact is a signed compare
  * @compile CompareTest.java
- * @run main CompareTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main CompareTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/DecExactITest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/DecExactITest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test decrementExact
  * @compile DecExactITest.java Verify.java
- * @run main DecExactITest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main DecExactITest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/DecExactLTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/DecExactLTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test decrementExact
  * @compile DecExactLTest.java Verify.java
- * @run main DecExactLTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main DecExactLTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/GVNTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/GVNTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8028207
  * @summary Verify that GVN doesn't mess up the two addExacts
  * @compile GVNTest.java
- * @run main GVNTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main GVNTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/IncExactITest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/IncExactITest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test incrementExact
  * @compile IncExactITest.java Verify.java
- * @run main IncExactITest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main IncExactITest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/IncExactLTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/IncExactLTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test incrementExact
  * @compile IncExactLTest.java Verify.java
- * @run main IncExactLTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main IncExactLTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactICondTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactICondTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test multiplyExact as condition
  * @compile MulExactICondTest.java
- * @run main MulExactICondTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactICondTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactIConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant multiplyExact
  * @compile MulExactIConstantTest.java Verify.java
- * @run main MulExactIConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactIConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactILoadTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactILoadTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test multiplyExact
  * @compile MulExactILoadTest.java Verify.java
- * @run main MulExactILoadTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactILoadTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactILoopDependentTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test loop dependent multiplyExact
  * @compile MulExactILoopDependentTest.java Verify.java
- * @run main MulExactILoopDependentTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactILoopDependentTest
  *
  */
 public class MulExactILoopDependentTest {
--- a/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactINonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant multiplyExact
  * @compile MulExactINonConstantTest.java Verify.java
- * @run main MulExactINonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactINonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactIRepeatTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test repeating multiplyExact
  * @compile MulExactIRepeatTest.java Verify.java
- * @run main MulExactIRepeatTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactIRepeatTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactLConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant mulExact
  * @compile MulExactLConstantTest.java Verify.java
- * @run main MulExactLConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactLConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/MulExactLNonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant mulExact
  * @compile MulExactLNonConstantTest.java Verify.java
- * @run main MulExactLNonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main MulExactLNonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactIConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant negExact
  * @compile NegExactIConstantTest.java Verify.java
- * @run main NegExactIConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactIConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/NegExactILoadTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactILoadTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,14 +26,14 @@
  * @bug 8026844
  * @summary Test negExact
  * @compile NegExactILoadTest.java Verify.java
- * @run main NegExactILoadTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactILoadTest
  *
  */
 
 public class NegExactILoadTest {
     public static void main(String[] args) {
-        Verify.LoadTest.init();
-        Verify.LoadTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
+      Verify.LoadTest.init();
+      Verify.LoadTest.verify(new Verify.UnaryToBinary(new Verify.NegExactI()));
     }
 
 }
--- a/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactILoopDependentTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test negExact loop dependent
  * @compile NegExactILoopDependentTest.java Verify.java
- * @run main NegExactILoopDependentTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactILoopDependentTest
  *
  */
 public class NegExactILoopDependentTest {
--- a/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactINonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant negExact
  * @compile NegExactINonConstantTest.java Verify.java
- * @run main NegExactINonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactINonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactLConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant negExact
  * @compile NegExactLConstantTest.java Verify.java
- * @run main NegExactLConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactLConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NegExactLNonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant negExact
  * @compile NegExactLNonConstantTest.java Verify.java
- * @run main NegExactLNonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NegExactLNonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/NestedMathExactTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/NestedMathExactTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8027444
  * @summary Test nested loops
  * @compile NestedMathExactTest.java
- * @run main NestedMathExactTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main NestedMathExactTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SplitThruPhiTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SplitThruPhiTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8028198
  * @summary Verify that split through phi does the right thing
  * @compile SplitThruPhiTest.java
- * @run main SplitThruPhiTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SplitThruPhiTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactICondTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactICondTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test subtractExact as condition
  * @compile SubExactICondTest.java Verify.java
- * @run main SubExactICondTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactICondTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactIConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test constant subtractExact
  * @compile SubExactIConstantTest.java Verify.java
- * @run main SubExactIConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactIConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactILoadTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactILoadTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant subtractExact
  * @compile SubExactILoadTest.java Verify.java
- * @run main SubExactILoadTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactILoadTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactILoopDependentTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant subtractExact
  * @compile SubExactILoopDependentTest.java Verify.java
- * @run main SubExactILoopDependentTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactILoopDependentTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactINonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test non constant subtractExact
  * @compile SubExactINonConstantTest.java Verify.java
- * @run main SubExactINonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactINonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactIRepeatTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -26,7 +26,7 @@
  * @bug 8026844
  * @summary Test repeating subtractExact
  * @compile SubExactIRepeatTest.java Verify.java
- * @run main SubExactIRepeatTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactIRepeatTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactLConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,7 +27,7 @@
  * @bug 8027353
  * @summary Test constant subtractExact
  * @compile SubExactLConstantTest.java Verify.java
- * @run main SubExactLConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactLConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/SubExactLNonConstantTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,7 +27,7 @@
  * @bug 8027353
  * @summary Test non constant subtractExact
  * @compile SubExactLNonConstantTest.java Verify.java
- * @run main SubExactLNonConstantTest -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockExperimentalVMOptions -XX:+UseMathExactIntrinsics
+ * @run main SubExactLNonConstantTest
  *
  */
 
--- a/test/compiler/intrinsics/mathexact/Verify.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/intrinsics/mathexact/Verify.java	Wed Mar 12 13:30:08 2014 +0100
@@ -160,6 +160,7 @@
 
     public static class NonConstantTest {
         public static java.util.Random rnd = new java.util.Random();
+        public static int[] values = new int[] { Integer.MAX_VALUE, Integer.MIN_VALUE };
 
         public static void verify(BinaryMethod method) {
             for (int i = 0; i < 50000; ++i) {
@@ -169,6 +170,10 @@
                 Verify.verifyBinary(rnd1 + 1, rnd2, method);
                 Verify.verifyBinary(rnd1 - 1, rnd2, method);
                 Verify.verifyBinary(rnd1, rnd2 - 1, method);
+                Verify.verifyBinary(0, values[0], method);
+                Verify.verifyBinary(values[0], 0, method);
+                Verify.verifyBinary(0, values[1], method);
+                Verify.verifyBinary(values[1], 0, method);
             }
         }
     }
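
The added values drive verifyBinary to the exact int boundaries, where the intrinsified code must match Math.*Exact's overflow behavior bit for bit; for instance, subtracting Integer.MIN_VALUE from 0 overflows while adding 0 at either boundary does not. A standalone illustration (not part of the changeset):

    // Boundary behavior the new values exercise (illustration only).
    public class BoundaryDemo {
        public static void main(String[] args) {
            System.out.println(Math.addExact(0, Integer.MAX_VALUE));    // exact: 2147483647
            try {
                Math.subtractExact(0, Integer.MIN_VALUE);               // -(MIN_VALUE) overflows int
                throw new AssertionError("expected ArithmeticException");
            } catch (ArithmeticException expected) {
                System.out.println("overflow detected as expected");
            }
        }
    }
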
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/AddExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build AddExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics AddExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class AddExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Add).test();
+    }
+}
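
Each sanity test runs the same workload twice under -XX:+LogCompilation: once with -XX:-UseMathExactIntrinsics (writing hs_neg.log) and once with -XX:+UseMathExactIntrinsics (writing hs.log); the final @run hands both logs to Verifier, which checks them against the expectations IntrinsicBase records (see further below). The Verifier source is not in this section; a minimal sketch of counting intrinsic mentions in such a log, with the helper name hypothetical:

    // Hypothetical helper: count log lines mentioning an intrinsic id.
    import java.nio.file.*;
    import java.util.stream.Stream;

    public class IntrinsicLogCount {
        public static long count(String logFile, String intrinsicId) throws Exception {
            try (Stream<String> lines = Files.lines(Paths.get(logFile))) {
                return lines.filter(l -> l.contains(intrinsicId)).count();
            }
        }
        public static void main(String[] args) throws Exception {
            System.out.println(count(args[0], args[1]));   // e.g. hs.log _addExactI
        }
    }
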
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/AddExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build AddExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics AddExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics AddExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class AddExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Add).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/DecrementExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build DecrementExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics DecrementExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class DecrementExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Decrement).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/DecrementExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build DecrementExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics DecrementExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics DecrementExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class DecrementExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Decrement).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/IncrementExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build IncrementExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics IncrementExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class IncrementExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Increment).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/IncrementExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build IncrementExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics IncrementExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics IncrementExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class IncrementExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Increment).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/IntrinsicBase.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.Platform;
+
+import java.io.FileOutputStream;
+import java.lang.reflect.Executable;
+import java.util.Properties;
+
+public abstract class IntrinsicBase extends CompilerWhiteBoxTest {
+    protected String javaVmName;
+    protected String useMathExactIntrinsics;
+
+    protected IntrinsicBase(TestCase testCase) {
+        super(testCase);
+        javaVmName = System.getProperty("java.vm.name");
+        useMathExactIntrinsics = getVMOption("UseMathExactIntrinsics");
+    }
+
+    @Override
+    protected void test() throws Exception {
+        // java.lang.Math must already be loaded so that the methods calling into Math can be compiled
+        System.out.println("class java.lang.Math should be loaded. Proof: " + Math.class);
+        printEnvironmentInfo();
+
+        int expectedIntrinsicCount = 0;
+
+        switch (MODE) {
+            case "compiled mode":
+            case "mixed mode":
+                if (isServerVM()) {
+                    if (TIERED_COMPILATION) {
+                        int max_level = TIERED_STOP_AT_LEVEL;
+                        expectedIntrinsicCount = (max_level == COMP_LEVEL_MAX) ? 1 : 0;
+                        for (int i = CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE; i <= max_level; ++i) {
+                            deoptimize();
+                            compileAtLevel(i);
+                        }
+                    } else {
+                        expectedIntrinsicCount = 1;
+                        deoptimize();
+                        compileAtLevel(CompilerWhiteBoxTest.COMP_LEVEL_MAX);
+                    }
+                } else {
+                    deoptimize();
+                    compileAtLevel(CompilerWhiteBoxTest.COMP_LEVEL_SIMPLE);
+                }
+
+                if (!isIntrinsicSupported()) {
+                    expectedIntrinsicCount = 0;
+                }
+                break;
+            case "interpreted mode": //test is not applicable in this mode;
+                System.err.println("Warning: This test is not applicable in mode: " + MODE);
+                break;
+            default:
+                throw new RuntimeException("Test bug, unknown VM mode: " + MODE);
+        }
+
+        System.out.println("Expected intrinsic count is " + expectedIntrinsicCount + " name " + getIntrinsicId());
+
+        final FileOutputStream out = new FileOutputStream(getVMOption("LogFile") + ".verify.properties");
+        Properties expectedProps = new Properties();
+        expectedProps.setProperty("intrinsic.name", getIntrinsicId());
+        expectedProps.setProperty("intrinsic.expectedCount", String.valueOf(expectedIntrinsicCount));
+        expectedProps.store(out, null);
+
+        out.close();
+    }
+
+    protected void printEnvironmentInfo() {
+        System.out.println("java.vm.name=" + javaVmName);
+        System.out.println("os.arch=" + Platform.getOsArch());
+        System.out.println("java.vm.info=" + MODE);
+        System.out.println("useMathExactIntrinsics=" + useMathExactIntrinsics);
+    }
+
+    protected void compileAtLevel(int level) {
+        WHITE_BOX.enqueueMethodForCompilation(method, level);
+        waitBackgroundCompilation();
+        checkCompilation(method, level);
+    }
+
+    protected void checkCompilation(Executable executable, int level) {
+        if (!WHITE_BOX.isMethodCompiled(executable)) {
+            throw new RuntimeException("Test bug, expected compilation (level): " + level + ", but not compiled");
+        }
+        final int compilationLevel = WHITE_BOX.getMethodCompilationLevel(executable);
+        if (compilationLevel != level) {
+            if (!(TIERED_COMPILATION && level == COMP_LEVEL_FULL_PROFILE && compilationLevel == COMP_LEVEL_LIMITED_PROFILE)) { // acceptable: tiered may stop at the limited-profile level
+                throw new RuntimeException("Test bug, expected compilation (level): " + level + ", but level: " + compilationLevel);
+            }
+        }
+    }
+
+    protected abstract boolean isIntrinsicSupported();
+
+    protected abstract String getIntrinsicId();
+
+    protected boolean isServerVM() {
+        return javaVmName.toLowerCase().contains("server");
+    }
+
+    static class IntTest extends IntrinsicBase {
+        protected IntTest(MathIntrinsic.IntIntrinsic testCase) {
+            super(testCase);
+        }
+
+        @Override
+        protected boolean isIntrinsicSupported() {
+            return isServerVM() && Boolean.valueOf(useMathExactIntrinsics) && (Platform.isX86() || Platform.isX64());
+        }
+
+        @Override
+        protected String getIntrinsicId() {
+            return "_" + testCase.name().toLowerCase() + "ExactI";
+        }
+    }
+
+    static class LongTest extends IntrinsicBase {
+        protected LongTest(MathIntrinsic.LongIntrinsic testCase) {
+            super(testCase);
+        }
+
+        @Override
+        protected boolean isIntrinsicSupported() {
+            return isServerVM() && Boolean.valueOf(useMathExactIntrinsics) && Platform.isX64();
+        }
+
+        @Override
+        protected String getIntrinsicId() {
+            return "_" + testCase.name().toLowerCase() + "ExactL";
+        }
+    }
+}
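
IntrinsicBase.test() derives expectedIntrinsicCount from the VM mode (1 only when the server compiler reaches the top tier and the platform supports the intrinsic) and stores it, together with the intrinsic id, in <LogFile>.verify.properties. The Verifier named in the @run tags is expected to load that file back; its source is not in this hunk, so the consumer below is a sketch assuming only the two keys written above:

    // Sketch of the consumer side (the real Verifier is not shown in this changeset).
    import java.io.FileInputStream;
    import java.util.Properties;

    public class VerifyPropsReader {
        public static void main(String[] args) throws Exception {
            for (String log : args) {              // e.g. hs_neg.log hs.log
                Properties p = new Properties();
                try (FileInputStream in = new FileInputStream(log + ".verify.properties")) {
                    p.load(in);
                }
                System.out.println(log + ": expect " + p.getProperty("intrinsic.expectedCount")
                        + " occurrence(s) of " + p.getProperty("intrinsic.name"));
            }
        }
    }
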
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/MathIntrinsic.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.Executable;
+import java.util.concurrent.Callable;
+
+public class MathIntrinsic {
+
+    enum IntIntrinsic implements CompilerWhiteBoxTest.TestCase {
+        Add {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.addExact(int1, int2);
+            }
+        },
+        Subtract {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.subtractExact(int1, int2);
+            }
+        },
+        Multiply {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.multiplyExact(int1, int2);
+            }
+        },
+        Increment {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.incrementExact(int1);
+            }
+        },
+        Decrement {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.decrementExact(int1);
+            }
+        },
+        Negate {
+            @Override
+            Object execMathMethod() {
+                return intR = Math.negateExact(int1);
+            }
+        };
+        protected int int1;
+        protected int int2;
+        protected int intR;
+
+        abstract Object execMathMethod();
+
+        @Override
+        public Executable getExecutable() {
+            try {
+                return getClass().getDeclaredMethod("execMathMethod");
+            } catch (NoSuchMethodException e) {
+                throw new RuntimeException("Test bug, no such method: " + e);
+            }
+        }
+
+        @Override
+        public Callable<Integer> getCallable() {
+            return null;
+        }
+
+        @Override
+        public boolean isOsr() {
+            return false;
+        }
+
+    }
+
+    enum LongIntrinsic implements CompilerWhiteBoxTest.TestCase {
+        Add {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.addExact(long1, long2);
+            }
+        },
+        Subtract {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.subtractExact(long1, long2);
+            }
+        },
+        Multiply {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.multiplyExact(long1, long2);
+            }
+        },
+        Increment {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.incrementExact(long1);
+            }
+        },
+        Decrement {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.decrementExact(long1);
+            }
+        },
+        Negate {
+            @Override
+            Object execMathMethod() {
+                return longR = Math.negateExact(long1);
+            }
+        };
+        protected long long1;
+        protected long long2;
+        protected long longR;
+
+        abstract Object execMathMethod();
+
+        @Override
+        public Executable getExecutable() {
+            try {
+                return getClass().getDeclaredMethod("execMathMethod");
+            } catch (NoSuchMethodException e) {
+                throw new RuntimeException("Test bug, no such method: " + e);
+            }
+        }
+
+        @Override
+        public Callable<Integer> getCallable() {
+            return null;
+        }
+
+        @Override
+        public boolean isOsr() {
+            return false;
+        }
+    }
+}
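
Each enum constant carries its operands as instance fields and overrides execMathMethod with the corresponding Math.*Exact call; getExecutable() reflects that override so the WhiteBox API can compile and deoptimize it selectively. A usage sketch, assuming the classes above (and the CompilerWhiteBoxTest interface they implement) are compiled into the same default package:

    // Usage sketch: wiring one intrinsic case to the compilation controls.
    import java.lang.reflect.Executable;

    public class MathIntrinsicUsage {
        public static void main(String[] args) {
            MathIntrinsic.IntIntrinsic add = MathIntrinsic.IntIntrinsic.Add;
            Executable target = add.getExecutable();   // what WhiteBox compiles/deoptimizes
            Object result = add.execMathMethod();      // runs Math.addExact(int1, int2)
            System.out.println(target + " -> " + result);
        }
    }
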
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/MultiplyExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build MultiplyExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics MultiplyExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class MultiplyExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Multiply).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/MultiplyExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build MultiplyExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics MultiplyExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics MultiplyExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class MultiplyExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Multiply).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/NegateExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build NegateExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics NegateExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class NegateExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Negate).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/NegateExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build NegateExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics NegateExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics NegateExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class NegateExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Negate).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/SubtractExactIntTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build SubtractExactIntTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics SubtractExactIntTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactIntTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class SubtractExactIntTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.IntTest(MathIntrinsic.IntIntrinsic.Subtract).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/SubtractExactLongTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary /testlibrary/whitebox /compiler/whitebox
+ * @build SubtractExactLongTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs_neg.log -XX:-UseMathExactIntrinsics SubtractExactLongTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -XX:+LogCompilation
+ *                   -XX:CompileCommand=compileonly,MathIntrinsic*::execMathMethod
+ *                   -XX:LogFile=hs.log -XX:+UseMathExactIntrinsics SubtractExactLongTest
+ * @run main Verifier hs_neg.log hs.log
+ */
+
+public class SubtractExactLongTest {
+
+    public static void main(String[] args) throws Exception {
+        new IntrinsicBase.LongTest(MathIntrinsic.LongIntrinsic.Subtract).test();
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/intrinsics/mathexact/sanity/Verifier.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.util.Properties;
+
+public class Verifier {
+
+    public static void main(String[] args) throws Exception {
+        if (args.length == 0)
+            throw new RuntimeException("Test bug, nothing to verify");
+        for (String hsLogFile : args) {
+            verify(hsLogFile);
+        }
+    }
+
+    private static void verify(String hsLogFile) throws Exception {
+        System.out.println("Verifying " + hsLogFile);
+
+        final Properties expectedProperties = new Properties();
+        try (FileReader reader = new FileReader(hsLogFile + ".verify.properties")) {
+            expectedProperties.load(reader);
+        }
+
+        int fullMatchCnt = 0;
+        int suspectCnt = 0;
+        final String intrinsicId = expectedProperties.getProperty("intrinsic.name");
+        final String prefix = "<intrinsic id='";
+        final String prefixWithId = prefix + intrinsicId + "'";
+        final int expectedCount = Integer.parseInt(expectedProperties.getProperty("intrinsic.expectedCount"));
+
+        try (BufferedReader r = new BufferedReader(new FileReader(hsLogFile))) {
+            String s;
+            while ((s = r.readLine()) != null) {
+                if (s.startsWith(prefix)) {
+                    if (s.startsWith(prefixWithId)) {
+                        fullMatchCnt++;
+                    } else {
+                        suspectCnt++;
+                        System.out.println("WARNING: Other intrinsic detected " + s);
+                    }
+                }
+            }
+        }
+
+        System.out.println("Intrinsic " + intrinsicId + " verification, expected: " + expectedCount + ", matched: " + fullMatchCnt + ", suspected: " + suspectCnt);
+        if (expectedCount != fullMatchCnt)
+            throw new RuntimeException("Unexpected count of intrinsic  " + prefixWithId + " expected:" + expectedCount + ", matched: " + fullMatchCnt + ", suspected: " + suspectCnt);
+    }
+}
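
Verifier pairs each HotSpot log with a companion <log>.verify.properties file naming the intrinsic and how many <intrinsic id='...'> entries the log must contain. The keys intrinsic.name and intrinsic.expectedCount are fixed by the code above; the values below are a hypothetical example only, since the actual intrinsic id strings come from HotSpot's -XX:+LogCompilation output.

    # hs.log.verify.properties (hypothetical values)
    intrinsic.name=_multiplyExactI
    intrinsic.expectedCount=1
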
--- a/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -159,34 +159,18 @@
     }
 
     final static String[] classNames = {
-            "java.lang.invoke.AbstractValidatingLambdaMetafactory",
-            "java.lang.invoke.BoundMethodHandle",
             "java.lang.invoke.CallSite",
             "java.lang.invoke.ConstantCallSite",
-            "java.lang.invoke.DirectMethodHandle",
-            "java.lang.invoke.InnerClassLambdaMetafactory",
-            "java.lang.invoke.InvokeDynamic",
-            "java.lang.invoke.InvokeGeneric",
-            "java.lang.invoke.InvokerBytecodeGenerator",
-            "java.lang.invoke.Invokers",
             "java.lang.invoke.LambdaConversionException",
-            "java.lang.invoke.LambdaForm",
             "java.lang.invoke.LambdaMetafactory",
-            "java.lang.invoke.MemberName",
             "java.lang.invoke.MethodHandle",
-            "java.lang.invoke.MethodHandleImpl",
             "java.lang.invoke.MethodHandleInfo",
-            "java.lang.invoke.MethodHandleNatives",
             "java.lang.invoke.MethodHandleProxies",
             "java.lang.invoke.MethodHandles",
-            "java.lang.invoke.MethodHandleStatics",
             "java.lang.invoke.MethodType",
-            "java.lang.invoke.MethodTypeForm",
             "java.lang.invoke.MutableCallSite",
             "java.lang.invoke.SerializedLambda",
-            "java.lang.invoke.SimpleMethodHandle",
             "java.lang.invoke.SwitchPoint",
-            "java.lang.invoke.TypeConvertingMethodAdapter",
             "java.lang.invoke.VolatileCallSite",
             "java.lang.invoke.WrongMethodTypeException"
     };
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/startup/NumCompilerThreadsCheck.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8034775
+ * @summary Ensures correct minimal number of compiler threads (provided by -XX:CICompilerCount=)
+ * @library /testlibrary
+ */
+import com.oracle.java.testlibrary.*;
+
+public class NumCompilerThreadsCheck {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:CICompilerCount=-1");
+    OutputAnalyzer out = new OutputAnalyzer(pb.start());
+
+    String expectedOutput = "CICompilerCount of -1 is invalid";
+    out.shouldContain(expectedOutput);
+  }
+}
--- a/test/compiler/startup/SmallCodeCacheStartup.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/startup/SmallCodeCacheStartup.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,20 +24,13 @@
 /*
  * @test
  * @bug 8023014
- * @summary Test ensures that there is no crash when compiler initialization fails
- * @library /testlibrary
- *
+ * @summary Test ensures that there is no crash if there is not enough ReservedCodeCacheSize
+ *          to initialize all compiler threads. The option -Xcomp gives the VM more time
+ *          to trigger the old bug.
+ * @run main/othervm -XX:ReservedCodeCacheSize=3m -XX:CICompilerCount=64 -Xcomp SmallCodeCacheStartup
  */
-import com.oracle.java.testlibrary.*;
-
 public class SmallCodeCacheStartup {
   public static void main(String[] args) throws Exception {
-    ProcessBuilder pb;
-    OutputAnalyzer out;
-
-    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
-    out = new OutputAnalyzer(pb.start());
-    out.shouldContain("no space to run compiler");
-    out.shouldHaveExitValue(0);
+    System.out.println("TEST PASSED");
   }
 }
--- a/test/compiler/startup/StartupOutput.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/startup/StartupOutput.java	Wed Mar 12 13:30:08 2014 +0100
@@ -25,8 +25,7 @@
  * @test
  * @bug 8026949
  * @summary Test ensures correct VM output during startup
- * @library ../../testlibrary
- *
+ * @library /testlibrary
  */
 import com.oracle.java.testlibrary.*;
 
--- a/test/compiler/tiered/NonTieredLevelsTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/tiered/NonTieredLevelsTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -30,7 +30,7 @@
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:-TieredCompilation
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   -XX:CompileCommand=compileonly,SimpleTestCase$Helper::*
  *                   NonTieredLevelsTest
  * @summary Verify that only one level can be used
  * @author igor.ignatyev@oracle.com
@@ -59,9 +59,7 @@
                     + "TieredCompilation. Skip test.");
             return;
         }
-        for (TestCase test : TestCase.values()) {
-            new NonTieredLevelsTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(NonTieredLevelsTest::new, args);
     }
 
     private NonTieredLevelsTest(TestCase testCase) {
@@ -80,7 +78,7 @@
         checkLevel(AVAILABLE_COMP_LEVEL, compLevel);
         int bci = WHITE_BOX.getMethodEntryBci(method);
         deoptimize();
-        if (!testCase.isOsr) {
+        if (!testCase.isOsr()) {
             for (int level = 1; level <= COMP_LEVEL_MAX; ++level) {
                 if (IS_AVAILABLE_COMPLEVEL.test(level)) {
                     testAvailableLevel(level, bci);
@@ -94,3 +92,4 @@
         }
     }
 }
+
--- a/test/compiler/tiered/TieredLevelsTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/tiered/TieredLevelsTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -28,7 +28,7 @@
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+TieredCompilation
  *                   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *                   -XX:CompileCommand=compileonly,TestCase$Helper::*
+ *                   -XX:CompileCommand=compileonly,SimpleTestCase$Helper::*
  *                   TieredLevelsTest
  * @summary Verify that all levels &lt; 'TieredStopAtLevel' can be used
  * @author igor.ignatyev@oracle.com
@@ -40,9 +40,7 @@
                     + "TieredCompilation. Skip test.");
             return;
         }
-        for (TestCase test : TestCase.values()) {
-            new TieredLevelsTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(TieredLevelsTest::new, args);
     }
 
     private TieredLevelsTest(TestCase testCase) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/types/TestMeetTopArrayExactConstantArray.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027571
+ * @summary meet of TopPTR exact array with constant array is not symmetric
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestMeetTopArrayExactConstantArray
+ *
+ */
+
+public class TestMeetTopArrayExactConstantArray {
+
+    static class A {
+    }
+
+    static class B {
+    }
+
+    static class C extends A {
+    }
+
+    static class D extends C {
+    }
+
+    final static B[] b = new B[10];
+
+    static void m0(Object[] o) {
+        if (o.getClass() == Object[].class) {
+        }
+    }
+
+    static void m1(Object[] o, boolean cond) {
+        if (cond) {
+            o = b;
+        }
+        m0(o);
+    }
+
+    static void m2(Object[] o, boolean cond1, boolean cond2) {
+        if (cond1) {
+            m1(o, cond2);
+        }
+    }
+
+    static void m3(C[] o, boolean cond1, boolean cond2, boolean cond3) {
+        if (cond1) {
+            m2(o, cond2, cond3);
+        }
+    }
+
+    static public void main(String[] args) {
+        A[] a = new A[10];
+        D[] d = new D[10];
+        Object[] o = new Object[10];
+        for (int i = 0; i < 5000; i++) {
+            // record in profiling that the if in m0 succeeds
+            m0(o);
+            // record some profiling for m2 and m1
+            m2(a, true, (i%2) == 0);
+            // record some profiling for m3 and a conflicting profile for m2
+            m3(d, true, false, (i%2) == 0);
+        }
+
+        // get m3 compiled. The if in m0 will be optimized because of argument profiling in m3
+        C[] c = new C[10];
+        for (int i = 0; i < 20000; i++) {
+            m3(c, true, false, (i%2) == 0);
+        }
+        // make m3 not entrant and make the if in m0 fail
+        m3(c, true, true, false);
+        m3(c, true, true, false);
+        m3(c, true, true, false);
+        m3(c, true, true, false);
+
+        // make m3 recompile, this time with the if in m0 not optimized
+        // on entry to m3, argument o is of type C[], profiled C[]
+        // on entry to m1, argument o is of type C[], speculative C[] exact, profiled A[]. Speculative becomes AnyNull
+        // after the if in m1, speculative type of o becomes constant from final field b
+        // the true if branch in m0 does a join between the type of o of speculative type constant from final field b and exact klass Object[]
+        for (int i = 0; i < 20000; i++) {
+            m3(c, true, false, (i%2) == 0);
+        }
+
+        System.out.println("TEST PASSED");
+    }
+}
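
The warm-up loops above are the standard device these tests use to steer C2's speculative types: call a method thousands of times with one argument type so the profile collected under -XX:TypeProfileLevel=222 records it, then switch to a different type so the speculation fails and the method is deoptimized and recompiled. A minimal, self-contained sketch of that pattern (all names here are illustrative, not part of the test above):

    // Hedged sketch: seed a per-call-site argument profile, then break the
    // speculation it feeds, forcing an uncommon trap and recompilation.
    public class ProfileSeedDemo {
        static void callee(Object[] o) { }                   // argument type is profiled

        public static void main(String[] args) {
            Object[] seed = new String[10];
            for (int i = 0; i < 20000; i++) {
                callee(seed);                                // profile records String[]
            }
            callee(new Integer[10]);                         // speculation fails, deopt
        }
    }
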
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/types/TestSpeculationFailedHigherEqual.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027422
+ * @summary type methods shouldn't always operate on speculative part
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestSpeculationFailedHigherEqual
+ *
+ */
+
+public class TestSpeculationFailedHigherEqual {
+
+    static class A {
+        void m() {}
+        int i;
+    }
+
+    static class C extends A {
+    }
+
+    static C c;
+
+    static A m1(A a, boolean cond) {
+        // speculative type for a is C not null
+        if (cond) {
+            a = c;
+        }
+        // speculative type for a is C (may be null)
+        int i = a.i;
+        return a;
+    }
+
+    static public void main(String[] args) {
+        C c = new C();
+        TestSpeculationFailedHigherEqual.c = c;
+        for (int i = 0; i < 20000; i++) {
+            m1(c, i%2 == 0);
+        }
+
+        System.out.println("TEST PASSED");
+    }
+}
--- a/test/compiler/types/TypeSpeculation.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/types/TypeSpeculation.java	Wed Mar 12 13:30:08 2014 +0100
@@ -25,7 +25,7 @@
  * @test
  * @bug 8024070
  * @summary Test that type speculation doesn't cause incorrect execution
- * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 TypeSpeculation
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation TypeSpeculation
  *
  */
 
@@ -398,6 +398,133 @@
         return true;
     }
 
+    // java/lang/Object:AnyNull:exact *,iid=top
+    // meets
+    // stable:bottom[int:max..0]:NotNull *
+    static void test10_4(Object o) {
+    }
+
+    static void test10_3(Object o, boolean b) {
+        if (b) {
+            test10_4(o);
+        }
+    }
+
+    static void test10_2(Object o, boolean b1, boolean b2) {
+        if (b1) {
+            test10_3(o, b2);
+        }
+    }
+
+    static void test10_1(B[] b, boolean b1, boolean b2) {
+        test10_2(b, b1, b2);
+    }
+
+    static boolean test10() {
+        Object o = new Object();
+        A[] a = new A[10];
+        B[] b = new B[10];
+        B[] c = new C[10];
+        for (int i = 0; i < 20000; i++) {
+            test10_1(b, false, false);
+            test10_1(c, false, false);
+            test10_2(a, true, false);
+            test10_3(o, true);
+        }
+        return true;
+    }
+
+    // stable:TypeSpeculation$B:TopPTR *,iid=top[int:max..0]:TopPTR *,iid=top
+    // meets
+    // java/lang/Object:AnyNull:exact *,iid=top
+    static void test11_3(Object o) {
+    }
+
+    static void test11_2(Object o, boolean b) {
+        if (b) {
+            test11_3(o);
+        }
+    }
+
+    static void test11_1(B[] b, boolean bb) {
+        test11_2(b, bb);
+    }
+
+    static boolean test11() {
+        Object o = new Object();
+        B[] b = new B[10];
+        B[] c = new C[10];
+        for (int i = 0; i < 20000; i++) {
+            test11_1(b, false);
+            test11_1(c, false);
+            test11_2(o, true);
+        }
+        return true;
+    }
+
+    // TypeSpeculation$I *
+    // meets
+    // java/lang/Object:AnyNull *,iid=top
+    static void test12_3(Object o) {
+    }
+
+    static void test12_2(Object o, boolean b) {
+        if (b) {
+            test12_3(o);
+        }
+    }
+
+    static void test12_1(I i, boolean b) {
+        test12_2(i, b);
+    }
+
+    static boolean test12() {
+        Object o = new Object();
+        B b = new B();
+        C c = new C();
+        for (int i = 0; i < 20000; i++) {
+            test12_1(b, false);
+            test12_1(c, false);
+            test12_2(o, true);
+        }
+        return true;
+    }
+
+    // stable:bottom[int:max..0]:NotNull *
+    // meets
+    // stable:TypeSpeculation$A:TopPTR *,iid=top[int:max..0]:AnyNull:exact *,iid=top
+    static Object test13_3(Object o, boolean b) {
+        Object oo;
+        if (b) {
+            oo = o;
+        } else {
+            oo = new A[10];
+        }
+        return oo;
+    }
+
+    static void test13_2(Object o, boolean b1, boolean b2) {
+        if (b1) {
+            test13_3(o, b2);
+        }
+    }
+
+    static void test13_1(B[] b, boolean b1, boolean b2) {
+        test13_2(b, b1, b2);
+    }
+
+    static boolean test13() {
+        A[] a = new A[10];
+        B[] b = new B[10];
+        B[] c = new C[10];
+        for (int i = 0; i < 20000; i++) {
+            test13_1(b, false, false);
+            test13_1(c, false, false);
+            test13_2(a, true, (i%2) == 0);
+        }
+        return true;
+    }
+
     static public void main(String[] args) {
         boolean success = true;
 
@@ -419,6 +546,14 @@
 
         success = test9() && success;
 
+        success = test10() && success;
+
+        success = test11() && success;
+
+        success = test12() && success;
+
+        success = test13() && success;
+
         if (success) {
             System.out.println("TEST PASSED");
         } else {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/StackOverflowGuardPagesOff.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8029383
+ * @summary a stack overflow when the callee is marked for deoptimization causes a crash
+ * @run main/othervm -XX:TieredStopAtLevel=1 -XX:-BackgroundCompilation -XX:CompileCommand=dontinline,StackOverflowGuardPagesOff::m1 -XX:CompileCommand=exclude,StackOverflowGuardPagesOff::m2 -Xss256K -XX:-UseOnStackReplacement StackOverflowGuardPagesOff
+ *
+ */
+
+// This test calls m2 recursively until a stack overflow, then calls
+// m3, which calls m1. m1 triggers B's class loading; as a result, m1
+// and m3 need to be deoptimized. Deoptimization of m1 causes a stack
+// overflow exception to be thrown which is propagated to m3 in the
+// deopt blob. If the guard pages are not enabled, the stack bang in
+// the deopt blob triggers a crash.
+public class StackOverflowGuardPagesOff {
+
+    static class A {
+        void m() {}
+    }
+
+    static class B extends A {
+        void m() {}
+    }
+
+    static void m1(boolean deopt, A a) {
+        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+        l508, l509, l510, l511;
+
+        long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
+        ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
+        ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
+        ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
+        ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
+        ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
+        ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
+        ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
+        ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
+        ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
+        ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
+        ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
+        ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
+        ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
+        ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
+        ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
+        ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
+        ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
+        ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
+        ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
+        ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
+        ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
+        ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
+        ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
+        ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
+        ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
+        ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
+        ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
+        ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
+        ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
+        ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
+        ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
+        ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
+        ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
+        ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
+        ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
+        ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
+        ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
+        ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
+        ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
+        ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
+        ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
+        ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
+        ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
+        ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
+        ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
+        ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
+        ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
+        ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
+        ll508, ll509, ll510, ll511;
+
+        a.m();
+
+        if (deopt) {
+            do_load = true;
+            while (!load_done);
+        }
+    }
+
+    static void m2(boolean deopt, A a) {
+        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+        l508, l509, l510, l511;
+
+        try {
+            m2(deopt, a);
+        } catch (StackOverflowError e) {
+            m3(deopt, a);
+        }
+    }
+
+    static void m3(boolean deopt, A a) {
+        long l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12,
+        l13, l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24,
+        l25, l26, l27, l28, l29, l30, l31, l32, l33, l34, l35, l36,
+        l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47, l48,
+        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58, l59, l60,
+        l61, l62, l63, l64, l65, l66, l67, l68, l69, l70, l71, l72,
+        l73, l74, l75, l76, l77, l78, l79, l80, l81, l82, l83, l84,
+        l85, l86, l87, l88, l89, l90, l91, l92, l93, l94, l95, l96,
+        l97, l98, l99, l100, l101, l102, l103, l104, l105, l106, l107,
+        l108, l109, l110, l111, l112, l113, l114, l115, l116, l117,
+        l118, l119, l120, l121, l122, l123, l124, l125, l126, l127,
+        l128, l129, l130, l131, l132, l133, l134, l135, l136, l137,
+        l138, l139, l140, l141, l142, l143, l144, l145, l146, l147,
+        l148, l149, l150, l151, l152, l153, l154, l155, l156, l157,
+        l158, l159, l160, l161, l162, l163, l164, l165, l166, l167,
+        l168, l169, l170, l171, l172, l173, l174, l175, l176, l177,
+        l178, l179, l180, l181, l182, l183, l184, l185, l186, l187,
+        l188, l189, l190, l191, l192, l193, l194, l195, l196, l197,
+        l198, l199, l200, l201, l202, l203, l204, l205, l206, l207,
+        l208, l209, l210, l211, l212, l213, l214, l215, l216, l217,
+        l218, l219, l220, l221, l222, l223, l224, l225, l226, l227,
+        l228, l229, l230, l231, l232, l233, l234, l235, l236, l237,
+        l238, l239, l240, l241, l242, l243, l244, l245, l246, l247,
+        l248, l249, l250, l251, l252, l253, l254, l255, l256, l257,
+        l258, l259, l260, l261, l262, l263, l264, l265, l266, l267,
+        l268, l269, l270, l271, l272, l273, l274, l275, l276, l277,
+        l278, l279, l280, l281, l282, l283, l284, l285, l286, l287,
+        l288, l289, l290, l291, l292, l293, l294, l295, l296, l297,
+        l298, l299, l300, l301, l302, l303, l304, l305, l306, l307,
+        l308, l309, l310, l311, l312, l313, l314, l315, l316, l317,
+        l318, l319, l320, l321, l322, l323, l324, l325, l326, l327,
+        l328, l329, l330, l331, l332, l333, l334, l335, l336, l337,
+        l338, l339, l340, l341, l342, l343, l344, l345, l346, l347,
+        l348, l349, l350, l351, l352, l353, l354, l355, l356, l357,
+        l358, l359, l360, l361, l362, l363, l364, l365, l366, l367,
+        l368, l369, l370, l371, l372, l373, l374, l375, l376, l377,
+        l378, l379, l380, l381, l382, l383, l384, l385, l386, l387,
+        l388, l389, l390, l391, l392, l393, l394, l395, l396, l397,
+        l398, l399, l400, l401, l402, l403, l404, l405, l406, l407,
+        l408, l409, l410, l411, l412, l413, l414, l415, l416, l417,
+        l418, l419, l420, l421, l422, l423, l424, l425, l426, l427,
+        l428, l429, l430, l431, l432, l433, l434, l435, l436, l437,
+        l438, l439, l440, l441, l442, l443, l444, l445, l446, l447,
+        l448, l449, l450, l451, l452, l453, l454, l455, l456, l457,
+        l458, l459, l460, l461, l462, l463, l464, l465, l466, l467,
+        l468, l469, l470, l471, l472, l473, l474, l475, l476, l477,
+        l478, l479, l480, l481, l482, l483, l484, l485, l486, l487,
+        l488, l489, l490, l491, l492, l493, l494, l495, l496, l497,
+        l498, l499, l500, l501, l502, l503, l504, l505, l506, l507,
+        l508, l509, l510, l511;
+
+        long ll0, ll1, ll2, ll3, ll4, ll5, ll6, ll7, ll8, ll9, ll10, ll11, ll12,
+        ll13, ll14, ll15, ll16, ll17, ll18, ll19, ll20, ll21, ll22, ll23, ll24,
+        ll25, ll26, ll27, ll28, ll29, ll30, ll31, ll32, ll33, ll34, ll35, ll36,
+        ll37, ll38, ll39, ll40, ll41, ll42, ll43, ll44, ll45, ll46, ll47, ll48,
+        ll49, ll50, ll51, ll52, ll53, ll54, ll55, ll56, ll57, ll58, ll59, ll60,
+        ll61, ll62, ll63, ll64, ll65, ll66, ll67, ll68, ll69, ll70, ll71, ll72,
+        ll73, ll74, ll75, ll76, ll77, ll78, ll79, ll80, ll81, ll82, ll83, ll84,
+        ll85, ll86, ll87, ll88, ll89, ll90, ll91, ll92, ll93, ll94, ll95, ll96,
+        ll97, ll98, ll99, ll100, ll101, ll102, ll103, ll104, ll105, ll106, ll107,
+        ll108, ll109, ll110, ll111, ll112, ll113, ll114, ll115, ll116, ll117,
+        ll118, ll119, ll120, ll121, ll122, ll123, ll124, ll125, ll126, ll127,
+        ll128, ll129, ll130, ll131, ll132, ll133, ll134, ll135, ll136, ll137,
+        ll138, ll139, ll140, ll141, ll142, ll143, ll144, ll145, ll146, ll147,
+        ll148, ll149, ll150, ll151, ll152, ll153, ll154, ll155, ll156, ll157,
+        ll158, ll159, ll160, ll161, ll162, ll163, ll164, ll165, ll166, ll167,
+        ll168, ll169, ll170, ll171, ll172, ll173, ll174, ll175, ll176, ll177,
+        ll178, ll179, ll180, ll181, ll182, ll183, ll184, ll185, ll186, ll187,
+        ll188, ll189, ll190, ll191, ll192, ll193, ll194, ll195, ll196, ll197,
+        ll198, ll199, ll200, ll201, ll202, ll203, ll204, ll205, ll206, ll207,
+        ll208, ll209, ll210, ll211, ll212, ll213, ll214, ll215, ll216, ll217,
+        ll218, ll219, ll220, ll221, ll222, ll223, ll224, ll225, ll226, ll227,
+        ll228, ll229, ll230, ll231, ll232, ll233, ll234, ll235, ll236, ll237,
+        ll238, ll239, ll240, ll241, ll242, ll243, ll244, ll245, ll246, ll247,
+        ll248, ll249, ll250, ll251, ll252, ll253, ll254, ll255, ll256, ll257,
+        ll258, ll259, ll260, ll261, ll262, ll263, ll264, ll265, ll266, ll267,
+        ll268, ll269, ll270, ll271, ll272, ll273, ll274, ll275, ll276, ll277,
+        ll278, ll279, ll280, ll281, ll282, ll283, ll284, ll285, ll286, ll287,
+        ll288, ll289, ll290, ll291, ll292, ll293, ll294, ll295, ll296, ll297,
+        ll298, ll299, ll300, ll301, ll302, ll303, ll304, ll305, ll306, ll307,
+        ll308, ll309, ll310, ll311, ll312, ll313, ll314, ll315, ll316, ll317,
+        ll318, ll319, ll320, ll321, ll322, ll323, ll324, ll325, ll326, ll327,
+        ll328, ll329, ll330, ll331, ll332, ll333, ll334, ll335, ll336, ll337,
+        ll338, ll339, ll340, ll341, ll342, ll343, ll344, ll345, ll346, ll347,
+        ll348, ll349, ll350, ll351, ll352, ll353, ll354, ll355, ll356, ll357,
+        ll358, ll359, ll360, ll361, ll362, ll363, ll364, ll365, ll366, ll367,
+        ll368, ll369, ll370, ll371, ll372, ll373, ll374, ll375, ll376, ll377,
+        ll378, ll379, ll380, ll381, ll382, ll383, ll384, ll385, ll386, ll387,
+        ll388, ll389, ll390, ll391, ll392, ll393, ll394, ll395, ll396, ll397,
+        ll398, ll399, ll400, ll401, ll402, ll403, ll404, ll405, ll406, ll407,
+        ll408, ll409, ll410, ll411, ll412, ll413, ll414, ll415, ll416, ll417,
+        ll418, ll419, ll420, ll421, ll422, ll423, ll424, ll425, ll426, ll427,
+        ll428, ll429, ll430, ll431, ll432, ll433, ll434, ll435, ll436, ll437,
+        ll438, ll439, ll440, ll441, ll442, ll443, ll444, ll445, ll446, ll447,
+        ll448, ll449, ll450, ll451, ll452, ll453, ll454, ll455, ll456, ll457,
+        ll458, ll459, ll460, ll461, ll462, ll463, ll464, ll465, ll466, ll467,
+        ll468, ll469, ll470, ll471, ll472, ll473, ll474, ll475, ll476, ll477,
+        ll478, ll479, ll480, ll481, ll482, ll483, ll484, ll485, ll486, ll487,
+        ll488, ll489, ll490, ll491, ll492, ll493, ll494, ll495, ll496, ll497,
+        ll498, ll499, ll500, ll501, ll502, ll503, ll504, ll505, ll506, ll507,
+        ll508, ll509, ll510, ll511;
+
+        a.m();
+
+        m1(deopt, a);
+    }
+
+    // Used for synchronization between the main thread and the thread
+    // responsible for class loading
+    static volatile boolean thread_started = false;
+    static volatile boolean do_load = false;
+    static volatile boolean load_done = false;
+
+    static public void main(String[] args) {
+        // This thread does the loading of B. If m1 does it, the class
+        // loading can cause stack overflows.
+        Thread thread = new Thread() {
+            public void run() {
+                thread_started = true;
+                while(!do_load);
+                new B();
+                load_done = true;
+            }
+        };
+        thread.start();
+        while(!thread_started);
+        // get m3 and m1 compiled
+        A a = new A();
+        for (int i = 0; i < 5000; i++) {
+            m3(false, a);
+            m1(false, a);
+        }
+        m2(true, a);
+
+        System.out.println("TEST PASSED");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TestLockEliminatedAtDeopt.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8032011
+ * @summary biased locking's revoke_bias locks monitor in compiled frame with eliminated lock
+ * @run main/othervm -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestLockEliminatedAtDeopt$A.m2 -XX:-BackgroundCompilation -XX:BiasedLockingStartupDelay=0 TestLockEliminatedAtDeopt
+ *
+ */
+
+public class TestLockEliminatedAtDeopt {
+
+    static class A {
+        void m() {
+        }
+
+        // This lock is not eliminated but is biased to the main
+        // thread on the first call
+        synchronized void m2(boolean trap) {
+            if (trap) {
+                new B();
+            }
+        }
+    }
+
+    static class B extends A {
+        void m() {
+        }
+    }
+
+    static void m1(boolean trap) {
+        A a = new A();
+        // This lock is eliminated by C2
+        synchronized(a) {
+            a.m2(trap);
+            a.m();
+        }
+    }
+
+    public static void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            m1(false);
+        }
+        // Trigger an uncommon trap in A.m2() (class unloaded) and
+        // deoptimization of m1() (CHA invalidated). The uncommon trap
+        // code locks the monitor in m1's frame, where it was eliminated.
+        m1(true);
+    }
+}
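
For context, the lock in m1 is removed by C2's escape analysis: when the locked object provably never escapes the compiled method, the monitorenter/monitorexit operations are elided, and deoptimization must re-materialize the lock, which is what bug 8032011 exercises against biased locking. A minimal sketch of a lock that qualifies, assuming the default -XX:+DoEscapeAnalysis and -XX:+EliminateLocks settings:

    // Hedged sketch: 'local' never escapes hot(), so a C2-compiled version
    // of hot() carries no monitor operations for this synchronized block.
    public class LockElisionDemo {
        static int counter;

        static void hot() {
            Object local = new Object();
            synchronized (local) {                           // elided by escape analysis
                counter++;
            }
        }

        public static void main(String[] args) {
            for (int i = 0; i < 20000; i++) {
                hot();                                       // warm up to C2
            }
        }
    }
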
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/uncommontrap/TestSpecTrapClassUnloading.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8031752
+ * @summary speculative traps need to be cleaned up at GC
+ * @run main/othervm -XX:-TieredCompilation -XX:-UseOnStackReplacement -XX:-BackgroundCompilation -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:TypeProfileLevel=222 -XX:CompileCommand=exclude,java.lang.reflect.Method::invoke -XX:CompileCommand=exclude,sun.reflect.DelegatingMethodAccessorImpl::invoke -Xmx1M TestSpecTrapClassUnloading
+ *
+ */
+
+import java.lang.reflect.Method;
+
+public class TestSpecTrapClassUnloading {
+    static class B {
+        final public boolean m(Object o) {
+            if (o.getClass() == B.class) {
+                return true;
+            }
+            return false;
+        }
+    }
+
+    static class MemoryChunk {
+        MemoryChunk other;
+        long[] array;
+        MemoryChunk(MemoryChunk other) {
+            this.other = other; // chain the chunks so earlier ones stay reachable
+            array = new long[1024 * 1024 * 1024];
+        }
+    }
+
+    static void m1(B b, Object o) {
+        b.m(o);
+    }
+
+    static void m2(B b, Object o) {
+        b.m(o);
+    }
+
+    public static void main(String[] args) throws Exception {
+        Method m = B.class.getMethod("m", Object.class);
+        Object o = new Object();
+        B b = new B();
+
+        // add speculative trap in B.m() for m1
+        for (int i = 0; i < 20000; i++) {
+            m1(b, b);
+        }
+        m1(b, o);
+
+        // add speculative trap in B.m() for code generated by reflection
+        for (int i = 0; i < 20000; i++) {
+            m.invoke(b, b);
+        }
+        m.invoke(b, o);
+
+        m = null;
+
+        // add speculative trap in B.m() for m2
+        for (int i = 0; i < 20000; i++) {
+            m2(b, b);
+        }
+        m2(b, o);
+
+        // Exhaust memory which causes the code generated by
+        // reflection to be unloaded but B.m() is not.
+        MemoryChunk root = null;
+        try {
+            while (true) {
+                root = new MemoryChunk(root);
+            }
+        } catch (OutOfMemoryError e) {
+            root = null;
+        }
+    }
+}
--- a/test/compiler/whitebox/ClearMethodStateTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/ClearMethodStateTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -21,25 +21,25 @@
  * questions.
  */
 
+import java.util.function.Function;
+
 /*
  * @test ClearMethodStateTest
  * @bug 8006683 8007288 8022832
  * @library /testlibrary /testlibrary/whitebox
  * @build ClearMethodStateTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* ClearMethodStateTest
+ * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* ClearMethodStateTest
  * @summary testing of WB::clearMethodState()
  * @author igor.ignatyev@oracle.com
  */
 public class ClearMethodStateTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new ClearMethodStateTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(ClearMethodStateTest::new, args);
     }
 
-    public ClearMethodStateTest(TestCase testCase) {
+    private ClearMethodStateTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
@@ -63,7 +63,7 @@
         deoptimize();
         checkNotCompiled();
 
-        if (testCase.isOsr) {
+        if (testCase.isOsr()) {
             // part test isn't applicable for OSR test case
             return;
         }
--- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -31,6 +31,7 @@
 import java.lang.reflect.Method;
 import java.util.Objects;
 import java.util.concurrent.Callable;
+import java.util.function.Function;
 
 /**
  * Abstract class for WhiteBox testing of JIT.
@@ -50,7 +51,7 @@
     protected static int COMP_LEVEL_FULL_PROFILE = 3;
     /** {@code CompLevel::CompLevel_full_optimization} -- C2 or Shark */
     protected static int COMP_LEVEL_FULL_OPTIMIZATION = 4;
-    /** Maximal value for CompLeveL */
+    /** Maximal value for CompLevel */
     protected static int COMP_LEVEL_MAX = COMP_LEVEL_FULL_OPTIMIZATION;
 
     /** Instance of WhiteBox */
@@ -75,8 +76,7 @@
     /** count of invocation to triger OSR compilation */
     protected static final long BACKEDGE_THRESHOLD;
     /** Value of {@code java.vm.info} (interpreted|mixed|comp mode) */
-    protected static final String MODE
-            = System.getProperty("java.vm.info");
+    protected static final String MODE = System.getProperty("java.vm.info");
 
     static {
         if (TIERED_COMPILATION) {
@@ -133,6 +133,20 @@
         return compLevel == COMP_LEVEL_FULL_OPTIMIZATION;
     }
 
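+    /**
+     * Runs the test cases: all {@linkplain SimpleTestCase}s when
+     * {@code args} is empty, otherwise only the cases named in {@code args}.
+     */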
+    protected static void main(
+            Function<TestCase, CompilerWhiteBoxTest> constructor,
+            String[] args) {
+        if (args.length == 0) {
+            for (TestCase test : SimpleTestCase.values()) {
+                constructor.apply(test).runTest();
+            }
+        } else {
+            for (String name : args) {
+                constructor.apply(SimpleTestCase.valueOf(name)).runTest();
+            }
+        }
+    }
+
     /** tested method */
     protected final Executable method;
     protected final TestCase testCase;
@@ -145,7 +159,7 @@
     protected CompilerWhiteBoxTest(TestCase testCase) {
         Objects.requireNonNull(testCase);
         System.out.println("TEST CASE:" + testCase.name());
-        method = testCase.executable;
+        method = testCase.getExecutable();
         this.testCase = testCase;
     }
 
@@ -204,7 +218,7 @@
         if (WHITE_BOX.getMethodCompilationLevel(method, true) != 0) {
             throw new RuntimeException(method + " osr_comp_level must be == 0");
         }
-   }
+    }
 
     /**
      * Checks, that {@linkplain #method} is compiled.
@@ -221,44 +235,46 @@
                     method, System.currentTimeMillis() - start);
             return;
         }
-        if (!WHITE_BOX.isMethodCompiled(method, testCase.isOsr)) {
+        if (!WHITE_BOX.isMethodCompiled(method, testCase.isOsr())) {
             throw new RuntimeException(method + " must be "
-                    + (testCase.isOsr ? "osr_" : "") + "compiled");
+                    + (testCase.isOsr() ? "osr_" : "") + "compiled");
         }
-        if (WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr) == 0) {
+        if (WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr())
+                == 0) {
             throw new RuntimeException(method
-                    + (testCase.isOsr ? " osr_" : " ")
+                    + (testCase.isOsr() ? " osr_" : " ")
                     + "comp_level must be != 0");
         }
     }
 
     protected final void deoptimize() {
-        WHITE_BOX.deoptimizeMethod(method, testCase.isOsr);
-        if (testCase.isOsr) {
+        WHITE_BOX.deoptimizeMethod(method, testCase.isOsr());
+        if (testCase.isOsr()) {
             WHITE_BOX.deoptimizeMethod(method, false);
         }
     }
 
     protected final int getCompLevel() {
-        return WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr);
+        return WHITE_BOX.getMethodCompilationLevel(method, testCase.isOsr());
     }
 
     protected final boolean isCompilable() {
         return WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_ANY,
-                testCase.isOsr);
+                testCase.isOsr());
     }
 
     protected final boolean isCompilable(int compLevel) {
-        return WHITE_BOX.isMethodCompilable(method, compLevel, testCase.isOsr);
+        return WHITE_BOX
+                .isMethodCompilable(method, compLevel, testCase.isOsr());
     }
 
     protected final void makeNotCompilable() {
         WHITE_BOX.makeMethodNotCompilable(method, COMP_LEVEL_ANY,
-                testCase.isOsr);
+                testCase.isOsr());
     }
 
     protected final void makeNotCompilable(int compLevel) {
-        WHITE_BOX.makeMethodNotCompilable(method, compLevel, testCase.isOsr);
+        WHITE_BOX.makeMethodNotCompilable(method, compLevel, testCase.isOsr());
     }
 
     /**
@@ -298,7 +314,7 @@
                 WHITE_BOX.isMethodCompiled(method, true));
         System.out.printf("\tosr_comp_level:\t%d%n",
                 WHITE_BOX.getMethodCompilationLevel(method, true));
-         System.out.printf("\tin_queue:\t%b%n",
+        System.out.printf("\tin_queue:\t%b%n",
                 WHITE_BOX.isMethodQueuedForCompilation(method));
         System.out.printf("compile_queues_size:\t%d%n%n",
                 WHITE_BOX.getCompileQueuesSize());
@@ -311,13 +327,13 @@
 
     /**
      * Tries to trigger compilation of {@linkplain #method} by call
-     * {@linkplain #testCase.callable} enough times.
+     * {@linkplain TestCase#getCallable()} enough times.
      *
      * @return accumulated result
      * @see #compile(int)
      */
     protected final int compile() {
-        if (testCase.isOsr) {
+        if (testCase.isOsr()) {
             return compile(1);
         } else {
             return compile(THRESHOLD);
@@ -326,7 +342,7 @@
 
     /**
      * Tries to trigger compilation of {@linkplain #method} by call
-     * {@linkplain #testCase.callable} specified times.
+     * {@linkplain TestCase#getCallable()} specified times.
      *
      * @param count invocation count
      * @return accumulated result
@@ -336,7 +352,7 @@
         Integer tmp;
         for (int i = 0; i < count; ++i) {
             try {
-                tmp = testCase.callable.call();
+                tmp = testCase.getCallable().call();
             } catch (Exception e) {
                 tmp = null;
             }
@@ -347,19 +363,32 @@
         }
         return result;
     }
+
+    /**
+     * Utility interface providing the tested method and an object to invoke it.
+     */
+    public interface TestCase {
+        /** the name of the test case */
+        String name();
+
+        /** tested method */
+        Executable getExecutable();
+
+        /** object to invoke {@linkplain #getExecutable()} */
+        Callable<Integer> getCallable();
+
+        /** flag for OSR test case */
+        boolean isOsr();
+    }
 }
 
-/**
- * Utility structure containing tested method and object to invoke it.
- */
-enum TestCase {
+enum SimpleTestCase implements CompilerWhiteBoxTest.TestCase {
     /** constructor test case */
     CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE, false),
     /** method test case */
     METOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE, false),
     /** static method test case */
     STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE, false),
-
     /** OSR constructor test case */
     OSR_CONSTRUCTOR_TEST(Helper.OSR_CONSTRUCTOR,
             Helper.OSR_CONSTRUCTOR_CALLABLE, true),
@@ -368,20 +397,32 @@
     /** OSR static method test case */
     OSR_STATIC_TEST(Helper.OSR_STATIC, Helper.OSR_STATIC_CALLABLE, true);
 
-    /** tested method */
-    final Executable executable;
-    /** object to invoke {@linkplain #executable} */
-    final Callable<Integer> callable;
-    /** flag for OSR test case */
-    final boolean isOsr;
+    private final Executable executable;
+    private final Callable<Integer> callable;
+    private final boolean isOsr;
 
-    private TestCase(Executable executable, Callable<Integer> callable,
+    private SimpleTestCase(Executable executable, Callable<Integer> callable,
             boolean isOsr) {
         this.executable = executable;
         this.callable = callable;
         this.isOsr = isOsr;
     }
 
+    @Override
+    public Executable getExecutable() {
+        return executable;
+    }
+
+    @Override
+    public Callable<Integer> getCallable() {
+        return callable;
+    }
+
+    @Override
+    public boolean isOsr() {
+        return isOsr;
+    }
+
     private static class Helper {
 
         private static final Callable<Integer> CONSTRUCTOR_CALLABLE
@@ -436,7 +477,6 @@
             }
         };
 
-
         private static final Constructor CONSTRUCTOR;
         private static final Constructor OSR_CONSTRUCTOR;
         private static final Method METHOD;
--- a/test/compiler/whitebox/DeoptimizeAllTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,19 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build DeoptimizeAllTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeAllTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* DeoptimizeAllTest
  * @summary testing of WB::deoptimizeAll()
  * @author igor.ignatyev@oracle.com
  */
 public class DeoptimizeAllTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new DeoptimizeAllTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(DeoptimizeAllTest::new, args);
     }
 
-    public DeoptimizeAllTest(TestCase testCase) {
+    private DeoptimizeAllTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
@@ -53,7 +51,7 @@
      */
     @Override
     protected void test() throws Exception {
-        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+        if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
                 "compiled ")) {
           System.err.printf("Warning: %s is not applicable in %s%n",
                 testCase.name(), CompilerWhiteBoxTest.MODE);
--- a/test/compiler/whitebox/DeoptimizeMethodTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,19 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build DeoptimizeMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* DeoptimizeMethodTest
  * @summary testing of WB::deoptimizeMethod()
  * @author igor.ignatyev@oracle.com
  */
 public class DeoptimizeMethodTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new DeoptimizeMethodTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(DeoptimizeMethodTest::new, args);
     }
 
-    public DeoptimizeMethodTest(TestCase testCase) {
+    private DeoptimizeMethodTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
@@ -53,7 +51,7 @@
      */
     @Override
     protected void test() throws Exception {
-        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+        if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
                 "compiled ")) {
           System.err.printf("Warning: %s is not applicable in %s%n",
                 testCase.name(), CompilerWhiteBoxTest.MODE);
--- a/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,19 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build EnqueueMethodForCompilationTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* EnqueueMethodForCompilationTest
  * @summary testing of WB::enqueueMethodForCompilation()
  * @author igor.ignatyev@oracle.com
  */
 public class EnqueueMethodForCompilationTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new EnqueueMethodForCompilationTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(EnqueueMethodForCompilationTest::new, args);
     }
 
-    public EnqueueMethodForCompilationTest(TestCase testCase) {
+    private EnqueueMethodForCompilationTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
--- a/test/compiler/whitebox/IsMethodCompilableTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,7 +27,7 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build IsMethodCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
  * @author igor.ignatyev@oracle.com
  */
@@ -48,12 +48,10 @@
     }
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new IsMethodCompilableTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(IsMethodCompilableTest::new, args);
     }
 
-    public IsMethodCompilableTest(TestCase testCase) {
+    private IsMethodCompilableTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
@@ -68,7 +66,7 @@
      */
     @Override
     protected void test() throws Exception {
-        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+        if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
                 "compiled ")) {
           System.err.printf("Warning: %s is not applicable in %s%n",
                 testCase.name(), CompilerWhiteBoxTest.MODE);
@@ -89,7 +87,7 @@
         for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) {
             compileAndDeoptimize();
         }
-        if (!testCase.isOsr && !isCompilable()) {
+        if (!testCase.isOsr() && !isCompilable()) {
             // in osr test case count of deopt maybe more than iterations
             throw new RuntimeException(method + " is not compilable after "
                     + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations");
@@ -102,7 +100,7 @@
                 && isCompilable(); ++i) {
             compileAndDeoptimize();
         }
-        if (!testCase.isOsr && i != PER_METHOD_RECOMPILATION_CUTOFF) {
+        if (!testCase.isOsr() && i != PER_METHOD_RECOMPILATION_CUTOFF) {
             // in osr test case count of deopt maybe more than iterations
             throw new RuntimeException(method + " is not compilable after "
                     + i + " iterations, but must only after "
--- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,26 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build MakeMethodNotCompilableTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* MakeMethodNotCompilableTest
  * @summary testing of WB::makeMethodNotCompilable()
  * @author igor.ignatyev@oracle.com
  */
 public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest {
     private int bci;
     public static void main(String[] args) throws Exception {
-        if (args.length == 0) {
-            for (TestCase test : TestCase.values()) {
-                new MakeMethodNotCompilableTest(test).runTest();
-            }
-        } else {
-            for (String name : args) {
-                new MakeMethodNotCompilableTest(
-                        TestCase.valueOf(name)).runTest();
-            }
-        }
+        CompilerWhiteBoxTest.main(MakeMethodNotCompilableTest::new, args);
     }
 
-    public MakeMethodNotCompilableTest(TestCase testCase) {
+    private MakeMethodNotCompilableTest(TestCase testCase) {
         super(testCase);
         // to prevent inlining of #method
         WHITE_BOX.testSetDontInlineMethod(method, true);
@@ -62,7 +53,7 @@
      */
     @Override
     protected void test() throws Exception {
-        if (testCase.isOsr && CompilerWhiteBoxTest.MODE.startsWith(
+        if (testCase.isOsr() && CompilerWhiteBoxTest.MODE.startsWith(
                 "compiled ")) {
           System.err.printf("Warning: %s is not applicable in %s%n",
                 testCase.name(), CompilerWhiteBoxTest.MODE);
--- a/test/compiler/whitebox/SetDontInlineMethodTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/SetDontInlineMethodTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,19 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetDontInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetDontInlineMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* SetDontInlineMethodTest
  * @summary testing of WB::testSetDontInlineMethod()
  * @author igor.ignatyev@oracle.com
  */
 public class SetDontInlineMethodTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new SetDontInlineMethodTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(SetDontInlineMethodTest::new, args);
     }
 
-    public SetDontInlineMethodTest(TestCase testCase) {
+    private SetDontInlineMethodTest(TestCase testCase) {
         super(testCase);
     }
 
--- a/test/compiler/whitebox/SetForceInlineMethodTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/compiler/whitebox/SetForceInlineMethodTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -27,19 +27,17 @@
  * @library /testlibrary /testlibrary/whitebox
  * @build SetForceInlineMethodTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetForceInlineMethodTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* SetForceInlineMethodTest
  * @summary testing of WB::testSetForceInlineMethod()
  * @author igor.ignatyev@oracle.com
  */
 public class SetForceInlineMethodTest extends CompilerWhiteBoxTest {
 
     public static void main(String[] args) throws Exception {
-        for (TestCase test : TestCase.values()) {
-            new SetForceInlineMethodTest(test).runTest();
-        }
+        CompilerWhiteBoxTest.main(SetForceInlineMethodTest::new, args);
     }
 
-    public SetForceInlineMethodTest(TestCase testCase) {
+    private SetForceInlineMethodTest(TestCase testCase) {
         super(testCase);
     }
 
--- a/test/gc/6941923/Test6941923.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/gc/6941923/Test6941923.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/TestVerifySilently.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test TestVerifySilently.java
+ * @key gc
+ * @bug 8032771
+ * @summary Test silent verification.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Collections;
+
+class RunSystemGC {
+  public static void main(String args[]) throws Exception {
+    System.gc();
+  }
+}
+
+
+public class TestVerifySilently {
+  private static String[] getTestJavaOpts() {
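+    // test.java.opts may be unset in some configurations; default to "" so
+    // the isEmpty() check below cannot throw a NullPointerException.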
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      return testVmOptsStr.split(" ");
+    } else {
+      return new String[] {};
+    }
+  }
+
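+  // Runs RunSystemGC in a child VM with verification enabled at startup and
+  // around GCs; verifySilently toggles -XX:+/-VerifySilently in the child.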
+  private static OutputAnalyzer runTest(boolean verifySilently) throws Exception {
+    ArrayList<String> vmOpts = new ArrayList<>();
+
+    Collections.addAll(vmOpts, getTestJavaOpts());
+    Collections.addAll(vmOpts, new String[] {"-XX:+UnlockDiagnosticVMOptions",
+                                             "-XX:+VerifyDuringStartup",
+                                             "-XX:+VerifyBeforeGC",
+                                             "-XX:+VerifyAfterGC",
+                                             "-XX:" + (verifySilently ? "+":"-") + "VerifySilently",
+                                             RunSystemGC.class.getName()});
+    ProcessBuilder pb =
+      ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+    return output;
+  }
+
+
+  public static void main(String args[]) throws Exception {
+
+    OutputAnalyzer output;
+
+    output = runTest(false);
+    output.shouldContain("[Verifying");
+    output.shouldHaveExitValue(0);
+
+    output = runTest(true);
+    output.shouldNotContain("[Verifying");
+    output.shouldHaveExitValue(0);
+  }
+}
--- a/test/gc/arguments/TestMaxHeapSizeTools.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/gc/arguments/TestMaxHeapSizeTools.java	Wed Mar 12 13:30:08 2014 +0100
@@ -41,8 +41,8 @@
   public long initialHeapSize;
   public long maxHeapSize;
 
-  public long minAlignment;
-  public long maxAlignment;
+  public long spaceAlignment;
+  public long heapAlignment;
 }
 
 class TestMaxHeapSizeTools {
@@ -192,7 +192,7 @@
     // Unfortunately there is no other way to retrieve the minimum heap size and
     // the alignments.
 
-    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Min alignment \\d+ Max alignment \\d+").
+    Matcher m = Pattern.compile("Minimum heap \\d+ Initial heap \\d+ Maximum heap \\d+ Space alignment \\d+ Heap alignment \\d+").
       matcher(output.getStdout());
     if (!m.find()) {
       throw new RuntimeException("Could not find heap size string.");
@@ -204,8 +204,8 @@
     val.minHeapSize = valueAfter(match, "Minimum heap ");
     val.initialHeapSize = valueAfter(match, "Initial heap ");
     val.maxHeapSize = valueAfter(match, "Maximum heap ");
-    val.minAlignment = valueAfter(match, "Min alignment ");
-    val.maxAlignment = valueAfter(match, "Max alignment ");
+    val.spaceAlignment = valueAfter(match, "Space alignment ");
+    val.heapAlignment = valueAfter(match, "Heap alignment ");
   }
 
   /**
@@ -218,12 +218,12 @@
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(args, v);
 
-    if ((expectedMin != -1) && (align_up(expectedMin, v.minAlignment) != v.minHeapSize)) {
+    if ((expectedMin != -1) && (align_up(expectedMin, v.heapAlignment) != v.minHeapSize)) {
       throw new RuntimeException("Actual minimum heap size of " + v.minHeapSize +
         " differs from expected minimum heap size of " + expectedMin);
     }
 
-    if ((expectedInitial != -1) && (align_up(expectedInitial, v.minAlignment) != v.initialHeapSize)) {
+    if ((expectedInitial != -1) && (align_up(expectedInitial, v.heapAlignment) != v.initialHeapSize)) {
       throw new RuntimeException("Actual initial heap size of " + v.initialHeapSize +
         " differs from expected initial heap size of " + expectedInitial);
     }
@@ -247,7 +247,7 @@
     MinInitialMaxValues v = new MinInitialMaxValues();
     getMinInitialMaxHeap(new String[] { gcflag, "-XX:MaxHeapSize=" + maxHeapsize + "M" }, v);
 
-    long expectedHeapSize = align_up(maxHeapsize * K * K, v.maxAlignment);
+    long expectedHeapSize = align_up(maxHeapsize * K * K, v.heapAlignment);
     long actualHeapSize = v.maxHeapSize;
 
     if (actualHeapSize > expectedHeapSize) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/defnew/HeapChangeLogging.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test HeapChangeLogging.java
+ * @bug 8027440
+ * @library /testlibrary
+ * @build HeapChangeLogging
+ * @summary Allocate to get a promotion failure and verify that heap change logging is present.
+ * @run main HeapChangeLogging
+ */
+
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.*;
+
+public class HeapChangeLogging {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xmx128m", "-Xmn100m", "-XX:+UseSerialGC", "-XX:+PrintGC", "HeapFiller");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    String stdout = output.getStdout();
+    System.out.println(stdout);
+    Matcher stdoutMatcher = Pattern.compile("\\[GC .Allocation Failure.*K->.*K\\(.*K\\), .* secs\\]", Pattern.MULTILINE).matcher(stdout);
+    if (!stdoutMatcher.find()) {
+      throw new RuntimeException("No proper GC log line found");
+    }
+    output.shouldHaveExitValue(0);
+  }
+}
+
+class HeapFiller {
+  public static Entry root;
+  private static final int PAYLOAD_SIZE = 1000;
+
+  public static void main(String[] args) {
+    root = new Entry(PAYLOAD_SIZE, null);
+    Entry current = root;
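+    // Keep every Entry reachable through the chain from root so the heap
+    // steadily fills with live objects until allocation fails.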
+    try {
+      while (true) {
+        Entry newEntry = new Entry(PAYLOAD_SIZE, current);
+        current = newEntry;
+      }
+    } catch (OutOfMemoryError e) {
+      root = null;
+    }
+
+  }
+}
+
+class Entry {
+  public Entry previous;
+  public byte[] payload;
+
+  Entry(int payloadSize, Entry previous) {
+    payload = new byte[payloadSize];
+    this.previous = previous;
+  }
+}
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/Test2GbHeap.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test Test2GbHeap
+ * @bug 8031686
+ * @summary Regression test to ensure we can start G1 with 2gb heap.
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ */
+
+import java.util.ArrayList;
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+public class Test2GbHeap {
+  public static void main(String[] args) throws Exception {
+    ArrayList<String> testArguments = new ArrayList<String>();
+
+    testArguments.add("-XX:+UseG1GC");
+    testArguments.add("-Xmx2g");
+    testArguments.add("-version");
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(testArguments.toArray(new String[0]));
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    // Avoid failing test for setups not supported.
+    if (output.getOutput().contains("Could not reserve enough space for 2097152KB object heap")) {
+      // Will fail on machines with too little memory (and Windows 32-bit VM), ignore such failures.
+      output.shouldHaveExitValue(1);
+    } else if (output.getOutput().contains("G1 GC is disabled in this release")) {
+      // G1 is not supported on embedded, ignore such failures.
+      output.shouldHaveExitValue(1);
+    } else {
+      // Normally everything should be fine.
+      output.shouldHaveExitValue(0);
+    }
+  }
+}
--- a/test/gc/g1/TestHumongousAllocInitialMark.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/gc/g1/TestHumongousAllocInitialMark.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/g1/TestStringSymbolTableStats.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestStringSymbolTableStats.java
+ * @bug 8027476 8027455
+ * @summary Ensure that the G1TraceStringSymbolTableScrubbing prints the expected message.
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestStringSymbolTableStats {
+  public static void main(String[] args) throws Exception {
+
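+    // Launch a child VM that calls System.gc(); the experimental tracing
+    // flag makes G1 report the string and symbol table scrubbing.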
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-XX:+UnlockExperimentalVMOptions",
+                                                              "-XX:+G1TraceStringSymbolTableScrubbing",
+                                                              SystemGCTest.class.getName());
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+
+    output.shouldContain("Cleaned string and symbol table");
+    output.shouldHaveExitValue(0);
+  }
+
+  static class SystemGCTest {
+    public static void main(String [] args) {
+      System.out.println("Calling System.gc()");
+      System.gc();
+    }
+  }
+}
--- a/test/runtime/6626217/Test6626217.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/6626217/Test6626217.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 # 
-#  Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 # 
 #  This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,11 @@
 # A Clean Compile: this line will probably fail within jtreg as have a clean dir:
 ${RM} -f *.class *.impl many_loader.java
 
+# Make sure that the compilation step occurs in the future so that fast
+# systems cannot copy and compile bug_21227.java quickly enough for the class
+# and java files to end up with the same time stamp, which would later make
+# the compilation step for many_loader.java fail
+sleep 2
+
 # Compile all the usual suspects, including the default 'many_loader'
 ${CP} many_loader1.java.foo many_loader.java
 ${JAVAC} ${TESTJAVACOPTS} -source 1.4 -target 1.4 -Xlint *.java
--- a/test/runtime/6925573/SortMethodsTest.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/6925573/SortMethodsTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -47,6 +47,7 @@
 import javax.tools.ToolProvider;
 
 /*
+ * @ignore 6959423
  * @test SortMethodsTest
  * @bug 6925573
  * @summary verify that class loading does not need quadratic time with regard to the number of class
--- a/test/runtime/6929067/Test6929067.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/6929067/Test6929067.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,15 +1,14 @@
 #!/bin/sh
 
 ##
-## @ignore 8028740
 ## @test Test6929067.sh
 ## @bug 6929067
 ## @bug 8021296
+## @bug 8025519
 ## @summary Stack guard pages should be removed when thread is detached
-## @compile T.java
 ## @run shell Test6929067.sh
 ##
-set -x
+
 if [ "${TESTSRC}" = "" ]
 then
   TESTSRC=${PWD}
@@ -114,10 +113,8 @@
 LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE}:/usr/lib:$LD_LIBRARY_PATH
 export LD_LIBRARY_PATH
 
-cp ${TESTSRC}${FS}invoke.c .
-
-# Copy the result of our @compile action:
-cp ${TESTCLASSES}${FS}T.class .
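+# Compile the Java sources here rather than relying on a jtreg @compile step.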
+cp ${TESTSRC}${FS}*.java ${THIS_DIR}
+${COMPILEJAVA}${FS}bin${FS}javac *.java
 
 echo "Architecture: ${ARCH}"
 echo "Compilation flag: ${COMP_FLAG}"
@@ -129,7 +126,7 @@
 $gcc_cmd -DLINUX ${COMP_FLAG} -o invoke \
     -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \
     -L${COMPILEJAVA}/jre/lib/${ARCH}/${VMTYPE} \
-    -ljvm -lpthread invoke.c
+     ${TESTSRC}${FS}invoke.c -ljvm -lpthread
 
 ./invoke
 exit $?
--- a/test/runtime/7107135/Test7107135.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/7107135/Test7107135.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 #
-#  Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 #  Copyright (c) 2011 SAP AG.  All Rights Reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
@@ -25,9 +25,11 @@
 #
 
 ##
+## @ignore 8025519
 ## @test Test7107135.sh
 ## @bug 7107135
 ## @bug 8021296
+## @bug 8025519
 ## @summary Stack guard pages lost after loading library with executable stack.
 ## @run shell Test7107135.sh
 ##
@@ -63,10 +65,10 @@
 THIS_DIR=.
 
 cp ${TESTSRC}${FS}*.java ${THIS_DIR}
-${TESTJAVA}${FS}bin${FS}javac *.java
+${COMPILEJAVA}${FS}bin${FS}javac *.java
 
 $gcc_cmd -fPIC -shared -c -o test.o \
-    -I${TESTJAVA}${FS}include -I${TESTJAVA}${FS}include${FS}linux \
+    -I${COMPILEJAVA}${FS}include -I${COMPILEJAVA}${FS}include${FS}linux \
     ${TESTSRC}${FS}test.c
 
 ld -shared -z   execstack -o libtest-rwx.so test.o
--- a/test/runtime/7110720/Test7110720.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/7110720/Test7110720.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-#  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 
--- a/test/runtime/7158988/FieldMonitor.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/7158988/FieldMonitor.java	Wed Mar 12 13:30:08 2014 +0100
@@ -34,10 +34,6 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.Reader;
-import java.io.Writer;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -56,6 +52,7 @@
 import com.sun.jdi.event.EventSet;
 import com.sun.jdi.event.ModificationWatchpointEvent;
 import com.sun.jdi.event.VMDeathEvent;
+import com.sun.jdi.event.VMStartEvent;
 import com.sun.jdi.event.VMDisconnectEvent;
 import com.sun.jdi.request.ClassPrepareRequest;
 import com.sun.jdi.request.EventRequest;
@@ -71,24 +68,10 @@
   public static void main(String[] args)
       throws IOException, InterruptedException {
 
-    StringBuffer sb = new StringBuffer();
-
-    for (int i=0; i < args.length; i++) {
-        sb.append(' ');
-        sb.append(args[i]);
-    }
     //VirtualMachine vm = launchTarget(sb.toString());
     VirtualMachine vm = launchTarget(CLASS_NAME);
 
     System.out.println("Vm launched");
-    // set watch field on already loaded classes
-    List<ReferenceType> referenceTypes = vm
-        .classesByName(CLASS_NAME);
-    for (ReferenceType refType : referenceTypes) {
-      addFieldWatch(vm, refType);
-    }
-    // watch for loaded classes
-    addClassWatch(vm);
 
     // process events
     EventQueue eventQueue = vm.eventQueue();
@@ -104,13 +87,15 @@
     errThread.start();
     outThread.start();
 
-
-    vm.resume();
     boolean connected = true;
+    int watched = 0;
     while (connected) {
       EventSet eventSet = eventQueue.remove();
       for (Event event : eventSet) {
-        if (event instanceof VMDeathEvent
+        System.out.println("FieldMonitor-main receives: "+event);
+        if (event instanceof VMStartEvent) {
+          addClassWatch(vm);
+        } else if (event instanceof VMDeathEvent
             || event instanceof VMDisconnectEvent) {
           // exit
           connected = false;
@@ -122,17 +107,17 @@
               .referenceType();
           addFieldWatch(vm, refType);
         } else if (event instanceof ModificationWatchpointEvent) {
+          watched++;
           System.out.println("sleep for 500 ms");
           Thread.sleep(500);
-          System.out.println("resume...");
 
           ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event;
           System.out.println("old="
               + modEvent.valueCurrent());
           System.out.println("new=" + modEvent.valueToBe());
-          System.out.println();
         }
       }
+      System.out.println("resume...");
       eventSet.resume();
     }
     // Shutdown begins when event thread terminates
@@ -142,6 +127,10 @@
     } catch (InterruptedException exc) {
         // we don't interrupt
     }
+
+    if (watched != 11) { // init + 10 modifications in TestPostFieldModification class
+        throw new Error("Expected to receive 11 times ModificationWatchpointEvent, but got "+watched);
+    }
   }
 
   /**
--- a/test/runtime/7162488/Test7162488.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/7162488/Test7162488.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 #
-#  Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 #  This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/ClassFile/UnsupportedClassFileVersion.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @library /testlibrary
+ * @compile -XDignore.symbol.file UnsupportedClassFileVersion.java
+ * @run main UnsupportedClassFileVersion
+ */
+
+import java.io.File;
+import java.io.FileOutputStream;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+import com.oracle.java.testlibrary.*;
+
+public class UnsupportedClassFileVersion implements Opcodes {
+    public static void main(String... args) throws Exception {
+        writeClassFile();
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(true, "-cp", ".",  "ClassFile");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("ClassFile has been compiled by a more recent version of the " +
+                            "Java Runtime (class file version 99.0), this version of " +
+                            "the Java Runtime only recognizes class file versions up to " +
+                            System.getProperty("java.class.version"));
+
+        output.shouldHaveExitValue(1);
+    }
+
+    public static void writeClassFile() throws Exception {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
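+        // Emit a minimal class with major version 99, far beyond what any
+        // current VM supports, so loading it must fail with the message
+        // checked in main().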
+        cw.visit(99, ACC_PUBLIC + ACC_SUPER, "ClassFile", null, "java/lang/Object", null);
+        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+        mv.visitCode();
+        mv.visitVarInsn(ALOAD, 0);
+        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+        mv.visitInsn(RETURN);
+        mv.visitMaxs(1, 1);
+        mv.visitEnd();
+        cw.visitEnd();
+
+        try (FileOutputStream fos = new FileOutputStream(new File("ClassFile.class"))) {
+             fos.write(cw.toByteArray());
+        }
+    }
+}
--- a/test/runtime/CommandLine/CompilerConfigFileWarning.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 
 public class CompilerConfigFileWarning {
     public static void main(String[] args) throws Exception {
-        String vmVersion = System.getProperty("java.vm.version");
-        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
+        if (Platform.isDebugBuild()) {
             System.out.println("Skip on debug builds since we'll always read the file there");
             return;
         }
--- a/test/runtime/CommandLine/ConfigFileWarning.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/CommandLine/ConfigFileWarning.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,7 @@
 
 public class ConfigFileWarning {
     public static void main(String[] args) throws Exception {
-        String vmVersion = System.getProperty("java.vm.version");
-        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
+        if (Platform.isDebugBuild()) {
             System.out.println("Skip on debug builds since we'll always read the file there");
             return;
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/CommandLine/VMOptionWarning.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8027314
+ * @summary Warn if diagnostic or experimental vm option is used and -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions, respectively, isn't specified. Warn if develop or notproduct vm option is used with product version of VM.
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class VMOptionWarning {
+    public static void main(String[] args) throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PredictedLoadedClassCount", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'PredictedLoadedClassCount' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.");
+
+        if (Platform.isDebugBuild()) {
+            System.out.println("Skip the rest of the tests on debug builds since diagnostic, develop, and notproduct options are available on debug builds.");
+            return;
+        }
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintInlining", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'PrintInlining' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.");
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJNICalls", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'TraceJNICalls' is develop and is available only in debug version of VM.");
+
+        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJVMCalls", "-version");
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Error: VM option 'TraceJVMCalls' is notproduct and is available only in debug version of VM.");
+    }
+}
--- a/test/runtime/CompressedOops/CompressedClassPointers.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/CompressedOops/CompressedClassPointers.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,6 +80,18 @@
         output.shouldHaveExitValue(0);
     }
 
+    public static void heapBaseMinAddressTest() throws Exception {
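+        // 1m is below the supported minimum, so the VM should warn that
+        // HeapBaseMinAddress must be at least the platform minimum and
+        // still start up successfully.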
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:HeapBaseMinAddress=1m",
+            "-XX:+PrintMiscellaneous",
+            "-XX:+Verbose",
+            "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("HeapBaseMinAddress must be at least");
+        output.shouldContain("HotSpot");
+        output.shouldHaveExitValue(0);
+    }
+
     public static void sharingTest() throws Exception {
         // Test small heaps
         ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
@@ -113,24 +125,25 @@
         }
     }
 
-  public static void main(String[] args) throws Exception {
-      if (!Platform.is64bit()) {
-          // Can't test this on 32 bit, just pass
-          System.out.println("Skipping test on 32bit");
-          return;
-      }
-      // Solaris 10 can't mmap compressed oops space without a base
-      if (Platform.isSolaris()) {
-           String name = System.getProperty("os.version");
-           if (name.equals("5.10")) {
-               System.out.println("Skipping test on Solaris 10");
-               return;
-           }
-      }
-      smallHeapTest();
-      smallHeapTestWith3G();
-      largeHeapTest();
-      largePagesTest();
-      sharingTest();
-  }
+    public static void main(String[] args) throws Exception {
+        if (!Platform.is64bit()) {
+            // Can't test this on 32 bit, just pass
+            System.out.println("Skipping test on 32bit");
+            return;
+        }
+        // Solaris 10 can't mmap compressed oops space without a base
+        if (Platform.isSolaris()) {
+             String name = System.getProperty("os.version");
+             if (name.equals("5.10")) {
+                 System.out.println("Skipping test on Solaris 10");
+                 return;
+             }
+        }
+        smallHeapTest();
+        smallHeapTestWith3G();
+        largeHeapTest();
+        largePagesTest();
+        heapBaseMinAddressTest();
+        sharingTest();
+    }
 }
--- a/test/runtime/LoadClass/LoadClassNegative.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/LoadClass/LoadClassNegative.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,6 @@
  */
 
 /*
- * @ignore 8028095
  * @test
  * @key regression
  * @bug 8020675
--- a/test/runtime/NMT/CommandLineDetail.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/NMT/CommandLineDetail.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,7 +24,7 @@
  /*
  * @test
  * @key nmt
- * @summary Running with NMT detail should not result in an error or warning
+ * @summary Running with NMT detail should not result in an error
  * @library /testlibrary
  */
 
@@ -39,7 +39,6 @@
       "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("error");
-    output.shouldNotContain("warning");
     output.shouldHaveExitValue(0);
   }
 }
--- a/test/runtime/NMT/CommandLineSummary.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/NMT/CommandLineSummary.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,7 +24,7 @@
  /*
  * @test
  * @key nmt
- * @summary Running with NMT summary should not result in an error or warning
+ * @summary Running with NMT summary should not result in an error
  * @library /testlibrary
  */
 
@@ -39,7 +39,6 @@
       "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("error");
-    output.shouldNotContain("warning");
     output.shouldHaveExitValue(0);
   }
 }
--- a/test/runtime/NMT/CommandLineTurnOffNMT.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/NMT/CommandLineTurnOffNMT.java	Wed Mar 12 13:30:08 2014 +0100
@@ -24,7 +24,7 @@
  /*
  * @test
  * @key nmt
- * @summary Turning off NMT should not result in an error or warning
+ * @summary Turning off NMT should not result in an error
  * @library /testlibrary
  */
 
@@ -38,7 +38,6 @@
               "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     output.shouldNotContain("error");
-    output.shouldNotContain("warning");
     output.shouldHaveExitValue(0);
   }
 }
--- a/test/runtime/NMT/PrintNMTStatistics.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/NMT/PrintNMTStatistics.java	Wed Mar 12 13:30:08 2014 +0100
@@ -64,7 +64,6 @@
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
     output.shouldContain("Java Heap (reserved=");
     output.shouldNotContain("error");
-    output.shouldNotContain("warning");
     output.shouldHaveExitValue(0);
   }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/PerfMemDestroy/PerfMemDestroy.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8030955
+ * @summary Allow multiple calls to PerfMemory::destroy() without asserting.
+ * @library /testlibrary
+ * @run main PerfMemDestroy
+ */
+
+import java.io.File;
+import java.util.Map;
+import com.oracle.java.testlibrary.*;
+
+public class PerfMemDestroy {
+    public static void main(String[] args) throws Throwable {
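+        // 8030955: with at-exit registration allowed, PerfMemory::destroy()
+        // may be called more than once during shutdown; the VM must exit
+        // cleanly instead of asserting.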
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PerfAllowAtExitRegistration", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+    }
+}
--- a/test/runtime/RedefineObject/Agent.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/RedefineObject/Agent.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,8 @@
     public static void main(String[] args) {
         byte[] ba = new byte[0];
 
-        // If it survives 1000 GC's, it's good.
-        for (int i = 0; i < 1000 ; i++) {
+        // If it survives 100 GC's, it's good.
+        for (int i = 0; i < 100 ; i++) {
             System.gc();
             ba.clone();
         }
--- a/test/runtime/RedefineObject/TestRedefineObject.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/RedefineObject/TestRedefineObject.java	Wed Mar 12 13:30:08 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test ArchiveDoesNotExist
+ * @summary Test how VM handles "file does not exist" situation while
+ *          attempting to use CDS archive. JVM should exit gracefully
+ *          when sharing mode is ON, and continue without sharing if sharing
+ *          mode is AUTO.
+ * @library /testlibrary
+ * @run main ArchiveDoesNotExist
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class ArchiveDoesNotExist {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        File cdsFile = new File(fileName);
+        if (cdsFile.exists())
+            throw new RuntimeException("Test error: cds file already exists");
+
+        // Sharing: on
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:on",
+           "-version");
+
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Specified shared archive not found");
+        output.shouldHaveExitValue(1);
+
+        // Sharing: auto
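+        // with -Xshare:auto the VM should silently fall back to running
+        // without sharing when the archive is missing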
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:auto",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("java version");
+        output.shouldNotContain("sharing");
+        output.shouldHaveExitValue(0);
+    }
+}
--- a/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Wed Mar 12 13:30:08 2014 +0100
@@ -22,6 +22,7 @@
  */
 
 /*
+ * @ignore 8025642
  * @test CdsDifferentObjectAlignment
  * @summary Testing CDS (class data sharing) using varying object alignment.
  *          Using different object alignment for each dump/load pair.
@@ -29,6 +30,7 @@
  *          is different from object alignment for creating a CDS file
  *          should fail when loading.
  * @library /testlibrary
+ * @bug 8025642
  */
 
 import com.oracle.java.testlibrary.*;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/CdsWriteError.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @ignore 8032222
+ * @test CdsWriteError
+ * @summary Test how VM handles situation when it is impossible to write the
+ *          CDS archive. VM is expected to exit gracefully and display the
+ *          correct reason for the error.
+ * @library /testlibrary
+ * @run main CdsWriteError
+ * @bug 8032222
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class CdsWriteError {
+    public static void main(String[] args) throws Exception {
+
+        if (Platform.isWindows()) {
+            System.out.println("This test is ignored on Windows. This test " +
+                "manipulates folder writable attribute, which is known to be " +
+                "often ignored by Windows");
+
+            return;
+        }
+
+        String folderName = "tmp";
+        String fileName = folderName + File.separator + "empty.jsa";
+
+        // create an empty archive file and make it read only
+        File folder = new File(folderName);
+        if (!folder.mkdir())
+            throw new RuntimeException("Error when creating a tmp folder");
+
+        File cdsFile = new File(fileName);
+        if (!cdsFile.createNewFile())
+            throw new RuntimeException("Error when creating an empty CDS file");
+        if (!cdsFile.setWritable(false))
+            throw new RuntimeException("Error: could not set writable attribute on cds file");
+        if (!folder.setWritable(false))
+            throw new RuntimeException("Error: could not set writable attribute on the cds folder");
+
+        try {
+           ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+             "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./" + fileName, "-Xshare:dump");
+
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldContain("Unable to create shared archive file");
+            output.shouldHaveExitValue(1);
+        } finally {
+            // restore write permissions so that the harness can delete the
+            // files on any subsequent run
+            folder.setWritable(true);
+            cdsFile.setWritable(true);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @ignore 8032224
+ * @test DefaultUseWithClient
+ * @summary Test default behavior of sharing with -client
+ * @library /testlibrary
+ * @run main DefaultUseWithClient
+ * @bug 8032224
+ */
+
+import com.oracle.java.testlibrary.*;
+import java.io.File;
+
+public class DefaultUseWithClient {
+    public static void main(String[] args) throws Exception {
+        String fileName = "test.jsa";
+
+        // On 32-bit windows CDS should be on by default in "-client" config
+        // Skip this test on any other platform
+        boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit());
+        if (!is32BitWindows) {
+            System.out.println("Test only applicable on 32-bit Windows. Skipping");
+            return;
+        }
+
+        // create the archive
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-Xshare:dump");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+
+        pb = ProcessTools.createJavaProcessBuilder(
+           "-XX:+UnlockDiagnosticVMOptions",
+           "-XX:SharedArchiveFile=./" + fileName,
+           "-client",
+           "-version");
+
+        output = new OutputAnalyzer(pb.start());
+        output.shouldContain("sharing");
+        output.shouldHaveExitValue(0);
+    }
+}
--- a/test/runtime/XCheckJniJsig/XCheckJSig.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/XCheckJniJsig/XCheckJSig.java	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,6 @@
  */
 
 /*
- * @ignore 8023735
  * @test
  * @bug 7051189 8023393
  * @summary Need to suppress info message if -Xcheck:jni is used with libjsig.so
@@ -30,7 +29,8 @@
  * @run main XCheckJSig
  */
 
-import java.util.*;
+import java.io.File;
+import java.util.Map;
 import com.oracle.java.testlibrary.*;
 
 public class XCheckJSig {
@@ -47,33 +47,36 @@
         String libjsig;
         String env_var;
         if (Platform.isOSX()) {
-            libjsig = jdk_path + "/jre/lib/server/libjsig.dylib";
             env_var = "DYLD_INSERT_LIBRARIES";
+            libjsig = jdk_path + "/jre/lib/libjsig.dylib"; // jdk location
+            if (!(new File(libjsig).exists())) {
+                libjsig = jdk_path + "/lib/libjsig.dylib"; // jre location
+            }
         } else {
-            libjsig = jdk_path + "/jre/lib/" + os_arch + "/libjsig.so";
             env_var = "LD_PRELOAD";
-        }
-        String java_program;
-        if (Platform.isSolaris()) {
-            // On Solaris, need to call the 64-bit Java directly in order for
-            // LD_PRELOAD to work because libjsig.so is 64-bit.
-            java_program = jdk_path + "/jre/bin/" + os_arch + "/java";
-        } else {
-            java_program = JDKToolFinder.getJDKTool("java");
+            libjsig = jdk_path + "/jre/lib/" + os_arch + "/libjsig.so"; // jdk location
+            if (!(new File(libjsig).exists())) {
+                libjsig = jdk_path + "/lib/" + os_arch + "/libjsig.so"; // jre location
+            }
         }
         // If this test fails, these might be useful to know.
         System.out.println("libjsig: " + libjsig);
         System.out.println("osArch: " + os_arch);
-        System.out.println("java_program: " + java_program);
 
-        ProcessBuilder pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-version");
+        // Make sure the libjsig file exists.
+        if (!(new File(libjsig).exists())) {
+            System.out.println("File " + libjsig + " not found, skipping");
+            return;
+        }
+
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xcheck:jni", "-version");
         Map<String, String> env = pb.environment();
         env.put(env_var, libjsig);
         OutputAnalyzer output = new OutputAnalyzer(pb.start());
         output.shouldNotContain("libjsig is activated");
         output.shouldHaveExitValue(0);
 
-        pb = new ProcessBuilder(java_program, "-Xcheck:jni", "-verbose:jni", "-version");
+        pb = ProcessTools.createJavaProcessBuilder("-Xcheck:jni", "-verbose:jni", "-version");
         env = pb.environment();
         env.put(env_var, libjsig);
         output = new OutputAnalyzer(pb.start());
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/finalStatic/FinalStatic.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028553
+ * @summary Test that VerifyError is not thrown when 'overriding' a static method.
+ * @run main FinalStatic
+ */
+
+import java.lang.reflect.*;
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+import jdk.internal.org.objectweb.asm.Opcodes;
+
+/*
+ *  class A { static final int m() { return FAILED; } }
+ *  class B extends A { int m() { return EXPECTED; } }
+ *  class FinalStatic {
+ *      public static void main () {
+ *          Object b = new B();
+ *          b.m();
+ *      }
+ *  }
+ */
+public class FinalStatic {
+
+    static final String CLASS_NAME_A = "A";
+    static final String CLASS_NAME_B = "B";
+    static final int FAILED = 0;
+    static final int EXPECTED = 1234;
+
+    static class TestClassLoader extends ClassLoader implements Opcodes {
+
+        @Override
+        public Class findClass(String name) throws ClassNotFoundException {
+            byte[] b;
+            try {
+                b = loadClassData(name);
+            } catch (Throwable th) {
+                // th.printStackTrace();
+                throw new ClassNotFoundException("Loading error", th);
+            }
+            return defineClass(name, b, 0, b.length);
+        }
+
+        private byte[] loadClassData(String name) throws Exception {
+            ClassWriter cw = new ClassWriter(0);
+            MethodVisitor mv;
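+            // generate class A with a "static final int m()" and class B,
+            // extending A, with a public instance method of the same name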
+            switch (name) {
+                case CLASS_NAME_A:
+                    cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_A, null, "java/lang/Object", null);
+                    {
+                        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+                        mv.visitCode();
+                        mv.visitVarInsn(ALOAD, 0);
+                        mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V");
+                        mv.visitInsn(RETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                        mv = cw.visitMethod(ACC_FINAL | ACC_STATIC, "m", "()I", null, null);
+                        mv.visitCode();
+                        mv.visitLdcInsn(FAILED);
+                        mv.visitInsn(IRETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+                    }
+                    break;
+                case CLASS_NAME_B:
+                    cw.visit(52, ACC_SUPER | ACC_PUBLIC, CLASS_NAME_B, null, CLASS_NAME_A, null);
+                    {
+                        mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+                        mv.visitCode();
+                        mv.visitVarInsn(ALOAD, 0);
+                        mv.visitMethodInsn(INVOKESPECIAL, CLASS_NAME_A, "<init>", "()V");
+                        mv.visitInsn(RETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                        mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+                        mv.visitCode();
+                        mv.visitLdcInsn(EXPECTED);
+                        mv.visitInsn(IRETURN);
+                        mv.visitMaxs(1, 1);
+                        mv.visitEnd();
+
+                    }
+                    break;
+                default:
+                    break;
+            }
+            cw.visitEnd();
+
+            return cw.toByteArray();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        TestClassLoader tcl = new TestClassLoader();
+        Class<?> a = tcl.loadClass(CLASS_NAME_A);
+        Class<?> b = tcl.loadClass(CLASS_NAME_B);
+        Object inst = b.newInstance();
+        Method[] meths = b.getDeclaredMethods();
+
+        Method m = meths[0];
+        int mod = m.getModifiers();
+        if ((mod & Modifier.FINAL) != 0) {
+            throw new Exception("FAILED: " + m + " is FINAL");
+        }
+        if ((mod & Modifier.STATIC) != 0) {
+            throw new Exception("FAILED: " + m + " is STATIC");
+        }
+
+        m.setAccessible(true);
+        if (!m.invoke(inst).equals(EXPECTED)) {
+              throw new Exception("FAILED: " + EXPECTED + " from " + m);
+        }
+
+        System.out.println("Passed.");
+    }
+}
--- a/test/runtime/jsig/Test8017498.sh	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/jsig/Test8017498.sh	Wed Mar 12 13:30:08 2014 +0100
@@ -1,7 +1,7 @@
 #!/bin/sh
 
 #
-#  Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+#  Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 #  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 #  This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,10 @@
 ## @bug 8017498
 ## @bug 8020791
 ## @bug 8021296
+## @bug 8022301
+## @bug 8025519
 ## @summary sigaction(sig) results in process hang/timed-out if sig is much greater than SIGRTMAX
-## @run shell/timeout=30 Test8017498.sh
+## @run shell/timeout=60 Test8017498.sh
 ##
 
 if [ "${TESTSRC}" = "" ]
@@ -42,6 +44,8 @@
 ## Adding common setup Variables for running shell tests.
 . ${TESTSRC}/../../test_env.sh
 
+EXTRA_CFLAG=
+
 # set platform-dependent variables
 OS=`uname -s`
 case "$OS" in
@@ -52,11 +56,9 @@
         echo "WARNING: gcc not found. Cannot execute test." 2>&1
         exit 0;
     fi
-    if [ "$VM_BITS" = "64" ]
-    then
-        MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}amd64${FS}libjsig.so
-    else
-        MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}i386${FS}libjsig.so
+    MY_LD_PRELOAD=${TESTJAVA}${FS}jre${FS}lib${FS}${VM_CPU}${FS}libjsig.so
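+    # on a 32-bit VM (except arm/ppc) tell gcc to build a 32-bit library
+    # so that it can be preloaded into the 32-bit JVM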
+    if [ "$VM_BITS" == "32" ] && [ "$VM_CPU" != "arm" ] && [ "$VM_CPU" != "ppc" ]; then
+            EXTRA_CFLAG=-m32
     fi
     echo MY_LD_PRELOAD = ${MY_LD_PRELOAD}
     ;;
@@ -69,12 +71,13 @@
 THIS_DIR=.
 
 cp ${TESTSRC}${FS}*.java ${THIS_DIR}
-${TESTJAVA}${FS}bin${FS}javac *.java
+${COMPILEJAVA}${FS}bin${FS}javac *.java
 
 $gcc_cmd -DLINUX -fPIC -shared \
+    ${EXTRA_CFLAG} -z noexecstack \
     -o ${TESTSRC}${FS}libTestJNI.so \
-    -I${TESTJAVA}${FS}include \
-    -I${TESTJAVA}${FS}include${FS}linux \
+    -I${COMPILEJAVA}${FS}include \
+    -I${COMPILEJAVA}${FS}include${FS}linux \
     ${TESTSRC}${FS}TestJNI.c
 
 # run the java test in the background
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/InvokespecialInterface.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8032024
+ * @bug 8025937
+ * @bug 8033528
+ * @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+StressRewriter InvokespecialInterface
+ */
+import java.util.function.*;
+import java.util.*;
+
+interface I {
+  default void imethod() { System.out.println("I::imethod"); }
+}
+
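+// C reaches the same JVM_CONSTANT_InterfaceMethodref through both an
+// invokespecial (I.super.imethod()) and an invokeinterface (i.imethod()) call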
+class C implements I {
+  public void foo() { I.super.imethod(); }  // invokespecial InterfaceMethod
+  public void bar() { I i = this; i.imethod(); } // invokeinterface same
+  public void doSomeInvokedynamic() {
+      String str = "world";
+      Supplier<String> foo = ()->"hello, "+str;
+      String res = foo.get();
+      System.out.println(res);
+  }
+}
+
+public class InvokespecialInterface {
+  public static void main(java.lang.String[] unused) {
+     // create a C instance and exercise both invocation forms plus invokedynamic
+     C c = new C();
+     c.foo();
+     c.bar();
+     c.doSomeInvokedynamic();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/lambda-features/TestConcreteClassWithAbstractMethod.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8032010
+ * @summary method lookup on an abstract method in a concrete class should be successful
+ * @run main TestConcreteClassWithAbstractMethod
+ */
+
+import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.MethodVisitor;
+
+import static jdk.internal.org.objectweb.asm.Opcodes.*;
+
+/*
+ *   class T1 { public int m() {} }
+ *   class T2 { public abstract int m(); }
+ *   class T3 { public int m() {} }
+ *
+ *   Call site: T3.test() { invokevirtual T2.m() }
+ *   T3.m() should be invoked
+ */
+public class TestConcreteClassWithAbstractMethod {
+    static final String classT1 = "p1.T1";
+    static final String classT2 = "p1.T2";
+    static final String classT3 = "p1.T3";
+
+    static final String callerName = classT3;
+
+    public static void main(String[] args) throws Exception {
+        ClassLoader cl = new ClassLoader() {
+            public Class<?> loadClass(String name) throws ClassNotFoundException {
+                if (findLoadedClass(name) != null) {
+                    return findLoadedClass(name);
+                }
+
+                if (classT1.equals(name)) {
+                    byte[] classFile = dumpT1();
+                    return defineClass(classT1, classFile, 0, classFile.length);
+                }
+                if (classT2.equals(name)) {
+                    byte[] classFile = dumpT2();
+                    return defineClass(classT2, classFile, 0, classFile.length);
+                }
+                if (classT3.equals(name)) {
+                    byte[] classFile = dumpT3();
+                    return defineClass(classT3, classFile, 0, classFile.length);
+                }
+
+                return super.loadClass(name);
+            }
+        };
+
+        cl.loadClass(classT1);
+        cl.loadClass(classT2);
+        cl.loadClass(classT3);
+
+        cl.loadClass(callerName).newInstance();
+
+        int result = (Integer)cl.loadClass(callerName).getDeclaredMethod("test").invoke(null);
+        System.out.println(result);  // T3.m() should be selected and return 2
+    }
+
+    public static byte[] dumpT1() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T1", null, "java/lang/Object", null);
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+            mv.visitCode();
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitLdcInsn("p1/T1.m()");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
+            mv.visitIntInsn(BIPUSH, 3);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(2, 1);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    public static byte[] dumpT2() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T2", null, "p1/T1", null);
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T1", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
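+            // the point of the test: an ACC_ABSTRACT method in a class
+            // that is itself not abstract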
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_ABSTRACT, "m", "()I", null, null);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+
+    public static byte[] dumpT3() {
+        ClassWriter cw = new ClassWriter(0);
+        MethodVisitor mv;
+
+        cw.visit(52, ACC_PUBLIC + ACC_SUPER, "p1/T3", null, "p1/T2", null);
+
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
+            mv.visitCode();
+            mv.visitVarInsn(ALOAD, 0);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T2", "<init>", "()V", false);
+            mv.visitInsn(RETURN);
+            mv.visitMaxs(1, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
+            mv.visitCode();
+            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
+            mv.visitLdcInsn("p1/T3.m()");
+            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
+            mv.visitIntInsn(BIPUSH, 2);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(2, 1);
+            mv.visitEnd();
+        }
+        {
+            mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "test", "()I", null, null);
+            mv.visitCode();
+            mv.visitTypeInsn(NEW, "p1/T3");
+            mv.visitInsn(DUP);
+            mv.visitMethodInsn(INVOKESPECIAL, "p1/T3", "<init>", "()V", false);
+            mv.visitMethodInsn(INVOKEVIRTUAL, "p1/T2", "m", "()I", false);
+            mv.visitInsn(IRETURN);
+            mv.visitMaxs(3, 2);
+            mv.visitEnd();
+        }
+        cw.visitEnd();
+
+        return cw.toByteArray();
+    }
+}
--- a/test/runtime/memory/ReadFromNoaccessArea.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/runtime/memory/ReadFromNoaccessArea.java	Wed Mar 12 13:30:08 2014 +0100
@@ -22,7 +22,6 @@
  */
 
 /*
- * @ignore 8028398
  * @test
  * @summary Test that touching noaccess area in class ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION
  * @library /testlibrary /testlibrary/whitebox
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/dcmd/DcmdUtil.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.management.ManagementFactoryHelper;
+
+import com.sun.management.DiagnosticCommandMBean;
+
+public class DcmdUtil
+{
+    public static String executeDcmd(String cmd, String... args) {
+        DiagnosticCommandMBean dcmd = ManagementFactoryHelper.getDiagnosticCommandMBean();
+        Object[] dcmdArgs = {args};
+        String[] signature = {String[].class.getName()};
+
+        try {
+            System.out.print("> " + cmd + " ");
+            for (String s : args) {
+                System.out.print(s + " ");
+            }
+            System.out.println(":");
+            String result = (String) dcmd.invoke(transform(cmd), dcmdArgs, signature);
+            System.out.println(result);
+            return result;
+        } catch(Exception ex) {
+            ex.printStackTrace();
+        }
+        return null;
+    }
+
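+    // Converts a diagnostic command name such as "VM.dynlibs" into the
+    // camel-case operation name ("vmDynlibs") used by DiagnosticCommandMBean.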
+    private static String transform(String name) {
+        StringBuilder sb = new StringBuilder();
+        boolean toLower = true;
+        boolean toUpper = false;
+        for (int i = 0; i < name.length(); i++) {
+            char c = name.charAt(i);
+            if (c == '.' || c == '_') {
+                toLower = false;
+                toUpper = true;
+            } else {
+                if (toUpper) {
+                    toUpper = false;
+                    sb.append(Character.toUpperCase(c));
+                } else if(toLower) {
+                    sb.append(Character.toLowerCase(c));
+                } else {
+                    sb.append(c);
+                }
+            }
+        }
+        return sb.toString();
+    }
+
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/dcmd/DynLibDcmdTest.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test of VM.dynlibs diagnostic command via MBean
+ * @library /testlibrary
+ * @compile DcmdUtil.java
+ * @run main DynLibDcmdTest
+ */
+
+import java.util.HashSet;
+import java.util.Set;
+import com.oracle.java.testlibrary.Platform;
+
+public class DynLibDcmdTest {
+
+    public static void main(String[] args) throws Exception {
+        String result = DcmdUtil.executeDcmd("VM.dynlibs");
+
+        String osDependentBaseString = null;
+        if (Platform.isSolaris()) {
+            osDependentBaseString = "lib%s.so";
+        } else if (Platform.isWindows()) {
+            osDependentBaseString = "%s.dll";
+        } else if (Platform.isOSX()) {
+            osDependentBaseString = "lib%s.dylib";
+        } else if (Platform.isLinux()) {
+            osDependentBaseString = "lib%s.so";
+        }
+
+        if (osDependentBaseString == null) {
+            throw new Exception("Unsupported OS");
+        }
+
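+        // any JVM process should have at least these libraries loaded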
+        Set<String> expectedContent = new HashSet<>();
+        expectedContent.add(String.format(osDependentBaseString, "jvm"));
+        expectedContent.add(String.format(osDependentBaseString, "java"));
+        expectedContent.add(String.format(osDependentBaseString, "management"));
+
+        for(String expected : expectedContent) {
+            if (!result.contains(expected)) {
+                throw new Exception("Dynamic library list output did not contain the expected string: '" + expected + "'");
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/serviceability/sa/jmap-hashcode/Test8028623.java	Wed Mar 12 13:30:08 2014 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8028623
+ * @summary Test hashing of extended characters in Serviceability Agent.
+ * @library /testlibrary
+ * @compile -encoding utf8 Test8028623.java
+ * @run main Test8028623
+ */
+
+import com.oracle.java.testlibrary.JDKToolLauncher;
+import com.oracle.java.testlibrary.OutputBuffer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import java.io.File;
+
+public class Test8028623 {
+
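+  // the field name deliberately uses an extended (non-ASCII) character,
+  // forcing the SA to hash it when jmap walks the heap (8028623)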
+  public static int Ã = 1;
+  public static String dumpFile = "heap.out";
+
+  public static void main (String[] args) {
+
+    System.out.println(Ã);
+
+    try {
+        int pid = ProcessTools.getProcessId();
+        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
+                                              .addToolArg("-F")
+                                              .addToolArg("-dump:live,format=b,file=" + dumpFile)
+                                              .addToolArg(Integer.toString(pid));
+        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
+        // run jmap once, capturing both its output and its exit status
+        Process p = pb.start();
+        OutputBuffer output = ProcessTools.getOutput(p);
+        int e = p.waitFor();
+        System.out.println("stdout:");
+        System.out.println(output.getStdout());
+        System.out.println("stderr:");
+        System.out.println(output.getStderr());
+
+        if (e != 0) {
+            throw new RuntimeException("jmap returns: " + e);
+        }
+        if (! new File(dumpFile).exists()) {
+            throw new RuntimeException("dump file NOT created: '" + dumpFile + "'");
+        }
+    } catch (Throwable t) {
+        t.printStackTrace();
+        throw new RuntimeException("Test failed with: " + t);
+    }
+  }
+}
--- a/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Tue Mar 11 15:34:06 2014 +0100
+++ b/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Wed Mar 12 13:30:08 2014 +0100
@@ -142,11 +142,23 @@
    * with any platform specific arguments prepended
    */
   public static ProcessBuilder createJavaProcessBuilder(String... command) throws Exception {
+    return createJavaProcessBuilder(false, command);
+  }
+
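+  /**
+   * Create a ProcessBuilder using the java launcher from the JDK under test,
+   * optionally adding the VM options handed to the test harness (the
+   * test.vm.opts system property) before the supplied command.
+   */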
+  public static ProcessBuilder createJavaProcessBuilder(boolean addTestVmOptions, String... command) throws Exception {
     String javapath = JDKToolFinder.getJDKTool("java");
 
     ArrayList<String> args = new ArrayList<>();
     args.add(javapath);
     Collections.addAll(args, getPlatformSpecificVMArgs());
+
+    if (addTestVmOptions) {
+      String vmopts = System.getProperty("test.vm.opts");
+      if (vmopts != null && vmopts.length() > 0) {
+        Collections.addAll(args, vmopts.split("\\s"));
+      }
+    }
+
     Collections.addAll(args, command);
 
     // Reporting
@@ -157,5 +169,4 @@
 
     return new ProcessBuilder(args.toArray(new String[args.size()]));
   }
-
 }